author     Linus Torvalds <torvalds@linux-foundation.org>   2011-01-11 16:32:41 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2011-01-11 16:32:41 -0800
commit     4162cf64973df51fc885825bc9ca4d055891c49f (patch)
tree       f218c7bd298f4d41be94d08a314eb9fbc3fcb4ea
parent     fb7b5a956992fdc53d0b9c8ea29b51b92839dc1b (diff)
parent     343a8d13aae58dec562dbb5c7d48a53e9b847871 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (67 commits)
cxgb4vf: recover from failure in cxgb4vf_open()
netfilter: ebtables: make broute table work again
netfilter: fix race in conntrack between dump_table and destroy
ah: reload pointers to skb data after calling skb_cow_data()
ah: update maximum truncated ICV length
xfrm: check trunc_len in XFRMA_ALG_AUTH_TRUNC
ehea: Increase the skb array usage
net/fec: remove config FEC2 as it's used nowhere
pcnet_cs: add new_id
tcp: disallow bind() to reuse addr/port
net/r8169: Update the function of parsing firmware
net: ppp: use {get,put}_unaligned_be{16,32}
CAIF: Fix IPv6 support in receive path for GPRS/3G
arp: allow to invalidate specific ARP entries
net_sched: factorize qdisc stats handling
mlx4: Call alloc_etherdev to allocate RX and TX queues
net: Add alloc_netdev_mqs function
caif: don't set connection request param size before copying data
cxgb4vf: fix mailbox data/control coherency domain race
qlcnic: change module parameter permissions
...
109 files changed, 2901 insertions, 1867 deletions
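
Before the diff itself, one pattern worth noting: the drivers/atm/ambassador.c hunks below collapse several duplicated "log and return -EINVAL" branches in ucode_init() into a single fail label that releases the firmware and prints one message naming the specific problem. A minimal user-space sketch of that error-path consolidation follows; blob_load()/blob_release() and the two checks are hypothetical stand-ins for request_ihex_firmware()/release_firmware() and the driver's record validation, not the driver's actual code.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a loaded firmware image. */
struct blob {
	unsigned char *data;
	size_t len;
};

static struct blob *blob_load(const char *name)
{
	/* A real loader would read 'name' from disk; this sketch just
	 * hands back a fixed-size buffer. */
	struct blob *b = calloc(1, sizeof(*b));

	(void)name;
	if (!b)
		return NULL;
	b->len = 16;
	b->data = calloc(1, b->len);
	return b;
}

static void blob_release(struct blob *b)
{
	if (b) {
		free(b->data);
		free(b);
	}
}

/* Validate the blob, funnelling every failure through one label that
 * releases the resource and prints a single message, the same shape
 * the ambassador.c hunk gives ucode_init() with release_firmware()
 * and one PRINTK(). */
static int ucode_check(const char *name)
{
	const char *errmsg = NULL;
	struct blob *b = blob_load(name);

	if (!b) {
		fprintf(stderr, "cannot load %s\n", name);
		return -1;
	}
	if (b->len < 4) {
		errmsg = "no start record";
		goto fail;
	}
	if (b->len & 3) {
		errmsg = "odd number of bytes";
		goto fail;
	}
	blob_release(b);
	return 0;

fail:
	blob_release(b);
	fprintf(stderr, "bad microcode data (%s)\n", errmsg);
	return -1;
}

int main(void)
{
	return ucode_check("atmsar11.fw") ? EXIT_FAILURE : EXIT_SUCCESS;
}

The payoff is the same as in the driver: the cleanup and the message format live in one place, so adding another validation check is a two-line change.
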
diff --git a/Documentation/networking/dccp.txt b/Documentation/networking/dccp.txt
index b395ca6..811872b 100644
--- a/Documentation/networking/dccp.txt
+++ b/Documentation/networking/dccp.txt
@@ -167,6 +167,7 @@ rx_ccid = 2
 seq_window = 100
 	The initial sequence window (sec. 7.5.2) of the sender. This influences
 	the local ackno validity and the remote seqno validity windows (7.5.1).
+	Values in the range Wmin = 32 (RFC 4340, 7.5.2) up to 2^32-1 can be set.
 
 tx_qlen = 5
 	The size of the transmit buffer in packets. A value of 0 corresponds
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index ffe9b65..9f47e86 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -1926,8 +1926,9 @@ static int __devinit ucode_init (loader_block * lb, amb_dev * dev) {
   const struct firmware *fw;
   unsigned long start_address;
   const struct ihex_binrec *rec;
+  const char *errmsg = 0;
   int res;
-  
+
   res = request_ihex_firmware(&fw, "atmsar11.fw", &dev->pci_dev->dev);
   if (res) {
     PRINTK (KERN_ERR, "Cannot load microcode data");
@@ -1937,8 +1938,8 @@ static int __devinit ucode_init (loader_block * lb, amb_dev * dev) {
   /* First record contains just the start address */
   rec = (const struct ihex_binrec *)fw->data;
   if (be16_to_cpu(rec->len) != sizeof(__be32) || be32_to_cpu(rec->addr)) {
-    PRINTK (KERN_ERR, "Bad microcode data (no start record)");
-    return -EINVAL;
+    errmsg = "no start record";
+    goto fail;
   }
 
   start_address = be32_to_cpup((__be32 *)rec->data);
@@ -1950,12 +1951,12 @@ static int __devinit ucode_init (loader_block * lb, amb_dev * dev) {
     PRINTD (DBG_LOAD, "starting region (%x, %u)", be32_to_cpu(rec->addr),
 	    be16_to_cpu(rec->len));
     if (be16_to_cpu(rec->len) > 4 * MAX_TRANSFER_DATA) {
-      PRINTK (KERN_ERR, "Bad microcode data (record too long)");
-      return -EINVAL;
+      errmsg = "record too long";
+      goto fail;
     }
     if (be16_to_cpu(rec->len) & 3) {
-      PRINTK (KERN_ERR, "Bad microcode data (odd number of bytes)");
-      return -EINVAL;
+      errmsg = "odd number of bytes";
+      goto fail;
     }
     res = loader_write(lb, dev, rec);
     if (res)
@@ -1970,6 +1971,10 @@ static int __devinit ucode_init (loader_block * lb, amb_dev * dev) {
   res = loader_start(lb, dev, start_address);
 
   return res;
+fail:
+  release_firmware(fw);
+  PRINTK(KERN_ERR, "Bad microcode data (%s)", errmsg);
+  return -EINVAL;
 }
 
 /********** give adapter parameters **********/
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 3fda24a..ff652c7 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1944,19 +1944,12 @@ config 68360_ENET
 config FEC
 	bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
 	depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
-		MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5
+		MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5 || SOC_IMX28
 	select PHYLIB
 	help
 	  Say Y here if you want to use the built-in 10/100 Fast ethernet
 	  controller on some Motorola ColdFire and Freescale i.MX processors.
 
-config FEC2
-	bool "Second FEC ethernet controller (on some ColdFire CPUs)"
-	depends on FEC
-	help
-	  Say Y here if you want to use the second built-in 10/100 Fast
-	  ethernet controller on some Motorola ColdFire processors.
-
 config FEC_MPC52xx
 	tristate "MPC52xx FEC driver"
 	depends on PPC_MPC52xx && PPC_BESTCOMM
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index ce1e5e9..0b9fc51 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -8,6 +8,11 @@
  * Licensed under the GPL-2 or later.
*/ +#define DRV_VERSION "1.1" +#define DRV_DESC "Blackfin on-chip Ethernet MAC driver" + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> @@ -41,12 +46,7 @@ #include "bfin_mac.h" -#define DRV_NAME "bfin_mac" -#define DRV_VERSION "1.1" -#define DRV_AUTHOR "Bryan Wu, Luke Yang" -#define DRV_DESC "Blackfin on-chip Ethernet MAC driver" - -MODULE_AUTHOR(DRV_AUTHOR); +MODULE_AUTHOR("Bryan Wu, Luke Yang"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(DRV_DESC); MODULE_ALIAS("platform:bfin_mac"); @@ -189,8 +189,7 @@ static int desc_list_init(void) /* allocate a new skb for next time receive */ new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN); if (!new_skb) { - printk(KERN_NOTICE DRV_NAME - ": init: low on mem - packet dropped\n"); + pr_notice("init: low on mem - packet dropped\n"); goto init_error; } skb_reserve(new_skb, NET_IP_ALIGN); @@ -240,7 +239,7 @@ static int desc_list_init(void) init_error: desc_list_free(); - printk(KERN_ERR DRV_NAME ": kmalloc failed\n"); + pr_err("kmalloc failed\n"); return -ENOMEM; } @@ -259,8 +258,7 @@ static int bfin_mdio_poll(void) while ((bfin_read_EMAC_STAADD()) & STABUSY) { udelay(1); if (timeout_cnt-- < 0) { - printk(KERN_ERR DRV_NAME - ": wait MDC/MDIO transaction to complete timeout\n"); + pr_err("wait MDC/MDIO transaction to complete timeout\n"); return -ETIMEDOUT; } } @@ -350,9 +348,9 @@ static void bfin_mac_adjust_link(struct net_device *dev) opmode &= ~RMII_10; break; default: - printk(KERN_WARNING - "%s: Ack! Speed (%d) is not 10/100!\n", - DRV_NAME, phydev->speed); + netdev_warn(dev, + "Ack! Speed (%d) is not 10/100!\n", + phydev->speed); break; } bfin_write_EMAC_OPMODE(opmode); @@ -417,14 +415,13 @@ static int mii_probe(struct net_device *dev, int phy_mode) /* now we are supposed to have a proper phydev, to attach to... 
*/ if (!phydev) { - printk(KERN_INFO "%s: Don't found any phy device at all\n", - dev->name); + netdev_err(dev, "no phy device found\n"); return -ENODEV; } if (phy_mode != PHY_INTERFACE_MODE_RMII && phy_mode != PHY_INTERFACE_MODE_MII) { - printk(KERN_INFO "%s: Invalid phy interface mode\n", dev->name); + netdev_err(dev, "invalid phy interface mode\n"); return -EINVAL; } @@ -432,7 +429,7 @@ static int mii_probe(struct net_device *dev, int phy_mode) 0, phy_mode); if (IS_ERR(phydev)) { - printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); + netdev_err(dev, "could not attach PHY\n"); return PTR_ERR(phydev); } @@ -453,11 +450,10 @@ static int mii_probe(struct net_device *dev, int phy_mode) lp->old_duplex = -1; lp->phydev = phydev; - printk(KERN_INFO "%s: attached PHY driver [%s] " - "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)" - "@sclk=%dMHz)\n", - DRV_NAME, phydev->drv->name, dev_name(&phydev->dev), phydev->irq, - MDC_CLK, mdc_div, sclk/1000000); + pr_info("attached PHY driver [%s] " + "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n", + phydev->drv->name, dev_name(&phydev->dev), phydev->irq, + MDC_CLK, mdc_div, sclk/1000000); return 0; } @@ -502,7 +498,7 @@ bfin_mac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - strcpy(info->driver, DRV_NAME); + strcpy(info->driver, KBUILD_MODNAME); strcpy(info->version, DRV_VERSION); strcpy(info->fw_version, "N/A"); strcpy(info->bus_info, dev_name(&dev->dev)); @@ -562,7 +558,7 @@ static const struct ethtool_ops bfin_mac_ethtool_ops = { }; /**************************************************************************/ -void setup_system_regs(struct net_device *dev) +static void setup_system_regs(struct net_device *dev) { struct bfin_mac_local *lp = netdev_priv(dev); int i; @@ -592,6 +588,10 @@ void setup_system_regs(struct net_device *dev) bfin_write_EMAC_MMC_CTL(RSTC | CROLL); + /* Set vlan regs to let 1522 bytes long packets pass through */ + bfin_write_EMAC_VLAN1(lp->vlan1_mask); + bfin_write_EMAC_VLAN2(lp->vlan2_mask); + /* Initialize the TX DMA channel registers */ bfin_write_DMA2_X_COUNT(0); bfin_write_DMA2_X_MODIFY(4); @@ -827,8 +827,7 @@ static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb) while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt)) udelay(1); if (timeout_cnt == 0) - printk(KERN_ERR DRV_NAME - ": fails to timestamp the TX packet\n"); + netdev_err(netdev, "timestamp the TX packet failed\n"); else { struct skb_shared_hwtstamps shhwtstamps; u64 ns; @@ -1083,8 +1082,7 @@ static void bfin_mac_rx(struct net_device *dev) * we which case we simply drop the packet */ if (current_rx_ptr->status.status_word & RX_ERROR_MASK) { - printk(KERN_NOTICE DRV_NAME - ": rx: receive error - packet dropped\n"); + netdev_notice(dev, "rx: receive error - packet dropped\n"); dev->stats.rx_dropped++; goto out; } @@ -1094,8 +1092,7 @@ static void bfin_mac_rx(struct net_device *dev) new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN); if (!new_skb) { - printk(KERN_NOTICE DRV_NAME - ": rx: low on mem - packet dropped\n"); + netdev_notice(dev, "rx: low on mem - packet dropped\n"); dev->stats.rx_dropped++; goto out; } @@ -1213,7 +1210,7 @@ static int bfin_mac_enable(struct phy_device *phydev) int ret; u32 opmode; - pr_debug("%s: %s\n", DRV_NAME, __func__); + pr_debug("%s\n", __func__); /* Set RX DMA */ bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a)); @@ -1323,7 +1320,7 @@ 
static void bfin_mac_set_multicast_list(struct net_device *dev) u32 sysctl; if (dev->flags & IFF_PROMISC) { - printk(KERN_INFO "%s: set to promisc mode\n", dev->name); + netdev_info(dev, "set promisc mode\n"); sysctl = bfin_read_EMAC_OPMODE(); sysctl |= PR; bfin_write_EMAC_OPMODE(sysctl); @@ -1393,7 +1390,7 @@ static int bfin_mac_open(struct net_device *dev) * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */ if (!is_valid_ether_addr(dev->dev_addr)) { - printk(KERN_WARNING DRV_NAME ": no valid ethernet hw addr\n"); + netdev_warn(dev, "no valid ethernet hw addr\n"); return -EINVAL; } @@ -1527,6 +1524,9 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev) goto out_err_mii_probe; } + lp->vlan1_mask = ETH_P_8021Q | mii_bus_data->vlan1_mask; + lp->vlan2_mask = ETH_P_8021Q | mii_bus_data->vlan2_mask; + /* Fill in the fields of the device structure with ethernet values. */ ether_setup(ndev); @@ -1558,7 +1558,7 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev) bfin_mac_hwtstamp_init(ndev); /* now, print out the card info, in a short format.. */ - dev_info(&pdev->dev, "%s, Version %s\n", DRV_DESC, DRV_VERSION); + netdev_info(ndev, "%s, Version %s\n", DRV_DESC, DRV_VERSION); return 0; @@ -1650,7 +1650,7 @@ static int __devinit bfin_mii_bus_probe(struct platform_device *pdev) * so set the GPIO pins to Ethernet mode */ pin_req = mii_bus_pd->mac_peripherals; - rc = peripheral_request_list(pin_req, DRV_NAME); + rc = peripheral_request_list(pin_req, KBUILD_MODNAME); if (rc) { dev_err(&pdev->dev, "Requesting peripherals failed!\n"); return rc; @@ -1739,7 +1739,7 @@ static struct platform_driver bfin_mac_driver = { .resume = bfin_mac_resume, .suspend = bfin_mac_suspend, .driver = { - .name = DRV_NAME, + .name = KBUILD_MODNAME, .owner = THIS_MODULE, }, }; diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h index aed68be..f8559ac 100644 --- a/drivers/net/bfin_mac.h +++ b/drivers/net/bfin_mac.h @@ -17,7 +17,14 @@ #include <linux/etherdevice.h> #include <linux/bfin_mac.h> +/* + * Disable hardware checksum for bug #5600 if writeback cache is + * enabled. Otherwize, corrupted RX packet will be sent up stack + * without error mark. + */ +#ifndef CONFIG_BFIN_EXTMEM_WRITEBACK #define BFIN_MAC_CSUM_OFFLOAD +#endif #define TX_RECLAIM_JIFFIES (HZ / 5) @@ -68,7 +75,6 @@ struct bfin_mac_local { */ struct net_device_stats stats; - unsigned char Mac[6]; /* MAC address of the board */ spinlock_t lock; int wol; /* Wake On Lan */ @@ -76,6 +82,9 @@ struct bfin_mac_local { struct timer_list tx_reclaim_timer; struct net_device *ndev; + /* Data for EMAC_VLAN1 regs */ + u16 vlan1_mask, vlan2_mask; + /* MII and PHY stuffs */ int old_link; /* used by bf537_adjust_link */ int old_speed; diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h index 77d6c8d..6a858a2 100644 --- a/drivers/net/bnx2x/bnx2x.h +++ b/drivers/net/bnx2x/bnx2x.h @@ -636,6 +636,7 @@ struct bnx2x_common { #define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0) #define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f) +#define CHIP_PARITY_ENABLED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) int flash_size; #define NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */ diff --git a/drivers/net/bnx2x/bnx2x_dump.h b/drivers/net/bnx2x/bnx2x_dump.h index dc18c25..fb3ff7c 100644 --- a/drivers/net/bnx2x/bnx2x_dump.h +++ b/drivers/net/bnx2x/bnx2x_dump.h @@ -1,10 +1,16 @@ /* bnx2x_dump.h: Broadcom Everest network driver. 
* - * Copyright (c) 2009 Broadcom Corporation + * Copyright (c) 2011 Broadcom Corporation * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2, available + * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a + * license other than the GPL, without Broadcom's express prior written + * consent. */ @@ -17,53 +23,53 @@ #define BNX2X_DUMP_H -struct dump_sign { - u32 time_stamp; - u32 diag_ver; - u32 grc_dump_ver; -}; -#define TSTORM_WAITP_ADDR 0x1b8a80 -#define CSTORM_WAITP_ADDR 0x238a80 -#define XSTORM_WAITP_ADDR 0x2b8a80 -#define USTORM_WAITP_ADDR 0x338a80 -#define TSTORM_CAM_MODE 0x1b1440 +/*definitions */ +#define XSTORM_WAITP_ADDR 0x2b8a80 +#define TSTORM_WAITP_ADDR 0x1b8a80 +#define USTORM_WAITP_ADDR 0x338a80 +#define CSTORM_WAITP_ADDR 0x238a80 +#define TSTORM_CAM_MODE 0x1B1440 -#define RI_E1 0x1 -#define RI_E1H 0x2 +#define MAX_TIMER_PENDING 200 +#define TIMER_SCAN_DONT_CARE 0xFF +#define RI_E1 0x1 +#define RI_E1H 0x2 #define RI_E2 0x4 -#define RI_ONLINE 0x100 +#define RI_ONLINE 0x100 #define RI_PATH0_DUMP 0x200 #define RI_PATH1_DUMP 0x400 -#define RI_E1_OFFLINE (RI_E1) -#define RI_E1_ONLINE (RI_E1 | RI_ONLINE) -#define RI_E1H_OFFLINE (RI_E1H) -#define RI_E1H_ONLINE (RI_E1H | RI_ONLINE) -#define RI_E2_OFFLINE (RI_E2) -#define RI_E2_ONLINE (RI_E2 | RI_ONLINE) -#define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H) -#define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE) -#define RI_E1HE2_OFFLINE (RI_E2 | RI_E1H) -#define RI_E1HE2_ONLINE (RI_E2 | RI_E1H | RI_ONLINE) -#define RI_E1E2_OFFLINE (RI_E2 | RI_E1) -#define RI_E1E2_ONLINE (RI_E2 | RI_E1 | RI_ONLINE) -#define RI_ALL_OFFLINE (RI_E1 | RI_E1H | RI_E2) -#define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE) - -#define MAX_TIMER_PENDING 200 -#define TIMER_SCAN_DONT_CARE 0xFF +#define RI_E1_OFFLINE (RI_E1) +#define RI_E1_ONLINE (RI_E1 | RI_ONLINE) +#define RI_E1H_OFFLINE (RI_E1H) +#define RI_E1H_ONLINE (RI_E1H | RI_ONLINE) +#define RI_E2_OFFLINE (RI_E2) +#define RI_E2_ONLINE (RI_E2 | RI_ONLINE) +#define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H) +#define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE) +#define RI_E1HE2_OFFLINE (RI_E2 | RI_E1H) +#define RI_E1HE2_ONLINE (RI_E2 | RI_E1H | RI_ONLINE) +#define RI_E1E2_OFFLINE (RI_E2 | RI_E1) +#define RI_E1E2_ONLINE (RI_E2 | RI_E1 | RI_ONLINE) +#define RI_ALL_OFFLINE (RI_E1 | RI_E1H | RI_E2) +#define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE) +struct dump_sign { + u32 time_stamp; + u32 diag_ver; + u32 grc_dump_ver; +}; struct dump_hdr { - u32 hdr_size; /* in dwords, excluding this field */ - struct dump_sign dump_sign; - u32 xstorm_waitp; - u32 tstorm_waitp; - u32 ustorm_waitp; - u32 cstorm_waitp; - u16 info; - u8 idle_chk; - u8 reserved; + u32 hdr_size; /* in dwords, excluding this field */ + struct dump_sign dump_sign; + u32 xstorm_waitp; + u32 tstorm_waitp; + u32 ustorm_waitp; + u32 cstorm_waitp; + u16 info; + u8 idle_chk; + u8 reserved; }; struct reg_addr { @@ -80,202 +86,185 @@ struct wreg_addr { u16 info; }; - -#define REGS_COUNT 558 +#define REGS_COUNT 834 static const struct reg_addr 
reg_addrs[REGS_COUNT] = { { 0x2000, 341, RI_ALL_ONLINE }, { 0x2800, 103, RI_ALL_ONLINE }, { 0x3000, 287, RI_ALL_ONLINE }, { 0x3800, 331, RI_ALL_ONLINE }, - { 0x8800, 6, RI_E1_ONLINE }, { 0xa000, 223, RI_ALL_ONLINE }, - { 0xa388, 1, RI_ALL_ONLINE }, { 0xa398, 1, RI_ALL_ONLINE }, - { 0xa39c, 7, RI_E1H_ONLINE }, { 0xa3c0, 3, RI_E1H_ONLINE }, - { 0xa3d0, 1, RI_E1H_ONLINE }, { 0xa3d8, 1, RI_E1H_ONLINE }, - { 0xa3e0, 1, RI_E1H_ONLINE }, { 0xa3e8, 1, RI_E1H_ONLINE }, - { 0xa3f0, 1, RI_E1H_ONLINE }, { 0xa3f8, 1, RI_E1H_ONLINE }, - { 0xa400, 69, RI_ALL_ONLINE }, { 0xa518, 1, RI_ALL_ONLINE }, - { 0xa520, 1, RI_ALL_ONLINE }, { 0xa528, 1, RI_ALL_ONLINE }, - { 0xa530, 1, RI_ALL_ONLINE }, { 0xa538, 1, RI_ALL_ONLINE }, - { 0xa540, 1, RI_ALL_ONLINE }, { 0xa548, 1, RI_ALL_ONLINE }, - { 0xa550, 1, RI_ALL_ONLINE }, { 0xa558, 1, RI_ALL_ONLINE }, - { 0xa560, 1, RI_ALL_ONLINE }, { 0xa568, 1, RI_ALL_ONLINE }, - { 0xa570, 1, RI_ALL_ONLINE }, { 0xa580, 1, RI_ALL_ONLINE }, - { 0xa590, 1, RI_ALL_ONLINE }, { 0xa5a0, 1, RI_ALL_ONLINE }, - { 0xa5c0, 1, RI_ALL_ONLINE }, { 0xa5e0, 1, RI_E1H_ONLINE }, - { 0xa5e8, 1, RI_E1H_ONLINE }, { 0xa5f0, 1, RI_E1H_ONLINE }, - { 0xa5f8, 10, RI_E1H_ONLINE }, { 0x10000, 236, RI_ALL_ONLINE }, - { 0x103bc, 1, RI_ALL_ONLINE }, { 0x103cc, 1, RI_ALL_ONLINE }, - { 0x103dc, 1, RI_ALL_ONLINE }, { 0x10400, 57, RI_ALL_ONLINE }, - { 0x104e8, 2, RI_ALL_ONLINE }, { 0x104f4, 2, RI_ALL_ONLINE }, - { 0x10500, 146, RI_ALL_ONLINE }, { 0x10750, 2, RI_ALL_ONLINE }, - { 0x10760, 2, RI_ALL_ONLINE }, { 0x10770, 2, RI_ALL_ONLINE }, - { 0x10780, 2, RI_ALL_ONLINE }, { 0x10790, 2, RI_ALL_ONLINE }, - { 0x107a0, 2, RI_ALL_ONLINE }, { 0x107b0, 2, RI_ALL_ONLINE }, - { 0x107c0, 2, RI_ALL_ONLINE }, { 0x107d0, 2, RI_ALL_ONLINE }, - { 0x107e0, 2, RI_ALL_ONLINE }, { 0x10880, 2, RI_ALL_ONLINE }, - { 0x10900, 2, RI_ALL_ONLINE }, { 0x12000, 1, RI_ALL_ONLINE }, - { 0x14000, 1, RI_ALL_ONLINE }, { 0x16000, 26, RI_E1H_ONLINE }, - { 0x16070, 18, RI_E1H_ONLINE }, { 0x160c0, 27, RI_E1H_ONLINE }, - { 0x16140, 1, RI_E1H_ONLINE }, { 0x16160, 1, RI_E1H_ONLINE }, - { 0x16180, 2, RI_E1H_ONLINE }, { 0x161c0, 2, RI_E1H_ONLINE }, - { 0x16204, 5, RI_E1H_ONLINE }, { 0x18000, 1, RI_E1H_ONLINE }, - { 0x18008, 1, RI_E1H_ONLINE }, { 0x20000, 24, RI_ALL_ONLINE }, - { 0x20060, 8, RI_ALL_ONLINE }, { 0x20080, 138, RI_ALL_ONLINE }, - { 0x202b4, 1, RI_ALL_ONLINE }, { 0x202c4, 1, RI_ALL_ONLINE }, - { 0x20400, 2, RI_ALL_ONLINE }, { 0x2040c, 8, RI_ALL_ONLINE }, - { 0x2042c, 18, RI_E1H_ONLINE }, { 0x20480, 1, RI_ALL_ONLINE }, - { 0x20500, 1, RI_ALL_ONLINE }, { 0x20600, 1, RI_ALL_ONLINE }, - { 0x28000, 1, RI_ALL_ONLINE }, { 0x28004, 8191, RI_ALL_OFFLINE }, - { 0x30000, 1, RI_ALL_ONLINE }, { 0x30004, 16383, RI_ALL_OFFLINE }, - { 0x40000, 98, RI_ALL_ONLINE }, { 0x40194, 1, RI_ALL_ONLINE }, - { 0x401a4, 1, RI_ALL_ONLINE }, { 0x401a8, 11, RI_E1H_ONLINE }, - { 0x40200, 4, RI_ALL_ONLINE }, { 0x40400, 43, RI_ALL_ONLINE }, - { 0x404b8, 1, RI_ALL_ONLINE }, { 0x404c8, 1, RI_ALL_ONLINE }, - { 0x404cc, 3, RI_E1H_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE }, + { 0x8800, 6, RI_ALL_ONLINE }, { 0x8818, 1, RI_E1HE2_ONLINE }, + { 0x9000, 164, RI_E2_ONLINE }, { 0x9400, 33, RI_E2_ONLINE }, + { 0xa000, 27, RI_ALL_ONLINE }, { 0xa06c, 1, RI_E1E1H_ONLINE }, + { 0xa070, 71, RI_ALL_ONLINE }, { 0xa18c, 4, RI_E1E1H_ONLINE }, + { 0xa19c, 62, RI_ALL_ONLINE }, { 0xa294, 2, RI_E1E1H_ONLINE }, + { 0xa29c, 56, RI_ALL_ONLINE }, { 0xa39c, 7, RI_E1HE2_ONLINE }, + { 0xa3c0, 3, RI_E1HE2_ONLINE }, { 0xa3d0, 1, RI_E1HE2_ONLINE }, + { 0xa3d8, 1, RI_E1HE2_ONLINE }, { 0xa3e0, 1, RI_E1HE2_ONLINE }, + { 
0xa3e8, 1, RI_E1HE2_ONLINE }, { 0xa3f0, 1, RI_E1HE2_ONLINE }, + { 0xa3f8, 1, RI_E1HE2_ONLINE }, { 0xa400, 43, RI_ALL_ONLINE }, + { 0xa4ac, 2, RI_E1E1H_ONLINE }, { 0xa4b4, 1, RI_ALL_ONLINE }, + { 0xa4b8, 2, RI_E1E1H_ONLINE }, { 0xa4c0, 3, RI_ALL_ONLINE }, + { 0xa4cc, 5, RI_E1E1H_ONLINE }, { 0xa4e0, 9, RI_ALL_ONLINE }, + { 0xa504, 1, RI_E1E1H_ONLINE }, { 0xa508, 3, RI_ALL_ONLINE }, + { 0xa518, 1, RI_ALL_ONLINE }, { 0xa520, 1, RI_ALL_ONLINE }, + { 0xa528, 1, RI_ALL_ONLINE }, { 0xa530, 1, RI_ALL_ONLINE }, + { 0xa538, 1, RI_ALL_ONLINE }, { 0xa540, 1, RI_ALL_ONLINE }, + { 0xa548, 1, RI_E1E1H_ONLINE }, { 0xa550, 1, RI_E1E1H_ONLINE }, + { 0xa558, 1, RI_E1E1H_ONLINE }, { 0xa560, 1, RI_E1E1H_ONLINE }, + { 0xa568, 1, RI_E1E1H_ONLINE }, { 0xa570, 1, RI_ALL_ONLINE }, + { 0xa580, 1, RI_ALL_ONLINE }, { 0xa590, 1, RI_ALL_ONLINE }, + { 0xa5a0, 1, RI_ALL_ONLINE }, { 0xa5c0, 1, RI_ALL_ONLINE }, + { 0xa5e0, 1, RI_E1HE2_ONLINE }, { 0xa5e8, 1, RI_E1HE2_ONLINE }, + { 0xa5f0, 1, RI_E1HE2_ONLINE }, { 0xa5f8, 10, RI_E1HE2_ONLINE }, + { 0xa620, 111, RI_E2_ONLINE }, { 0xa800, 51, RI_E2_ONLINE }, + { 0xa8d4, 4, RI_E2_ONLINE }, { 0xa8e8, 1, RI_E2_ONLINE }, + { 0xa8f0, 1, RI_E2_ONLINE }, { 0x10000, 236, RI_ALL_ONLINE }, + { 0x10400, 57, RI_ALL_ONLINE }, { 0x104e8, 2, RI_ALL_ONLINE }, + { 0x104f4, 2, RI_ALL_ONLINE }, { 0x10500, 146, RI_ALL_ONLINE }, + { 0x10750, 2, RI_ALL_ONLINE }, { 0x10760, 2, RI_ALL_ONLINE }, + { 0x10770, 2, RI_ALL_ONLINE }, { 0x10780, 2, RI_ALL_ONLINE }, + { 0x10790, 2, RI_ALL_ONLINE }, { 0x107a0, 2, RI_ALL_ONLINE }, + { 0x107b0, 2, RI_ALL_ONLINE }, { 0x107c0, 2, RI_ALL_ONLINE }, + { 0x107d0, 2, RI_ALL_ONLINE }, { 0x107e0, 2, RI_ALL_ONLINE }, + { 0x10880, 2, RI_ALL_ONLINE }, { 0x10900, 2, RI_ALL_ONLINE }, + { 0x16000, 26, RI_E1HE2_ONLINE }, { 0x16070, 18, RI_E1HE2_ONLINE }, + { 0x160c0, 27, RI_E1HE2_ONLINE }, { 0x16140, 1, RI_E1HE2_ONLINE }, + { 0x16160, 1, RI_E1HE2_ONLINE }, { 0x16180, 2, RI_E1HE2_ONLINE }, + { 0x161c0, 2, RI_E1HE2_ONLINE }, { 0x16204, 5, RI_E1HE2_ONLINE }, + { 0x18000, 1, RI_E1HE2_ONLINE }, { 0x18008, 1, RI_E1HE2_ONLINE }, + { 0x18010, 35, RI_E2_ONLINE }, { 0x180a4, 2, RI_E2_ONLINE }, + { 0x180c0, 191, RI_E2_ONLINE }, { 0x18440, 1, RI_E2_ONLINE }, + { 0x18460, 1, RI_E2_ONLINE }, { 0x18480, 2, RI_E2_ONLINE }, + { 0x184c0, 2, RI_E2_ONLINE }, { 0x18500, 15, RI_E2_ONLINE }, + { 0x20000, 24, RI_ALL_ONLINE }, { 0x20060, 8, RI_ALL_ONLINE }, + { 0x20080, 94, RI_ALL_ONLINE }, { 0x201f8, 1, RI_E1E1H_ONLINE }, + { 0x201fc, 1, RI_ALL_ONLINE }, { 0x20200, 1, RI_E1E1H_ONLINE }, + { 0x20204, 1, RI_ALL_ONLINE }, { 0x20208, 1, RI_E1E1H_ONLINE }, + { 0x2020c, 39, RI_ALL_ONLINE }, { 0x202c8, 1, RI_E2_ONLINE }, + { 0x202d8, 4, RI_E2_ONLINE }, { 0x20400, 2, RI_ALL_ONLINE }, + { 0x2040c, 8, RI_ALL_ONLINE }, { 0x2042c, 18, RI_E1HE2_ONLINE }, + { 0x20480, 1, RI_ALL_ONLINE }, { 0x20500, 1, RI_ALL_ONLINE }, + { 0x20600, 1, RI_ALL_ONLINE }, { 0x28000, 1, RI_ALL_ONLINE }, + { 0x28004, 8191, RI_ALL_OFFLINE }, { 0x30000, 1, RI_ALL_ONLINE }, + { 0x30004, 16383, RI_ALL_OFFLINE }, { 0x40000, 98, RI_ALL_ONLINE }, + { 0x401a8, 8, RI_E1HE2_ONLINE }, { 0x401c8, 1, RI_E1H_ONLINE }, + { 0x401cc, 2, RI_E1HE2_ONLINE }, { 0x401d4, 2, RI_E2_ONLINE }, + { 0x40200, 4, RI_ALL_ONLINE }, { 0x40220, 18, RI_E2_ONLINE }, + { 0x40400, 43, RI_ALL_ONLINE }, { 0x404cc, 3, RI_E1HE2_ONLINE }, + { 0x404e0, 1, RI_E2_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE }, { 0x40510, 2, RI_ALL_ONLINE }, { 0x40520, 2, RI_ALL_ONLINE }, { 0x40530, 2, RI_ALL_ONLINE }, { 0x40540, 2, RI_ALL_ONLINE }, - { 0x42000, 164, RI_ALL_ONLINE }, { 0x4229c, 1, RI_ALL_ONLINE 
}, - { 0x422ac, 1, RI_ALL_ONLINE }, { 0x422bc, 1, RI_ALL_ONLINE }, - { 0x422d4, 5, RI_E1H_ONLINE }, { 0x42400, 49, RI_ALL_ONLINE }, - { 0x424c8, 38, RI_ALL_ONLINE }, { 0x42568, 2, RI_ALL_ONLINE }, - { 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 20, RI_ALL_ONLINE }, - { 0x50050, 8, RI_ALL_ONLINE }, { 0x50070, 88, RI_ALL_ONLINE }, - { 0x501dc, 1, RI_ALL_ONLINE }, { 0x501ec, 1, RI_ALL_ONLINE }, - { 0x501f0, 4, RI_E1H_ONLINE }, { 0x50200, 2, RI_ALL_ONLINE }, - { 0x5020c, 7, RI_ALL_ONLINE }, { 0x50228, 6, RI_E1H_ONLINE }, - { 0x50240, 1, RI_ALL_ONLINE }, { 0x50280, 1, RI_ALL_ONLINE }, + { 0x40550, 10, RI_E2_ONLINE }, { 0x40610, 2, RI_E2_ONLINE }, + { 0x42000, 164, RI_ALL_ONLINE }, { 0x422c0, 4, RI_E2_ONLINE }, + { 0x422d4, 5, RI_E1HE2_ONLINE }, { 0x422e8, 1, RI_E2_ONLINE }, + { 0x42400, 49, RI_ALL_ONLINE }, { 0x424c8, 38, RI_ALL_ONLINE }, + { 0x42568, 2, RI_ALL_ONLINE }, { 0x42640, 5, RI_E2_ONLINE }, + { 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 1, RI_ALL_ONLINE }, + { 0x50004, 19, RI_ALL_ONLINE }, { 0x50050, 8, RI_ALL_ONLINE }, + { 0x50070, 88, RI_ALL_ONLINE }, { 0x501f0, 4, RI_E1HE2_ONLINE }, + { 0x50200, 2, RI_ALL_ONLINE }, { 0x5020c, 7, RI_ALL_ONLINE }, + { 0x50228, 6, RI_E1HE2_ONLINE }, { 0x50240, 1, RI_ALL_ONLINE }, + { 0x50280, 1, RI_ALL_ONLINE }, { 0x50300, 1, RI_E2_ONLINE }, + { 0x5030c, 1, RI_E2_ONLINE }, { 0x50318, 1, RI_E2_ONLINE }, + { 0x5031c, 1, RI_E2_ONLINE }, { 0x50320, 2, RI_E2_ONLINE }, { 0x52000, 1, RI_ALL_ONLINE }, { 0x54000, 1, RI_ALL_ONLINE }, { 0x54004, 3327, RI_ALL_OFFLINE }, { 0x58000, 1, RI_ALL_ONLINE }, - { 0x58004, 8191, RI_ALL_OFFLINE }, { 0x60000, 71, RI_ALL_ONLINE }, - { 0x60128, 1, RI_ALL_ONLINE }, { 0x60138, 1, RI_ALL_ONLINE }, - { 0x6013c, 24, RI_E1H_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE }, + { 0x58004, 8191, RI_E1E1H_OFFLINE }, { 0x60000, 26, RI_ALL_ONLINE }, + { 0x60068, 8, RI_E1E1H_ONLINE }, { 0x60088, 12, RI_ALL_ONLINE }, + { 0x600b8, 9, RI_E1E1H_ONLINE }, { 0x600dc, 1, RI_ALL_ONLINE }, + { 0x600e0, 5, RI_E1E1H_ONLINE }, { 0x600f4, 1, RI_ALL_ONLINE }, + { 0x600f8, 1, RI_E1E1H_ONLINE }, { 0x600fc, 8, RI_ALL_ONLINE }, + { 0x6013c, 24, RI_E1H_ONLINE }, { 0x6019c, 2, RI_E2_ONLINE }, + { 0x601ac, 18, RI_E2_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE }, + { 0x60204, 2, RI_ALL_OFFLINE }, { 0x60210, 13, RI_E2_ONLINE }, { 0x61000, 1, RI_ALL_ONLINE }, { 0x61004, 511, RI_ALL_OFFLINE }, - { 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 21496, RI_ALL_OFFLINE }, - { 0x85000, 3, RI_ALL_ONLINE }, { 0x8500c, 4, RI_ALL_OFFLINE }, - { 0x8501c, 7, RI_ALL_ONLINE }, { 0x85038, 4, RI_ALL_OFFLINE }, - { 0x85048, 1, RI_ALL_ONLINE }, { 0x8504c, 109, RI_ALL_OFFLINE }, - { 0x85200, 32, RI_ALL_ONLINE }, { 0x85280, 11104, RI_ALL_OFFLINE }, - { 0xa0000, 16384, RI_ALL_ONLINE }, { 0xb0000, 16384, RI_E1H_ONLINE }, - { 0xc1000, 7, RI_ALL_ONLINE }, { 0xc1028, 1, RI_ALL_ONLINE }, - { 0xc1038, 1, RI_ALL_ONLINE }, { 0xc1800, 2, RI_ALL_ONLINE }, - { 0xc2000, 164, RI_ALL_ONLINE }, { 0xc229c, 1, RI_ALL_ONLINE }, - { 0xc22ac, 1, RI_ALL_ONLINE }, { 0xc22bc, 1, RI_ALL_ONLINE }, + { 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 8184, RI_ALL_OFFLINE }, + { 0x85000, 3, RI_ALL_ONLINE }, { 0x8501c, 7, RI_ALL_ONLINE }, + { 0x85048, 1, RI_ALL_ONLINE }, { 0x85200, 32, RI_ALL_ONLINE }, + { 0xc1000, 7, RI_ALL_ONLINE }, { 0xc103c, 2, RI_E2_ONLINE }, + { 0xc1800, 2, RI_ALL_ONLINE }, { 0xc2000, 164, RI_ALL_ONLINE }, + { 0xc22c0, 5, RI_E2_ONLINE }, { 0xc22d8, 4, RI_E2_ONLINE }, { 0xc2400, 49, RI_ALL_ONLINE }, { 0xc24c8, 38, RI_ALL_ONLINE }, { 0xc2568, 2, RI_ALL_ONLINE }, { 0xc2600, 1, RI_ALL_ONLINE }, - { 0xc4000, 165, RI_ALL_ONLINE }, { 
0xc42a0, 1, RI_ALL_ONLINE }, - { 0xc42b0, 1, RI_ALL_ONLINE }, { 0xc42c0, 1, RI_ALL_ONLINE }, - { 0xc42e0, 7, RI_E1H_ONLINE }, { 0xc4400, 51, RI_ALL_ONLINE }, - { 0xc44d0, 38, RI_ALL_ONLINE }, { 0xc4570, 2, RI_ALL_ONLINE }, + { 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42d8, 2, RI_E2_ONLINE }, + { 0xc42e0, 7, RI_E1HE2_ONLINE }, { 0xc42fc, 1, RI_E2_ONLINE }, + { 0xc4400, 51, RI_ALL_ONLINE }, { 0xc44d0, 38, RI_ALL_ONLINE }, + { 0xc4570, 2, RI_ALL_ONLINE }, { 0xc4578, 5, RI_E2_ONLINE }, { 0xc4600, 1, RI_ALL_ONLINE }, { 0xd0000, 19, RI_ALL_ONLINE }, { 0xd004c, 8, RI_ALL_ONLINE }, { 0xd006c, 91, RI_ALL_ONLINE }, - { 0xd01e4, 1, RI_ALL_ONLINE }, { 0xd01f4, 1, RI_ALL_ONLINE }, - { 0xd0200, 2, RI_ALL_ONLINE }, { 0xd020c, 7, RI_ALL_ONLINE }, - { 0xd0228, 18, RI_E1H_ONLINE }, { 0xd0280, 1, RI_ALL_ONLINE }, - { 0xd0300, 1, RI_ALL_ONLINE }, { 0xd0400, 1, RI_ALL_ONLINE }, - { 0xd4000, 1, RI_ALL_ONLINE }, { 0xd4004, 2559, RI_ALL_OFFLINE }, - { 0xd8000, 1, RI_ALL_ONLINE }, { 0xd8004, 8191, RI_ALL_OFFLINE }, - { 0xe0000, 21, RI_ALL_ONLINE }, { 0xe0054, 8, RI_ALL_ONLINE }, - { 0xe0074, 85, RI_ALL_ONLINE }, { 0xe01d4, 1, RI_ALL_ONLINE }, - { 0xe01e4, 1, RI_ALL_ONLINE }, { 0xe0200, 2, RI_ALL_ONLINE }, - { 0xe020c, 8, RI_ALL_ONLINE }, { 0xe022c, 18, RI_E1H_ONLINE }, + { 0xd01fc, 1, RI_E2_ONLINE }, { 0xd0200, 2, RI_ALL_ONLINE }, + { 0xd020c, 7, RI_ALL_ONLINE }, { 0xd0228, 18, RI_E1HE2_ONLINE }, + { 0xd0280, 1, RI_ALL_ONLINE }, { 0xd0300, 1, RI_ALL_ONLINE }, + { 0xd0400, 1, RI_ALL_ONLINE }, { 0xd4000, 1, RI_ALL_ONLINE }, + { 0xd4004, 2559, RI_ALL_OFFLINE }, { 0xd8000, 1, RI_ALL_ONLINE }, + { 0xd8004, 8191, RI_ALL_OFFLINE }, { 0xe0000, 21, RI_ALL_ONLINE }, + { 0xe0054, 8, RI_ALL_ONLINE }, { 0xe0074, 49, RI_ALL_ONLINE }, + { 0xe0138, 1, RI_E1E1H_ONLINE }, { 0xe013c, 35, RI_ALL_ONLINE }, + { 0xe01f4, 2, RI_E2_ONLINE }, { 0xe0200, 2, RI_ALL_ONLINE }, + { 0xe020c, 8, RI_ALL_ONLINE }, { 0xe022c, 18, RI_E1HE2_ONLINE }, { 0xe0280, 1, RI_ALL_ONLINE }, { 0xe0300, 1, RI_ALL_ONLINE }, { 0xe1000, 1, RI_ALL_ONLINE }, { 0xe2000, 1, RI_ALL_ONLINE }, { 0xe2004, 2047, RI_ALL_OFFLINE }, { 0xf0000, 1, RI_ALL_ONLINE }, { 0xf0004, 16383, RI_ALL_OFFLINE }, { 0x101000, 12, RI_ALL_ONLINE }, - { 0x10103c, 1, RI_ALL_ONLINE }, { 0x10104c, 1, RI_ALL_ONLINE }, - { 0x101050, 1, RI_E1H_ONLINE }, { 0x101100, 1, RI_ALL_ONLINE }, - { 0x101800, 8, RI_ALL_ONLINE }, { 0x102000, 18, RI_ALL_ONLINE }, - { 0x102054, 1, RI_ALL_ONLINE }, { 0x102064, 1, RI_ALL_ONLINE }, + { 0x101050, 1, RI_E1HE2_ONLINE }, { 0x101054, 3, RI_E2_ONLINE }, + { 0x101100, 1, RI_ALL_ONLINE }, { 0x101800, 8, RI_ALL_ONLINE }, + { 0x102000, 18, RI_ALL_ONLINE }, { 0x102068, 6, RI_E2_ONLINE }, { 0x102080, 17, RI_ALL_ONLINE }, { 0x1020c8, 8, RI_E1H_ONLINE }, - { 0x102400, 1, RI_ALL_ONLINE }, { 0x103000, 26, RI_ALL_ONLINE }, - { 0x103074, 1, RI_ALL_ONLINE }, { 0x103084, 1, RI_ALL_ONLINE }, - { 0x103094, 1, RI_ALL_ONLINE }, { 0x103098, 5, RI_E1H_ONLINE }, + { 0x1020e8, 9, RI_E2_ONLINE }, { 0x102400, 1, RI_ALL_ONLINE }, + { 0x103000, 26, RI_ALL_ONLINE }, { 0x103098, 5, RI_E1HE2_ONLINE }, + { 0x1030ac, 10, RI_E2_ONLINE }, { 0x1030d8, 8, RI_E2_ONLINE }, + { 0x103400, 1, RI_E2_ONLINE }, { 0x103404, 135, RI_E2_OFFLINE }, { 0x103800, 8, RI_ALL_ONLINE }, { 0x104000, 63, RI_ALL_ONLINE }, - { 0x104108, 1, RI_ALL_ONLINE }, { 0x104118, 1, RI_ALL_ONLINE }, - { 0x104200, 17, RI_ALL_ONLINE }, { 0x104400, 64, RI_ALL_ONLINE }, - { 0x104500, 192, RI_ALL_OFFLINE }, { 0x104800, 64, RI_ALL_ONLINE }, - { 0x104900, 192, RI_ALL_OFFLINE }, { 0x105000, 7, RI_ALL_ONLINE }, - { 0x10501c, 1, RI_ALL_OFFLINE }, { 0x105020, 
3, RI_ALL_ONLINE }, - { 0x10502c, 1, RI_ALL_OFFLINE }, { 0x105030, 3, RI_ALL_ONLINE }, - { 0x10503c, 1, RI_ALL_OFFLINE }, { 0x105040, 3, RI_ALL_ONLINE }, - { 0x10504c, 1, RI_ALL_OFFLINE }, { 0x105050, 3, RI_ALL_ONLINE }, - { 0x10505c, 1, RI_ALL_OFFLINE }, { 0x105060, 3, RI_ALL_ONLINE }, - { 0x10506c, 1, RI_ALL_OFFLINE }, { 0x105070, 3, RI_ALL_ONLINE }, - { 0x10507c, 1, RI_ALL_OFFLINE }, { 0x105080, 3, RI_ALL_ONLINE }, - { 0x10508c, 1, RI_ALL_OFFLINE }, { 0x105090, 3, RI_ALL_ONLINE }, - { 0x10509c, 1, RI_ALL_OFFLINE }, { 0x1050a0, 3, RI_ALL_ONLINE }, - { 0x1050ac, 1, RI_ALL_OFFLINE }, { 0x1050b0, 3, RI_ALL_ONLINE }, - { 0x1050bc, 1, RI_ALL_OFFLINE }, { 0x1050c0, 3, RI_ALL_ONLINE }, - { 0x1050cc, 1, RI_ALL_OFFLINE }, { 0x1050d0, 3, RI_ALL_ONLINE }, - { 0x1050dc, 1, RI_ALL_OFFLINE }, { 0x1050e0, 3, RI_ALL_ONLINE }, - { 0x1050ec, 1, RI_ALL_OFFLINE }, { 0x1050f0, 3, RI_ALL_ONLINE }, - { 0x1050fc, 1, RI_ALL_OFFLINE }, { 0x105100, 3, RI_ALL_ONLINE }, - { 0x10510c, 1, RI_ALL_OFFLINE }, { 0x105110, 3, RI_ALL_ONLINE }, - { 0x10511c, 1, RI_ALL_OFFLINE }, { 0x105120, 3, RI_ALL_ONLINE }, - { 0x10512c, 1, RI_ALL_OFFLINE }, { 0x105130, 3, RI_ALL_ONLINE }, - { 0x10513c, 1, RI_ALL_OFFLINE }, { 0x105140, 3, RI_ALL_ONLINE }, - { 0x10514c, 1, RI_ALL_OFFLINE }, { 0x105150, 3, RI_ALL_ONLINE }, - { 0x10515c, 1, RI_ALL_OFFLINE }, { 0x105160, 3, RI_ALL_ONLINE }, - { 0x10516c, 1, RI_ALL_OFFLINE }, { 0x105170, 3, RI_ALL_ONLINE }, - { 0x10517c, 1, RI_ALL_OFFLINE }, { 0x105180, 3, RI_ALL_ONLINE }, - { 0x10518c, 1, RI_ALL_OFFLINE }, { 0x105190, 3, RI_ALL_ONLINE }, - { 0x10519c, 1, RI_ALL_OFFLINE }, { 0x1051a0, 3, RI_ALL_ONLINE }, - { 0x1051ac, 1, RI_ALL_OFFLINE }, { 0x1051b0, 3, RI_ALL_ONLINE }, - { 0x1051bc, 1, RI_ALL_OFFLINE }, { 0x1051c0, 3, RI_ALL_ONLINE }, - { 0x1051cc, 1, RI_ALL_OFFLINE }, { 0x1051d0, 3, RI_ALL_ONLINE }, - { 0x1051dc, 1, RI_ALL_OFFLINE }, { 0x1051e0, 3, RI_ALL_ONLINE }, - { 0x1051ec, 1, RI_ALL_OFFLINE }, { 0x1051f0, 3, RI_ALL_ONLINE }, - { 0x1051fc, 1, RI_ALL_OFFLINE }, { 0x105200, 3, RI_ALL_ONLINE }, - { 0x10520c, 1, RI_ALL_OFFLINE }, { 0x105210, 3, RI_ALL_ONLINE }, - { 0x10521c, 1, RI_ALL_OFFLINE }, { 0x105220, 3, RI_ALL_ONLINE }, - { 0x10522c, 1, RI_ALL_OFFLINE }, { 0x105230, 3, RI_ALL_ONLINE }, - { 0x10523c, 1, RI_ALL_OFFLINE }, { 0x105240, 3, RI_ALL_ONLINE }, - { 0x10524c, 1, RI_ALL_OFFLINE }, { 0x105250, 3, RI_ALL_ONLINE }, - { 0x10525c, 1, RI_ALL_OFFLINE }, { 0x105260, 3, RI_ALL_ONLINE }, - { 0x10526c, 1, RI_ALL_OFFLINE }, { 0x105270, 3, RI_ALL_ONLINE }, - { 0x10527c, 1, RI_ALL_OFFLINE }, { 0x105280, 3, RI_ALL_ONLINE }, - { 0x10528c, 1, RI_ALL_OFFLINE }, { 0x105290, 3, RI_ALL_ONLINE }, - { 0x10529c, 1, RI_ALL_OFFLINE }, { 0x1052a0, 3, RI_ALL_ONLINE }, - { 0x1052ac, 1, RI_ALL_OFFLINE }, { 0x1052b0, 3, RI_ALL_ONLINE }, - { 0x1052bc, 1, RI_ALL_OFFLINE }, { 0x1052c0, 3, RI_ALL_ONLINE }, - { 0x1052cc, 1, RI_ALL_OFFLINE }, { 0x1052d0, 3, RI_ALL_ONLINE }, - { 0x1052dc, 1, RI_ALL_OFFLINE }, { 0x1052e0, 3, RI_ALL_ONLINE }, - { 0x1052ec, 1, RI_ALL_OFFLINE }, { 0x1052f0, 3, RI_ALL_ONLINE }, - { 0x1052fc, 1, RI_ALL_OFFLINE }, { 0x105300, 3, RI_ALL_ONLINE }, - { 0x10530c, 1, RI_ALL_OFFLINE }, { 0x105310, 3, RI_ALL_ONLINE }, - { 0x10531c, 1, RI_ALL_OFFLINE }, { 0x105320, 3, RI_ALL_ONLINE }, - { 0x10532c, 1, RI_ALL_OFFLINE }, { 0x105330, 3, RI_ALL_ONLINE }, - { 0x10533c, 1, RI_ALL_OFFLINE }, { 0x105340, 3, RI_ALL_ONLINE }, - { 0x10534c, 1, RI_ALL_OFFLINE }, { 0x105350, 3, RI_ALL_ONLINE }, - { 0x10535c, 1, RI_ALL_OFFLINE }, { 0x105360, 3, RI_ALL_ONLINE }, - { 0x10536c, 1, RI_ALL_OFFLINE }, { 0x105370, 3, 
RI_ALL_ONLINE }, - { 0x10537c, 1, RI_ALL_OFFLINE }, { 0x105380, 3, RI_ALL_ONLINE }, - { 0x10538c, 1, RI_ALL_OFFLINE }, { 0x105390, 3, RI_ALL_ONLINE }, - { 0x10539c, 1, RI_ALL_OFFLINE }, { 0x1053a0, 3, RI_ALL_ONLINE }, - { 0x1053ac, 1, RI_ALL_OFFLINE }, { 0x1053b0, 3, RI_ALL_ONLINE }, - { 0x1053bc, 1, RI_ALL_OFFLINE }, { 0x1053c0, 3, RI_ALL_ONLINE }, - { 0x1053cc, 1, RI_ALL_OFFLINE }, { 0x1053d0, 3, RI_ALL_ONLINE }, - { 0x1053dc, 1, RI_ALL_OFFLINE }, { 0x1053e0, 3, RI_ALL_ONLINE }, - { 0x1053ec, 1, RI_ALL_OFFLINE }, { 0x1053f0, 3, RI_ALL_ONLINE }, - { 0x1053fc, 769, RI_ALL_OFFLINE }, { 0x108000, 33, RI_ALL_ONLINE }, - { 0x108090, 1, RI_ALL_ONLINE }, { 0x1080a0, 1, RI_ALL_ONLINE }, - { 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_ALL_ONLINE }, - { 0x108120, 5, RI_ALL_ONLINE }, { 0x108200, 74, RI_ALL_ONLINE }, - { 0x108400, 74, RI_ALL_ONLINE }, { 0x108800, 152, RI_ALL_ONLINE }, - { 0x109000, 1, RI_ALL_ONLINE }, { 0x120000, 347, RI_ALL_ONLINE }, - { 0x120578, 1, RI_ALL_ONLINE }, { 0x120588, 1, RI_ALL_ONLINE }, - { 0x120598, 1, RI_ALL_ONLINE }, { 0x12059c, 23, RI_E1H_ONLINE }, - { 0x120614, 1, RI_E1H_ONLINE }, { 0x12061c, 30, RI_E1H_ONLINE }, - { 0x12080c, 65, RI_ALL_ONLINE }, { 0x120a00, 2, RI_ALL_ONLINE }, - { 0x122000, 2, RI_ALL_ONLINE }, { 0x128000, 2, RI_E1H_ONLINE }, - { 0x140000, 114, RI_ALL_ONLINE }, { 0x1401d4, 1, RI_ALL_ONLINE }, - { 0x1401e4, 1, RI_ALL_ONLINE }, { 0x140200, 6, RI_ALL_ONLINE }, - { 0x144000, 4, RI_ALL_ONLINE }, { 0x148000, 4, RI_ALL_ONLINE }, - { 0x14c000, 4, RI_ALL_ONLINE }, { 0x150000, 4, RI_ALL_ONLINE }, - { 0x154000, 4, RI_ALL_ONLINE }, { 0x158000, 4, RI_ALL_ONLINE }, - { 0x15c000, 7, RI_E1H_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE }, - { 0x161028, 1, RI_ALL_ONLINE }, { 0x161038, 1, RI_ALL_ONLINE }, - { 0x161800, 2, RI_ALL_ONLINE }, { 0x164000, 60, RI_ALL_ONLINE }, - { 0x1640fc, 1, RI_ALL_ONLINE }, { 0x16410c, 1, RI_ALL_ONLINE }, - { 0x164110, 2, RI_E1H_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE }, + { 0x10411c, 16, RI_E2_ONLINE }, { 0x104200, 17, RI_ALL_ONLINE }, + { 0x104400, 64, RI_ALL_ONLINE }, { 0x104500, 192, RI_ALL_OFFLINE }, + { 0x104800, 64, RI_ALL_ONLINE }, { 0x104900, 192, RI_ALL_OFFLINE }, + { 0x105000, 256, RI_ALL_ONLINE }, { 0x105400, 768, RI_ALL_OFFLINE }, + { 0x107000, 7, RI_E2_ONLINE }, { 0x108000, 33, RI_E1E1H_ONLINE }, + { 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_E1E1H_ONLINE }, + { 0x108120, 5, RI_E1E1H_ONLINE }, { 0x108200, 74, RI_E1E1H_ONLINE }, + { 0x108400, 74, RI_E1E1H_ONLINE }, { 0x108800, 152, RI_E1E1H_ONLINE }, + { 0x110000, 111, RI_E2_ONLINE }, { 0x110200, 4, RI_E2_ONLINE }, + { 0x120000, 2, RI_ALL_ONLINE }, { 0x120008, 4, RI_ALL_ONLINE }, + { 0x120018, 3, RI_ALL_ONLINE }, { 0x120024, 4, RI_ALL_ONLINE }, + { 0x120034, 3, RI_ALL_ONLINE }, { 0x120040, 4, RI_ALL_ONLINE }, + { 0x120050, 3, RI_ALL_ONLINE }, { 0x12005c, 4, RI_ALL_ONLINE }, + { 0x12006c, 3, RI_ALL_ONLINE }, { 0x120078, 4, RI_ALL_ONLINE }, + { 0x120088, 3, RI_ALL_ONLINE }, { 0x120094, 4, RI_ALL_ONLINE }, + { 0x1200a4, 3, RI_ALL_ONLINE }, { 0x1200b0, 4, RI_ALL_ONLINE }, + { 0x1200c0, 3, RI_ALL_ONLINE }, { 0x1200cc, 4, RI_ALL_ONLINE }, + { 0x1200dc, 3, RI_ALL_ONLINE }, { 0x1200e8, 4, RI_ALL_ONLINE }, + { 0x1200f8, 3, RI_ALL_ONLINE }, { 0x120104, 4, RI_ALL_ONLINE }, + { 0x120114, 1, RI_ALL_ONLINE }, { 0x120118, 22, RI_ALL_ONLINE }, + { 0x120170, 2, RI_E1E1H_ONLINE }, { 0x120178, 243, RI_ALL_ONLINE }, + { 0x120544, 4, RI_E1E1H_ONLINE }, { 0x120554, 7, RI_ALL_ONLINE }, + { 0x12059c, 6, RI_E1HE2_ONLINE }, { 0x1205b4, 1, RI_E1HE2_ONLINE }, + { 0x1205b8, 16, RI_E1HE2_ONLINE }, { 
0x1205f8, 4, RI_E2_ONLINE }, + { 0x120618, 1, RI_E2_ONLINE }, { 0x12061c, 20, RI_E1HE2_ONLINE }, + { 0x12066c, 11, RI_E1HE2_ONLINE }, { 0x120698, 5, RI_E2_ONLINE }, + { 0x1206b0, 76, RI_E2_ONLINE }, { 0x1207fc, 1, RI_E2_ONLINE }, + { 0x120808, 66, RI_ALL_ONLINE }, { 0x120910, 7, RI_E2_ONLINE }, + { 0x120930, 9, RI_E2_ONLINE }, { 0x120a00, 2, RI_ALL_ONLINE }, + { 0x122000, 2, RI_ALL_ONLINE }, { 0x122008, 2046, RI_E1_OFFLINE }, + { 0x128000, 2, RI_E1HE2_ONLINE }, { 0x128008, 6142, RI_E1HE2_OFFLINE }, + { 0x130000, 35, RI_E2_ONLINE }, { 0x130100, 29, RI_E2_ONLINE }, + { 0x130180, 1, RI_E2_ONLINE }, { 0x130200, 1, RI_E2_ONLINE }, + { 0x130280, 1, RI_E2_ONLINE }, { 0x130300, 5, RI_E2_ONLINE }, + { 0x130380, 1, RI_E2_ONLINE }, { 0x130400, 1, RI_E2_ONLINE }, + { 0x130480, 5, RI_E2_ONLINE }, { 0x130800, 72, RI_E2_ONLINE }, + { 0x131000, 136, RI_E2_ONLINE }, { 0x132000, 148, RI_E2_ONLINE }, + { 0x134000, 544, RI_E2_ONLINE }, { 0x140000, 64, RI_ALL_ONLINE }, + { 0x140100, 5, RI_E1E1H_ONLINE }, { 0x140114, 45, RI_ALL_ONLINE }, + { 0x140200, 6, RI_ALL_ONLINE }, { 0x140220, 4, RI_E2_ONLINE }, + { 0x140240, 4, RI_E2_ONLINE }, { 0x140260, 4, RI_E2_ONLINE }, + { 0x140280, 4, RI_E2_ONLINE }, { 0x1402a0, 4, RI_E2_ONLINE }, + { 0x1402c0, 4, RI_E2_ONLINE }, { 0x1402e0, 13, RI_E2_ONLINE }, + { 0x144000, 4, RI_E1E1H_ONLINE }, { 0x148000, 4, RI_E1E1H_ONLINE }, + { 0x14c000, 4, RI_E1E1H_ONLINE }, { 0x150000, 4, RI_E1E1H_ONLINE }, + { 0x154000, 4, RI_E1E1H_ONLINE }, { 0x158000, 4, RI_E1E1H_ONLINE }, + { 0x15c000, 2, RI_E1HE2_ONLINE }, { 0x15c008, 5, RI_E1H_ONLINE }, + { 0x15c020, 27, RI_E2_ONLINE }, { 0x15c090, 13, RI_E2_ONLINE }, + { 0x15c0c8, 34, RI_E2_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE }, + { 0x16103c, 2, RI_E2_ONLINE }, { 0x161800, 2, RI_ALL_ONLINE }, + { 0x164000, 60, RI_ALL_ONLINE }, { 0x164110, 2, RI_E1HE2_ONLINE }, + { 0x164118, 15, RI_E2_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE }, { 0x164208, 1, RI_ALL_ONLINE }, { 0x164210, 1, RI_ALL_ONLINE }, { 0x164218, 1, RI_ALL_ONLINE }, { 0x164220, 1, RI_ALL_ONLINE }, { 0x164228, 1, RI_ALL_ONLINE }, { 0x164230, 1, RI_ALL_ONLINE }, @@ -284,169 +273,298 @@ static const struct reg_addr reg_addrs[REGS_COUNT] = { { 0x164258, 1, RI_ALL_ONLINE }, { 0x164260, 1, RI_ALL_ONLINE }, { 0x164270, 2, RI_ALL_ONLINE }, { 0x164280, 2, RI_ALL_ONLINE }, { 0x164800, 2, RI_ALL_ONLINE }, { 0x165000, 2, RI_ALL_ONLINE }, - { 0x166000, 164, RI_ALL_ONLINE }, { 0x16629c, 1, RI_ALL_ONLINE }, - { 0x1662ac, 1, RI_ALL_ONLINE }, { 0x1662bc, 1, RI_ALL_ONLINE }, + { 0x166000, 164, RI_ALL_ONLINE }, { 0x1662cc, 7, RI_E2_ONLINE }, { 0x166400, 49, RI_ALL_ONLINE }, { 0x1664c8, 38, RI_ALL_ONLINE }, - { 0x166568, 2, RI_ALL_ONLINE }, { 0x166800, 1, RI_ALL_ONLINE }, - { 0x168000, 270, RI_ALL_ONLINE }, { 0x168444, 1, RI_ALL_ONLINE }, - { 0x168454, 1, RI_ALL_ONLINE }, { 0x168800, 19, RI_ALL_ONLINE }, - { 0x168900, 1, RI_ALL_ONLINE }, { 0x168a00, 128, RI_ALL_ONLINE }, - { 0x16a000, 1, RI_ALL_ONLINE }, { 0x16a004, 1535, RI_ALL_OFFLINE }, - { 0x16c000, 1, RI_ALL_ONLINE }, { 0x16c004, 1535, RI_ALL_OFFLINE }, - { 0x16e000, 16, RI_E1H_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE }, - { 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 183, RI_E1H_ONLINE }, - { 0x170000, 93, RI_ALL_ONLINE }, { 0x170180, 1, RI_ALL_ONLINE }, - { 0x170190, 1, RI_ALL_ONLINE }, { 0x170200, 4, RI_ALL_ONLINE }, - { 0x170214, 1, RI_ALL_ONLINE }, { 0x178000, 1, RI_ALL_ONLINE }, - { 0x180000, 61, RI_ALL_ONLINE }, { 0x180100, 1, RI_ALL_ONLINE }, - { 0x180110, 1, RI_ALL_ONLINE }, { 0x180120, 1, RI_ALL_ONLINE }, - { 0x180130, 1, RI_ALL_ONLINE }, { 0x18013c, 2, 
RI_E1H_ONLINE }, - { 0x180200, 58, RI_ALL_ONLINE }, { 0x180340, 4, RI_ALL_ONLINE }, - { 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_ALL_OFFLINE }, + { 0x166568, 2, RI_ALL_ONLINE }, { 0x166570, 5, RI_E2_ONLINE }, + { 0x166800, 1, RI_ALL_ONLINE }, { 0x168000, 137, RI_ALL_ONLINE }, + { 0x168224, 2, RI_E1E1H_ONLINE }, { 0x16822c, 29, RI_ALL_ONLINE }, + { 0x1682a0, 12, RI_E1E1H_ONLINE }, { 0x1682d0, 12, RI_ALL_ONLINE }, + { 0x168300, 2, RI_E1E1H_ONLINE }, { 0x168308, 68, RI_ALL_ONLINE }, + { 0x168418, 2, RI_E1E1H_ONLINE }, { 0x168420, 6, RI_ALL_ONLINE }, + { 0x168800, 19, RI_ALL_ONLINE }, { 0x168900, 1, RI_ALL_ONLINE }, + { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16a000, 1, RI_ALL_ONLINE }, + { 0x16a004, 1535, RI_ALL_OFFLINE }, { 0x16c000, 1, RI_ALL_ONLINE }, + { 0x16c004, 1535, RI_ALL_OFFLINE }, { 0x16e000, 16, RI_E1H_ONLINE }, + { 0x16e040, 8, RI_E2_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE }, + { 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 161, RI_E1H_ONLINE }, + { 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 12, RI_E1H_ONLINE }, + { 0x16e6bc, 4, RI_E1HE2_ONLINE }, { 0x16e6cc, 4, RI_E1H_ONLINE }, + { 0x16e6e0, 12, RI_E2_ONLINE }, { 0x16e768, 17, RI_E2_ONLINE }, + { 0x170000, 24, RI_ALL_ONLINE }, { 0x170060, 4, RI_E1E1H_ONLINE }, + { 0x170070, 65, RI_ALL_ONLINE }, { 0x170194, 11, RI_E2_ONLINE }, + { 0x1701c4, 1, RI_E2_ONLINE }, { 0x1701cc, 7, RI_E2_ONLINE }, + { 0x1701ec, 1, RI_E2_ONLINE }, { 0x1701f4, 1, RI_E2_ONLINE }, + { 0x170200, 4, RI_ALL_ONLINE }, { 0x170214, 1, RI_ALL_ONLINE }, + { 0x170218, 77, RI_E2_ONLINE }, { 0x170400, 64, RI_E2_ONLINE }, + { 0x178000, 1, RI_ALL_ONLINE }, { 0x180000, 61, RI_ALL_ONLINE }, + { 0x18013c, 2, RI_E1HE2_ONLINE }, { 0x180200, 58, RI_ALL_ONLINE }, + { 0x180340, 4, RI_ALL_ONLINE }, { 0x180380, 1, RI_E2_ONLINE }, + { 0x180388, 1, RI_E2_ONLINE }, { 0x180390, 1, RI_E2_ONLINE }, + { 0x180398, 1, RI_E2_ONLINE }, { 0x1803a0, 5, RI_E2_ONLINE }, + { 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_E1E1H_OFFLINE }, { 0x181000, 4, RI_ALL_ONLINE }, { 0x181010, 1020, RI_ALL_OFFLINE }, - { 0x1a0000, 1, RI_ALL_ONLINE }, { 0x1a0004, 1023, RI_ALL_OFFLINE }, - { 0x1a1000, 1, RI_ALL_ONLINE }, { 0x1a1004, 4607, RI_ALL_OFFLINE }, - { 0x1a5800, 2560, RI_E1H_OFFLINE }, { 0x1a8000, 64, RI_ALL_OFFLINE }, - { 0x1a8100, 1984, RI_E1H_OFFLINE }, { 0x1aa000, 1, RI_E1H_ONLINE }, - { 0x1aa004, 6655, RI_E1H_OFFLINE }, { 0x1b1800, 128, RI_ALL_OFFLINE }, - { 0x1b1c00, 128, RI_ALL_OFFLINE }, { 0x1b2000, 1, RI_ALL_OFFLINE }, - { 0x1b2400, 64, RI_E1H_OFFLINE }, { 0x1b8200, 1, RI_ALL_ONLINE }, + { 0x1a0000, 1, RI_ALL_ONLINE }, { 0x1a0004, 5631, RI_ALL_OFFLINE }, + { 0x1a5800, 2560, RI_E1HE2_OFFLINE }, { 0x1a8000, 1, RI_ALL_ONLINE }, + { 0x1a8004, 8191, RI_E1HE2_OFFLINE }, { 0x1b0000, 1, RI_ALL_ONLINE }, + { 0x1b0004, 15, RI_E1H_OFFLINE }, { 0x1b0040, 1, RI_E1HE2_ONLINE }, + { 0x1b0044, 239, RI_E1H_OFFLINE }, { 0x1b0400, 1, RI_ALL_ONLINE }, + { 0x1b0404, 255, RI_E1H_OFFLINE }, { 0x1b0800, 1, RI_ALL_ONLINE }, + { 0x1b0840, 1, RI_E1HE2_ONLINE }, { 0x1b0c00, 1, RI_ALL_ONLINE }, + { 0x1b1000, 1, RI_ALL_ONLINE }, { 0x1b1040, 1, RI_E1HE2_ONLINE }, + { 0x1b1400, 1, RI_ALL_ONLINE }, { 0x1b1440, 1, RI_E1HE2_ONLINE }, + { 0x1b1480, 1, RI_E1HE2_ONLINE }, { 0x1b14c0, 1, RI_E1HE2_ONLINE }, + { 0x1b1800, 128, RI_ALL_OFFLINE }, { 0x1b1c00, 128, RI_ALL_OFFLINE }, + { 0x1b2000, 1, RI_ALL_ONLINE }, { 0x1b2400, 1, RI_E1HE2_ONLINE }, + { 0x1b2404, 5631, RI_E2_OFFLINE }, { 0x1b8000, 1, RI_ALL_ONLINE }, + { 0x1b8040, 1, RI_ALL_ONLINE }, { 0x1b8080, 1, RI_ALL_ONLINE }, + { 0x1b80c0, 1, RI_ALL_ONLINE }, { 0x1b8100, 1, 
RI_ALL_ONLINE }, + { 0x1b8140, 1, RI_ALL_ONLINE }, { 0x1b8180, 1, RI_ALL_ONLINE }, + { 0x1b81c0, 1, RI_ALL_ONLINE }, { 0x1b8200, 1, RI_ALL_ONLINE }, { 0x1b8240, 1, RI_ALL_ONLINE }, { 0x1b8280, 1, RI_ALL_ONLINE }, - { 0x1b82c0, 1, RI_ALL_ONLINE }, { 0x1b8a00, 1, RI_ALL_ONLINE }, - { 0x1b8a80, 1, RI_ALL_ONLINE }, { 0x1c0000, 2, RI_ALL_ONLINE }, - { 0x200000, 65, RI_ALL_ONLINE }, { 0x200110, 1, RI_ALL_ONLINE }, - { 0x200120, 1, RI_ALL_ONLINE }, { 0x200130, 1, RI_ALL_ONLINE }, - { 0x200140, 1, RI_ALL_ONLINE }, { 0x20014c, 2, RI_E1H_ONLINE }, - { 0x200200, 58, RI_ALL_ONLINE }, { 0x200340, 4, RI_ALL_ONLINE }, - { 0x200400, 1, RI_ALL_ONLINE }, { 0x200404, 255, RI_ALL_OFFLINE }, - { 0x202000, 4, RI_ALL_ONLINE }, { 0x202010, 2044, RI_ALL_OFFLINE }, - { 0x220000, 1, RI_ALL_ONLINE }, { 0x220004, 1023, RI_ALL_OFFLINE }, - { 0x221000, 1, RI_ALL_ONLINE }, { 0x221004, 4607, RI_ALL_OFFLINE }, - { 0x225800, 1536, RI_E1H_OFFLINE }, { 0x227000, 1, RI_E1H_ONLINE }, - { 0x227004, 1023, RI_E1H_OFFLINE }, { 0x228000, 64, RI_ALL_OFFLINE }, - { 0x228100, 8640, RI_E1H_OFFLINE }, { 0x231800, 128, RI_ALL_OFFLINE }, - { 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_OFFLINE }, - { 0x232400, 64, RI_E1H_OFFLINE }, { 0x238200, 1, RI_ALL_ONLINE }, - { 0x238240, 1, RI_ALL_ONLINE }, { 0x238280, 1, RI_ALL_ONLINE }, - { 0x2382c0, 1, RI_ALL_ONLINE }, { 0x238a00, 1, RI_ALL_ONLINE }, - { 0x238a80, 1, RI_ALL_ONLINE }, { 0x240000, 2, RI_ALL_ONLINE }, - { 0x280000, 65, RI_ALL_ONLINE }, { 0x280110, 1, RI_ALL_ONLINE }, - { 0x280120, 1, RI_ALL_ONLINE }, { 0x280130, 1, RI_ALL_ONLINE }, - { 0x280140, 1, RI_ALL_ONLINE }, { 0x28014c, 2, RI_E1H_ONLINE }, - { 0x280200, 58, RI_ALL_ONLINE }, { 0x280340, 4, RI_ALL_ONLINE }, - { 0x280400, 1, RI_ALL_ONLINE }, { 0x280404, 255, RI_ALL_OFFLINE }, - { 0x282000, 4, RI_ALL_ONLINE }, { 0x282010, 2044, RI_ALL_OFFLINE }, - { 0x2a0000, 1, RI_ALL_ONLINE }, { 0x2a0004, 1023, RI_ALL_OFFLINE }, - { 0x2a1000, 1, RI_ALL_ONLINE }, { 0x2a1004, 4607, RI_ALL_OFFLINE }, - { 0x2a5800, 2560, RI_E1H_OFFLINE }, { 0x2a8000, 64, RI_ALL_OFFLINE }, - { 0x2a8100, 960, RI_E1H_OFFLINE }, { 0x2a9000, 1, RI_E1H_ONLINE }, - { 0x2a9004, 7679, RI_E1H_OFFLINE }, { 0x2b1800, 128, RI_ALL_OFFLINE }, - { 0x2b1c00, 128, RI_ALL_OFFLINE }, { 0x2b2000, 1, RI_ALL_OFFLINE }, - { 0x2b2400, 64, RI_E1H_OFFLINE }, { 0x2b8200, 1, RI_ALL_ONLINE }, - { 0x2b8240, 1, RI_ALL_ONLINE }, { 0x2b8280, 1, RI_ALL_ONLINE }, - { 0x2b82c0, 1, RI_ALL_ONLINE }, { 0x2b8a00, 1, RI_ALL_ONLINE }, - { 0x2b8a80, 1, RI_ALL_ONLINE }, { 0x2c0000, 2, RI_ALL_ONLINE }, - { 0x300000, 65, RI_ALL_ONLINE }, { 0x300110, 1, RI_ALL_ONLINE }, - { 0x300120, 1, RI_ALL_ONLINE }, { 0x300130, 1, RI_ALL_ONLINE }, - { 0x300140, 1, RI_ALL_ONLINE }, { 0x30014c, 2, RI_E1H_ONLINE }, + { 0x1b82c0, 1, RI_ALL_ONLINE }, { 0x1b8300, 1, RI_ALL_ONLINE }, + { 0x1b8340, 1, RI_ALL_ONLINE }, { 0x1b8380, 1, RI_ALL_ONLINE }, + { 0x1b83c0, 1, RI_ALL_ONLINE }, { 0x1b8400, 1, RI_ALL_ONLINE }, + { 0x1b8440, 1, RI_ALL_ONLINE }, { 0x1b8480, 1, RI_ALL_ONLINE }, + { 0x1b84c0, 1, RI_ALL_ONLINE }, { 0x1b8500, 1, RI_ALL_ONLINE }, + { 0x1b8540, 1, RI_ALL_ONLINE }, { 0x1b8580, 1, RI_ALL_ONLINE }, + { 0x1b85c0, 19, RI_E2_ONLINE }, { 0x1b8800, 1, RI_ALL_ONLINE }, + { 0x1b8840, 1, RI_ALL_ONLINE }, { 0x1b8880, 1, RI_ALL_ONLINE }, + { 0x1b88c0, 1, RI_ALL_ONLINE }, { 0x1b8900, 1, RI_ALL_ONLINE }, + { 0x1b8940, 1, RI_ALL_ONLINE }, { 0x1b8980, 1, RI_ALL_ONLINE }, + { 0x1b89c0, 1, RI_ALL_ONLINE }, { 0x1b8a00, 1, RI_ALL_ONLINE }, + { 0x1b8a40, 1, RI_ALL_ONLINE }, { 0x1b8a80, 1, RI_ALL_ONLINE }, + { 0x1b8ac0, 1, 
RI_ALL_ONLINE }, { 0x1b8b00, 1, RI_ALL_ONLINE }, + { 0x1b8b40, 1, RI_ALL_ONLINE }, { 0x1b8b80, 1, RI_ALL_ONLINE }, + { 0x1b8bc0, 1, RI_ALL_ONLINE }, { 0x1b8c00, 1, RI_ALL_ONLINE }, + { 0x1b8c40, 1, RI_ALL_ONLINE }, { 0x1b8c80, 1, RI_ALL_ONLINE }, + { 0x1b8cc0, 1, RI_ALL_ONLINE }, { 0x1b8cc4, 1, RI_E2_ONLINE }, + { 0x1b8d00, 1, RI_ALL_ONLINE }, { 0x1b8d40, 1, RI_ALL_ONLINE }, + { 0x1b8d80, 1, RI_ALL_ONLINE }, { 0x1b8dc0, 1, RI_ALL_ONLINE }, + { 0x1b8e00, 1, RI_ALL_ONLINE }, { 0x1b8e40, 1, RI_ALL_ONLINE }, + { 0x1b8e80, 1, RI_ALL_ONLINE }, { 0x1b8e84, 1, RI_E2_ONLINE }, + { 0x1b8ec0, 1, RI_E1HE2_ONLINE }, { 0x1b8f00, 1, RI_E1HE2_ONLINE }, + { 0x1b8f40, 1, RI_E1HE2_ONLINE }, { 0x1b8f80, 1, RI_E1HE2_ONLINE }, + { 0x1b8fc0, 1, RI_E1HE2_ONLINE }, { 0x1b8fc4, 2, RI_E2_ONLINE }, + { 0x1b8fd0, 6, RI_E2_ONLINE }, { 0x1b9000, 1, RI_E2_ONLINE }, + { 0x1b9040, 3, RI_E2_ONLINE }, { 0x1b9400, 14, RI_E2_ONLINE }, + { 0x1b943c, 19, RI_E2_ONLINE }, { 0x1b9490, 10, RI_E2_ONLINE }, + { 0x1c0000, 2, RI_ALL_ONLINE }, { 0x200000, 65, RI_ALL_ONLINE }, + { 0x20014c, 2, RI_E1HE2_ONLINE }, { 0x200200, 58, RI_ALL_ONLINE }, + { 0x200340, 4, RI_ALL_ONLINE }, { 0x200380, 1, RI_E2_ONLINE }, + { 0x200388, 1, RI_E2_ONLINE }, { 0x200390, 1, RI_E2_ONLINE }, + { 0x200398, 1, RI_E2_ONLINE }, { 0x2003a0, 1, RI_E2_ONLINE }, + { 0x2003a8, 2, RI_E2_ONLINE }, { 0x200400, 1, RI_ALL_ONLINE }, + { 0x200404, 255, RI_E1E1H_OFFLINE }, { 0x202000, 4, RI_ALL_ONLINE }, + { 0x202010, 2044, RI_ALL_OFFLINE }, { 0x220000, 1, RI_ALL_ONLINE }, + { 0x220004, 5631, RI_ALL_OFFLINE }, { 0x225800, 2560, RI_E1HE2_OFFLINE}, + { 0x228000, 1, RI_ALL_ONLINE }, { 0x228004, 8191, RI_E1HE2_OFFLINE }, + { 0x230000, 1, RI_ALL_ONLINE }, { 0x230004, 15, RI_E1H_OFFLINE }, + { 0x230040, 1, RI_E1HE2_ONLINE }, { 0x230044, 239, RI_E1H_OFFLINE }, + { 0x230400, 1, RI_ALL_ONLINE }, { 0x230404, 255, RI_E1H_OFFLINE }, + { 0x230800, 1, RI_ALL_ONLINE }, { 0x230840, 1, RI_E1HE2_ONLINE }, + { 0x230c00, 1, RI_ALL_ONLINE }, { 0x231000, 1, RI_ALL_ONLINE }, + { 0x231040, 1, RI_E1HE2_ONLINE }, { 0x231400, 1, RI_ALL_ONLINE }, + { 0x231440, 1, RI_E1HE2_ONLINE }, { 0x231480, 1, RI_E1HE2_ONLINE }, + { 0x2314c0, 1, RI_E1HE2_ONLINE }, { 0x231800, 128, RI_ALL_OFFLINE }, + { 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_ONLINE }, + { 0x232400, 1, RI_E1HE2_ONLINE }, { 0x232404, 5631, RI_E2_OFFLINE }, + { 0x238000, 1, RI_ALL_ONLINE }, { 0x238040, 1, RI_ALL_ONLINE }, + { 0x238080, 1, RI_ALL_ONLINE }, { 0x2380c0, 1, RI_ALL_ONLINE }, + { 0x238100, 1, RI_ALL_ONLINE }, { 0x238140, 1, RI_ALL_ONLINE }, + { 0x238180, 1, RI_ALL_ONLINE }, { 0x2381c0, 1, RI_ALL_ONLINE }, + { 0x238200, 1, RI_ALL_ONLINE }, { 0x238240, 1, RI_ALL_ONLINE }, + { 0x238280, 1, RI_ALL_ONLINE }, { 0x2382c0, 1, RI_ALL_ONLINE }, + { 0x238300, 1, RI_ALL_ONLINE }, { 0x238340, 1, RI_ALL_ONLINE }, + { 0x238380, 1, RI_ALL_ONLINE }, { 0x2383c0, 1, RI_ALL_ONLINE }, + { 0x238400, 1, RI_ALL_ONLINE }, { 0x238440, 1, RI_ALL_ONLINE }, + { 0x238480, 1, RI_ALL_ONLINE }, { 0x2384c0, 1, RI_ALL_ONLINE }, + { 0x238500, 1, RI_ALL_ONLINE }, { 0x238540, 1, RI_ALL_ONLINE }, + { 0x238580, 1, RI_ALL_ONLINE }, { 0x2385c0, 19, RI_E2_ONLINE }, + { 0x238800, 1, RI_ALL_ONLINE }, { 0x238840, 1, RI_ALL_ONLINE }, + { 0x238880, 1, RI_ALL_ONLINE }, { 0x2388c0, 1, RI_ALL_ONLINE }, + { 0x238900, 1, RI_ALL_ONLINE }, { 0x238940, 1, RI_ALL_ONLINE }, + { 0x238980, 1, RI_ALL_ONLINE }, { 0x2389c0, 1, RI_ALL_ONLINE }, + { 0x238a00, 1, RI_ALL_ONLINE }, { 0x238a40, 1, RI_ALL_ONLINE }, + { 0x238a80, 1, RI_ALL_ONLINE }, { 0x238ac0, 1, RI_ALL_ONLINE }, + { 0x238b00, 1, 
RI_ALL_ONLINE }, { 0x238b40, 1, RI_ALL_ONLINE }, + { 0x238b80, 1, RI_ALL_ONLINE }, { 0x238bc0, 1, RI_ALL_ONLINE }, + { 0x238c00, 1, RI_ALL_ONLINE }, { 0x238c40, 1, RI_ALL_ONLINE }, + { 0x238c80, 1, RI_ALL_ONLINE }, { 0x238cc0, 1, RI_ALL_ONLINE }, + { 0x238cc4, 1, RI_E2_ONLINE }, { 0x238d00, 1, RI_ALL_ONLINE }, + { 0x238d40, 1, RI_ALL_ONLINE }, { 0x238d80, 1, RI_ALL_ONLINE }, + { 0x238dc0, 1, RI_ALL_ONLINE }, { 0x238e00, 1, RI_ALL_ONLINE }, + { 0x238e40, 1, RI_ALL_ONLINE }, { 0x238e80, 1, RI_ALL_ONLINE }, + { 0x238e84, 1, RI_E2_ONLINE }, { 0x238ec0, 1, RI_E1HE2_ONLINE }, + { 0x238f00, 1, RI_E1HE2_ONLINE }, { 0x238f40, 1, RI_E1HE2_ONLINE }, + { 0x238f80, 1, RI_E1HE2_ONLINE }, { 0x238fc0, 1, RI_E1HE2_ONLINE }, + { 0x238fc4, 2, RI_E2_ONLINE }, { 0x238fd0, 6, RI_E2_ONLINE }, + { 0x239000, 1, RI_E2_ONLINE }, { 0x239040, 3, RI_E2_ONLINE }, + { 0x240000, 2, RI_ALL_ONLINE }, { 0x280000, 65, RI_ALL_ONLINE }, + { 0x28014c, 2, RI_E1HE2_ONLINE }, { 0x280200, 58, RI_ALL_ONLINE }, + { 0x280340, 4, RI_ALL_ONLINE }, { 0x280380, 1, RI_E2_ONLINE }, + { 0x280388, 1, RI_E2_ONLINE }, { 0x280390, 1, RI_E2_ONLINE }, + { 0x280398, 1, RI_E2_ONLINE }, { 0x2803a0, 1, RI_E2_ONLINE }, + { 0x2803a8, 2, RI_E2_ONLINE }, { 0x280400, 1, RI_ALL_ONLINE }, + { 0x280404, 255, RI_E1E1H_OFFLINE }, { 0x282000, 4, RI_ALL_ONLINE }, + { 0x282010, 2044, RI_ALL_OFFLINE }, { 0x2a0000, 1, RI_ALL_ONLINE }, + { 0x2a0004, 5631, RI_ALL_OFFLINE }, { 0x2a5800, 2560, RI_E1HE2_OFFLINE}, + { 0x2a8000, 1, RI_ALL_ONLINE }, { 0x2a8004, 8191, RI_E1HE2_OFFLINE }, + { 0x2b0000, 1, RI_ALL_ONLINE }, { 0x2b0004, 15, RI_E1H_OFFLINE }, + { 0x2b0040, 1, RI_E1HE2_ONLINE }, { 0x2b0044, 239, RI_E1H_OFFLINE }, + { 0x2b0400, 1, RI_ALL_ONLINE }, { 0x2b0404, 255, RI_E1H_OFFLINE }, + { 0x2b0800, 1, RI_ALL_ONLINE }, { 0x2b0840, 1, RI_E1HE2_ONLINE }, + { 0x2b0c00, 1, RI_ALL_ONLINE }, { 0x2b1000, 1, RI_ALL_ONLINE }, + { 0x2b1040, 1, RI_E1HE2_ONLINE }, { 0x2b1400, 1, RI_ALL_ONLINE }, + { 0x2b1440, 1, RI_E1HE2_ONLINE }, { 0x2b1480, 1, RI_E1HE2_ONLINE }, + { 0x2b14c0, 1, RI_E1HE2_ONLINE }, { 0x2b1800, 128, RI_ALL_OFFLINE }, + { 0x2b1c00, 128, RI_ALL_OFFLINE }, { 0x2b2000, 1, RI_ALL_ONLINE }, + { 0x2b2400, 1, RI_E1HE2_ONLINE }, { 0x2b2404, 5631, RI_E2_OFFLINE }, + { 0x2b8000, 1, RI_ALL_ONLINE }, { 0x2b8040, 1, RI_ALL_ONLINE }, + { 0x2b8080, 1, RI_ALL_ONLINE }, { 0x2b80c0, 1, RI_ALL_ONLINE }, + { 0x2b8100, 1, RI_ALL_ONLINE }, { 0x2b8140, 1, RI_ALL_ONLINE }, + { 0x2b8180, 1, RI_ALL_ONLINE }, { 0x2b81c0, 1, RI_ALL_ONLINE }, + { 0x2b8200, 1, RI_ALL_ONLINE }, { 0x2b8240, 1, RI_ALL_ONLINE }, + { 0x2b8280, 1, RI_ALL_ONLINE }, { 0x2b82c0, 1, RI_ALL_ONLINE }, + { 0x2b8300, 1, RI_ALL_ONLINE }, { 0x2b8340, 1, RI_ALL_ONLINE }, + { 0x2b8380, 1, RI_ALL_ONLINE }, { 0x2b83c0, 1, RI_ALL_ONLINE }, + { 0x2b8400, 1, RI_ALL_ONLINE }, { 0x2b8440, 1, RI_ALL_ONLINE }, + { 0x2b8480, 1, RI_ALL_ONLINE }, { 0x2b84c0, 1, RI_ALL_ONLINE }, + { 0x2b8500, 1, RI_ALL_ONLINE }, { 0x2b8540, 1, RI_ALL_ONLINE }, + { 0x2b8580, 1, RI_ALL_ONLINE }, { 0x2b85c0, 19, RI_E2_ONLINE }, + { 0x2b8800, 1, RI_ALL_ONLINE }, { 0x2b8840, 1, RI_ALL_ONLINE }, + { 0x2b8880, 1, RI_ALL_ONLINE }, { 0x2b88c0, 1, RI_ALL_ONLINE }, + { 0x2b8900, 1, RI_ALL_ONLINE }, { 0x2b8940, 1, RI_ALL_ONLINE }, + { 0x2b8980, 1, RI_ALL_ONLINE }, { 0x2b89c0, 1, RI_ALL_ONLINE }, + { 0x2b8a00, 1, RI_ALL_ONLINE }, { 0x2b8a40, 1, RI_ALL_ONLINE }, + { 0x2b8a80, 1, RI_ALL_ONLINE }, { 0x2b8ac0, 1, RI_ALL_ONLINE }, + { 0x2b8b00, 1, RI_ALL_ONLINE }, { 0x2b8b40, 1, RI_ALL_ONLINE }, + { 0x2b8b80, 1, RI_ALL_ONLINE }, { 0x2b8bc0, 1, RI_ALL_ONLINE }, + { 0x2b8c00, 1, 
RI_ALL_ONLINE }, { 0x2b8c40, 1, RI_ALL_ONLINE }, + { 0x2b8c80, 1, RI_ALL_ONLINE }, { 0x2b8cc0, 1, RI_ALL_ONLINE }, + { 0x2b8cc4, 1, RI_E2_ONLINE }, { 0x2b8d00, 1, RI_ALL_ONLINE }, + { 0x2b8d40, 1, RI_ALL_ONLINE }, { 0x2b8d80, 1, RI_ALL_ONLINE }, + { 0x2b8dc0, 1, RI_ALL_ONLINE }, { 0x2b8e00, 1, RI_ALL_ONLINE }, + { 0x2b8e40, 1, RI_ALL_ONLINE }, { 0x2b8e80, 1, RI_ALL_ONLINE }, + { 0x2b8e84, 1, RI_E2_ONLINE }, { 0x2b8ec0, 1, RI_E1HE2_ONLINE }, + { 0x2b8f00, 1, RI_E1HE2_ONLINE }, { 0x2b8f40, 1, RI_E1HE2_ONLINE }, + { 0x2b8f80, 1, RI_E1HE2_ONLINE }, { 0x2b8fc0, 1, RI_E1HE2_ONLINE }, + { 0x2b8fc4, 2, RI_E2_ONLINE }, { 0x2b8fd0, 6, RI_E2_ONLINE }, + { 0x2b9000, 1, RI_E2_ONLINE }, { 0x2b9040, 3, RI_E2_ONLINE }, + { 0x2b9400, 14, RI_E2_ONLINE }, { 0x2b943c, 19, RI_E2_ONLINE }, + { 0x2b9490, 10, RI_E2_ONLINE }, { 0x2c0000, 2, RI_ALL_ONLINE }, + { 0x300000, 65, RI_ALL_ONLINE }, { 0x30014c, 2, RI_E1HE2_ONLINE }, { 0x300200, 58, RI_ALL_ONLINE }, { 0x300340, 4, RI_ALL_ONLINE }, - { 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_ALL_OFFLINE }, + { 0x300380, 1, RI_E2_ONLINE }, { 0x300388, 1, RI_E2_ONLINE }, + { 0x300390, 1, RI_E2_ONLINE }, { 0x300398, 1, RI_E2_ONLINE }, + { 0x3003a0, 1, RI_E2_ONLINE }, { 0x3003a8, 2, RI_E2_ONLINE }, + { 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_E1E1H_OFFLINE }, { 0x302000, 4, RI_ALL_ONLINE }, { 0x302010, 2044, RI_ALL_OFFLINE }, - { 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 1023, RI_ALL_OFFLINE }, - { 0x321000, 1, RI_ALL_ONLINE }, { 0x321004, 4607, RI_ALL_OFFLINE }, - { 0x325800, 2560, RI_E1H_OFFLINE }, { 0x328000, 64, RI_ALL_OFFLINE }, - { 0x328100, 536, RI_E1H_OFFLINE }, { 0x328960, 1, RI_E1H_ONLINE }, - { 0x328964, 8103, RI_E1H_OFFLINE }, { 0x331800, 128, RI_ALL_OFFLINE }, - { 0x331c00, 128, RI_ALL_OFFLINE }, { 0x332000, 1, RI_ALL_OFFLINE }, - { 0x332400, 64, RI_E1H_OFFLINE }, { 0x338200, 1, RI_ALL_ONLINE }, + { 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 5631, RI_ALL_OFFLINE }, + { 0x325800, 2560, RI_E1HE2_OFFLINE }, { 0x328000, 1, RI_ALL_ONLINE }, + { 0x328004, 8191, RI_E1HE2_OFFLINE }, { 0x330000, 1, RI_ALL_ONLINE }, + { 0x330004, 15, RI_E1H_OFFLINE }, { 0x330040, 1, RI_E1HE2_ONLINE }, + { 0x330044, 239, RI_E1H_OFFLINE }, { 0x330400, 1, RI_ALL_ONLINE }, + { 0x330404, 255, RI_E1H_OFFLINE }, { 0x330800, 1, RI_ALL_ONLINE }, + { 0x330840, 1, RI_E1HE2_ONLINE }, { 0x330c00, 1, RI_ALL_ONLINE }, + { 0x331000, 1, RI_ALL_ONLINE }, { 0x331040, 1, RI_E1HE2_ONLINE }, + { 0x331400, 1, RI_ALL_ONLINE }, { 0x331440, 1, RI_E1HE2_ONLINE }, + { 0x331480, 1, RI_E1HE2_ONLINE }, { 0x3314c0, 1, RI_E1HE2_ONLINE }, + { 0x331800, 128, RI_ALL_OFFLINE }, { 0x331c00, 128, RI_ALL_OFFLINE }, + { 0x332000, 1, RI_ALL_ONLINE }, { 0x332400, 1, RI_E1HE2_ONLINE }, + { 0x332404, 5631, RI_E2_OFFLINE }, { 0x338000, 1, RI_ALL_ONLINE }, + { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE }, + { 0x3380c0, 1, RI_ALL_ONLINE }, { 0x338100, 1, RI_ALL_ONLINE }, + { 0x338140, 1, RI_ALL_ONLINE }, { 0x338180, 1, RI_ALL_ONLINE }, + { 0x3381c0, 1, RI_ALL_ONLINE }, { 0x338200, 1, RI_ALL_ONLINE }, { 0x338240, 1, RI_ALL_ONLINE }, { 0x338280, 1, RI_ALL_ONLINE }, - { 0x3382c0, 1, RI_ALL_ONLINE }, { 0x338a00, 1, RI_ALL_ONLINE }, - { 0x338a80, 1, RI_ALL_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE } + { 0x3382c0, 1, RI_ALL_ONLINE }, { 0x338300, 1, RI_ALL_ONLINE }, + { 0x338340, 1, RI_ALL_ONLINE }, { 0x338380, 1, RI_ALL_ONLINE }, + { 0x3383c0, 1, RI_ALL_ONLINE }, { 0x338400, 1, RI_ALL_ONLINE }, + { 0x338440, 1, RI_ALL_ONLINE }, { 0x338480, 1, RI_ALL_ONLINE }, + { 0x3384c0, 1, RI_ALL_ONLINE }, { 0x338500, 1, 
RI_ALL_ONLINE }, + { 0x338540, 1, RI_ALL_ONLINE }, { 0x338580, 1, RI_ALL_ONLINE }, + { 0x3385c0, 19, RI_E2_ONLINE }, { 0x338800, 1, RI_ALL_ONLINE }, + { 0x338840, 1, RI_ALL_ONLINE }, { 0x338880, 1, RI_ALL_ONLINE }, + { 0x3388c0, 1, RI_ALL_ONLINE }, { 0x338900, 1, RI_ALL_ONLINE }, + { 0x338940, 1, RI_ALL_ONLINE }, { 0x338980, 1, RI_ALL_ONLINE }, + { 0x3389c0, 1, RI_ALL_ONLINE }, { 0x338a00, 1, RI_ALL_ONLINE }, + { 0x338a40, 1, RI_ALL_ONLINE }, { 0x338a80, 1, RI_ALL_ONLINE }, + { 0x338ac0, 1, RI_ALL_ONLINE }, { 0x338b00, 1, RI_ALL_ONLINE }, + { 0x338b40, 1, RI_ALL_ONLINE }, { 0x338b80, 1, RI_ALL_ONLINE }, + { 0x338bc0, 1, RI_ALL_ONLINE }, { 0x338c00, 1, RI_ALL_ONLINE }, + { 0x338c40, 1, RI_ALL_ONLINE }, { 0x338c80, 1, RI_ALL_ONLINE }, + { 0x338cc0, 1, RI_ALL_ONLINE }, { 0x338cc4, 1, RI_E2_ONLINE }, + { 0x338d00, 1, RI_ALL_ONLINE }, { 0x338d40, 1, RI_ALL_ONLINE }, + { 0x338d80, 1, RI_ALL_ONLINE }, { 0x338dc0, 1, RI_ALL_ONLINE }, + { 0x338e00, 1, RI_ALL_ONLINE }, { 0x338e40, 1, RI_ALL_ONLINE }, + { 0x338e80, 1, RI_ALL_ONLINE }, { 0x338e84, 1, RI_E2_ONLINE }, + { 0x338ec0, 1, RI_E1HE2_ONLINE }, { 0x338f00, 1, RI_E1HE2_ONLINE }, + { 0x338f40, 1, RI_E1HE2_ONLINE }, { 0x338f80, 1, RI_E1HE2_ONLINE }, + { 0x338fc0, 1, RI_E1HE2_ONLINE }, { 0x338fc4, 2, RI_E2_ONLINE }, + { 0x338fd0, 6, RI_E2_ONLINE }, { 0x339000, 1, RI_E2_ONLINE }, + { 0x339040, 3, RI_E2_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE }, }; - -#define IDLE_REGS_COUNT 277 +#define IDLE_REGS_COUNT 237 static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = { - { 0x2114, 1, RI_ALL_ONLINE }, { 0x2120, 1, RI_ALL_ONLINE }, - { 0x212c, 4, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE }, - { 0x281c, 2, RI_ALL_ONLINE }, { 0xa38c, 1, RI_ALL_ONLINE }, + { 0x2104, 1, RI_ALL_ONLINE }, { 0x2110, 2, RI_ALL_ONLINE }, + { 0x211c, 8, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE }, + { 0x281c, 2, RI_ALL_ONLINE }, { 0x2854, 1, RI_ALL_ONLINE }, + { 0x285c, 1, RI_ALL_ONLINE }, { 0x9010, 7, RI_E2_ONLINE }, + { 0x9030, 1, RI_E2_ONLINE }, { 0x9068, 16, RI_E2_ONLINE }, + { 0x9230, 2, RI_E2_ONLINE }, { 0x9244, 1, RI_E2_ONLINE }, + { 0x9298, 1, RI_E2_ONLINE }, { 0x92a8, 1, RI_E2_ONLINE }, + { 0xa38c, 1, RI_ALL_ONLINE }, { 0xa3c4, 1, RI_E1HE2_ONLINE }, { 0xa408, 1, RI_ALL_ONLINE }, { 0xa42c, 12, RI_ALL_ONLINE }, - { 0xa600, 5, RI_E1H_ONLINE }, { 0xa618, 1, RI_E1H_ONLINE }, - { 0xc09c, 1, RI_ALL_ONLINE }, { 0x103b0, 1, RI_ALL_ONLINE }, - { 0x103c0, 1, RI_ALL_ONLINE }, { 0x103d0, 1, RI_E1H_ONLINE }, - { 0x2021c, 11, RI_ALL_ONLINE }, { 0x202a8, 1, RI_ALL_ONLINE }, - { 0x202b8, 1, RI_ALL_ONLINE }, { 0x20404, 1, RI_ALL_ONLINE }, - { 0x2040c, 2, RI_ALL_ONLINE }, { 0x2041c, 2, RI_ALL_ONLINE }, - { 0x40154, 14, RI_ALL_ONLINE }, { 0x40198, 1, RI_ALL_ONLINE }, - { 0x404ac, 1, RI_ALL_ONLINE }, { 0x404bc, 1, RI_ALL_ONLINE }, - { 0x42290, 1, RI_ALL_ONLINE }, { 0x422a0, 1, RI_ALL_ONLINE }, - { 0x422b0, 1, RI_ALL_ONLINE }, { 0x42548, 1, RI_ALL_ONLINE }, - { 0x42550, 1, RI_ALL_ONLINE }, { 0x42558, 1, RI_ALL_ONLINE }, - { 0x50160, 8, RI_ALL_ONLINE }, { 0x501d0, 1, RI_ALL_ONLINE }, - { 0x501e0, 1, RI_ALL_ONLINE }, { 0x50204, 1, RI_ALL_ONLINE }, - { 0x5020c, 2, RI_ALL_ONLINE }, { 0x5021c, 1, RI_ALL_ONLINE }, - { 0x60090, 1, RI_ALL_ONLINE }, { 0x6011c, 1, RI_ALL_ONLINE }, - { 0x6012c, 1, RI_ALL_ONLINE }, { 0xc101c, 1, RI_ALL_ONLINE }, - { 0xc102c, 1, RI_ALL_ONLINE }, { 0xc2290, 1, RI_ALL_ONLINE }, - { 0xc22a0, 1, RI_ALL_ONLINE }, { 0xc22b0, 1, RI_ALL_ONLINE }, - { 0xc2548, 1, RI_ALL_ONLINE }, { 0xc2550, 1, RI_ALL_ONLINE }, - { 0xc2558, 1, RI_ALL_ONLINE }, { 0xc4294, 1, RI_ALL_ONLINE }, - { 
0xc42a4, 1, RI_ALL_ONLINE }, { 0xc42b4, 1, RI_ALL_ONLINE }, - { 0xc4550, 1, RI_ALL_ONLINE }, { 0xc4558, 1, RI_ALL_ONLINE }, - { 0xc4560, 1, RI_ALL_ONLINE }, { 0xd016c, 8, RI_ALL_ONLINE }, - { 0xd01d8, 1, RI_ALL_ONLINE }, { 0xd01e8, 1, RI_ALL_ONLINE }, - { 0xd0204, 1, RI_ALL_ONLINE }, { 0xd020c, 3, RI_ALL_ONLINE }, - { 0xe0154, 8, RI_ALL_ONLINE }, { 0xe01c8, 1, RI_ALL_ONLINE }, - { 0xe01d8, 1, RI_ALL_ONLINE }, { 0xe0204, 1, RI_ALL_ONLINE }, - { 0xe020c, 2, RI_ALL_ONLINE }, { 0xe021c, 2, RI_ALL_ONLINE }, - { 0x101014, 1, RI_ALL_ONLINE }, { 0x101030, 1, RI_ALL_ONLINE }, - { 0x101040, 1, RI_ALL_ONLINE }, { 0x102058, 1, RI_ALL_ONLINE }, - { 0x102080, 16, RI_ALL_ONLINE }, { 0x103004, 2, RI_ALL_ONLINE }, - { 0x103068, 1, RI_ALL_ONLINE }, { 0x103078, 1, RI_ALL_ONLINE }, - { 0x103088, 1, RI_ALL_ONLINE }, { 0x10309c, 2, RI_E1H_ONLINE }, + { 0xa600, 5, RI_E1HE2_ONLINE }, { 0xa618, 1, RI_E1HE2_ONLINE }, + { 0xa714, 1, RI_E2_ONLINE }, { 0xa720, 1, RI_E2_ONLINE }, + { 0xa750, 1, RI_E2_ONLINE }, { 0xc09c, 1, RI_E1E1H_ONLINE }, + { 0x103b0, 1, RI_ALL_ONLINE }, { 0x103c0, 1, RI_ALL_ONLINE }, + { 0x103d0, 1, RI_E1H_ONLINE }, { 0x183bc, 1, RI_E2_ONLINE }, + { 0x183cc, 1, RI_E2_ONLINE }, { 0x2021c, 11, RI_ALL_ONLINE }, + { 0x202a8, 1, RI_ALL_ONLINE }, { 0x202b8, 1, RI_ALL_ONLINE }, + { 0x20404, 1, RI_ALL_ONLINE }, { 0x2040c, 2, RI_ALL_ONLINE }, + { 0x2041c, 2, RI_ALL_ONLINE }, { 0x40154, 14, RI_ALL_ONLINE }, + { 0x40198, 1, RI_ALL_ONLINE }, { 0x404ac, 1, RI_ALL_ONLINE }, + { 0x404bc, 1, RI_ALL_ONLINE }, { 0x42290, 1, RI_ALL_ONLINE }, + { 0x422a0, 1, RI_ALL_ONLINE }, { 0x422b0, 1, RI_ALL_ONLINE }, + { 0x42548, 1, RI_ALL_ONLINE }, { 0x42550, 1, RI_ALL_ONLINE }, + { 0x42558, 1, RI_ALL_ONLINE }, { 0x50160, 8, RI_ALL_ONLINE }, + { 0x501d0, 1, RI_ALL_ONLINE }, { 0x501e0, 1, RI_ALL_ONLINE }, + { 0x50204, 1, RI_ALL_ONLINE }, { 0x5020c, 2, RI_ALL_ONLINE }, + { 0x5021c, 1, RI_ALL_ONLINE }, { 0x60090, 1, RI_ALL_ONLINE }, + { 0x6011c, 1, RI_ALL_ONLINE }, { 0x6012c, 1, RI_ALL_ONLINE }, + { 0xc101c, 1, RI_ALL_ONLINE }, { 0xc102c, 1, RI_ALL_ONLINE }, + { 0xc2290, 1, RI_ALL_ONLINE }, { 0xc22a0, 1, RI_ALL_ONLINE }, + { 0xc22b0, 1, RI_ALL_ONLINE }, { 0xc2548, 1, RI_ALL_ONLINE }, + { 0xc2550, 1, RI_ALL_ONLINE }, { 0xc2558, 1, RI_ALL_ONLINE }, + { 0xc4294, 1, RI_ALL_ONLINE }, { 0xc42a4, 1, RI_ALL_ONLINE }, + { 0xc42b4, 1, RI_ALL_ONLINE }, { 0xc4550, 1, RI_ALL_ONLINE }, + { 0xc4558, 1, RI_ALL_ONLINE }, { 0xc4560, 1, RI_ALL_ONLINE }, + { 0xd016c, 8, RI_ALL_ONLINE }, { 0xd01d8, 1, RI_ALL_ONLINE }, + { 0xd01e8, 1, RI_ALL_ONLINE }, { 0xd0204, 1, RI_ALL_ONLINE }, + { 0xd020c, 3, RI_ALL_ONLINE }, { 0xe0154, 8, RI_ALL_ONLINE }, + { 0xe01c8, 1, RI_ALL_ONLINE }, { 0xe01d8, 1, RI_ALL_ONLINE }, + { 0xe0204, 1, RI_ALL_ONLINE }, { 0xe020c, 2, RI_ALL_ONLINE }, + { 0xe021c, 2, RI_ALL_ONLINE }, { 0x101014, 1, RI_ALL_ONLINE }, + { 0x101030, 1, RI_ALL_ONLINE }, { 0x101040, 1, RI_ALL_ONLINE }, + { 0x102058, 1, RI_ALL_ONLINE }, { 0x102080, 16, RI_ALL_ONLINE }, + { 0x103004, 2, RI_ALL_ONLINE }, { 0x103068, 1, RI_ALL_ONLINE }, + { 0x103078, 1, RI_ALL_ONLINE }, { 0x103088, 1, RI_ALL_ONLINE }, + { 0x10309c, 2, RI_E1HE2_ONLINE }, { 0x1030b8, 2, RI_E2_ONLINE }, + { 0x1030cc, 1, RI_E2_ONLINE }, { 0x1030e0, 1, RI_E2_ONLINE }, { 0x104004, 1, RI_ALL_ONLINE }, { 0x104018, 1, RI_ALL_ONLINE }, { 0x104020, 1, RI_ALL_ONLINE }, { 0x10403c, 1, RI_ALL_ONLINE }, { 0x1040fc, 1, RI_ALL_ONLINE }, { 0x10410c, 1, RI_ALL_ONLINE }, { 0x104400, 64, RI_ALL_ONLINE }, { 0x104800, 64, RI_ALL_ONLINE }, - { 0x105000, 3, RI_ALL_ONLINE }, { 0x105010, 3, RI_ALL_ONLINE }, - { 0x105020, 
3, RI_ALL_ONLINE }, { 0x105030, 3, RI_ALL_ONLINE }, - { 0x105040, 3, RI_ALL_ONLINE }, { 0x105050, 3, RI_ALL_ONLINE }, - { 0x105060, 3, RI_ALL_ONLINE }, { 0x105070, 3, RI_ALL_ONLINE }, - { 0x105080, 3, RI_ALL_ONLINE }, { 0x105090, 3, RI_ALL_ONLINE }, - { 0x1050a0, 3, RI_ALL_ONLINE }, { 0x1050b0, 3, RI_ALL_ONLINE }, - { 0x1050c0, 3, RI_ALL_ONLINE }, { 0x1050d0, 3, RI_ALL_ONLINE }, - { 0x1050e0, 3, RI_ALL_ONLINE }, { 0x1050f0, 3, RI_ALL_ONLINE }, - { 0x105100, 3, RI_ALL_ONLINE }, { 0x105110, 3, RI_ALL_ONLINE }, - { 0x105120, 3, RI_ALL_ONLINE }, { 0x105130, 3, RI_ALL_ONLINE }, - { 0x105140, 3, RI_ALL_ONLINE }, { 0x105150, 3, RI_ALL_ONLINE }, - { 0x105160, 3, RI_ALL_ONLINE }, { 0x105170, 3, RI_ALL_ONLINE }, - { 0x105180, 3, RI_ALL_ONLINE }, { 0x105190, 3, RI_ALL_ONLINE }, - { 0x1051a0, 3, RI_ALL_ONLINE }, { 0x1051b0, 3, RI_ALL_ONLINE }, - { 0x1051c0, 3, RI_ALL_ONLINE }, { 0x1051d0, 3, RI_ALL_ONLINE }, - { 0x1051e0, 3, RI_ALL_ONLINE }, { 0x1051f0, 3, RI_ALL_ONLINE }, - { 0x105200, 3, RI_ALL_ONLINE }, { 0x105210, 3, RI_ALL_ONLINE }, - { 0x105220, 3, RI_ALL_ONLINE }, { 0x105230, 3, RI_ALL_ONLINE }, - { 0x105240, 3, RI_ALL_ONLINE }, { 0x105250, 3, RI_ALL_ONLINE }, - { 0x105260, 3, RI_ALL_ONLINE }, { 0x105270, 3, RI_ALL_ONLINE }, - { 0x105280, 3, RI_ALL_ONLINE }, { 0x105290, 3, RI_ALL_ONLINE }, - { 0x1052a0, 3, RI_ALL_ONLINE }, { 0x1052b0, 3, RI_ALL_ONLINE }, - { 0x1052c0, 3, RI_ALL_ONLINE }, { 0x1052d0, 3, RI_ALL_ONLINE }, - { 0x1052e0, 3, RI_ALL_ONLINE }, { 0x1052f0, 3, RI_ALL_ONLINE }, - { 0x105300, 3, RI_ALL_ONLINE }, { 0x105310, 3, RI_ALL_ONLINE }, - { 0x105320, 3, RI_ALL_ONLINE }, { 0x105330, 3, RI_ALL_ONLINE }, - { 0x105340, 3, RI_ALL_ONLINE }, { 0x105350, 3, RI_ALL_ONLINE }, - { 0x105360, 3, RI_ALL_ONLINE }, { 0x105370, 3, RI_ALL_ONLINE }, - { 0x105380, 3, RI_ALL_ONLINE }, { 0x105390, 3, RI_ALL_ONLINE }, - { 0x1053a0, 3, RI_ALL_ONLINE }, { 0x1053b0, 3, RI_ALL_ONLINE }, - { 0x1053c0, 3, RI_ALL_ONLINE }, { 0x1053d0, 3, RI_ALL_ONLINE }, - { 0x1053e0, 3, RI_ALL_ONLINE }, { 0x1053f0, 3, RI_ALL_ONLINE }, - { 0x108094, 1, RI_ALL_ONLINE }, { 0x1201b0, 2, RI_ALL_ONLINE }, - { 0x12032c, 1, RI_ALL_ONLINE }, { 0x12036c, 3, RI_ALL_ONLINE }, - { 0x120408, 2, RI_ALL_ONLINE }, { 0x120414, 15, RI_ALL_ONLINE }, - { 0x120478, 2, RI_ALL_ONLINE }, { 0x12052c, 1, RI_ALL_ONLINE }, - { 0x120564, 3, RI_ALL_ONLINE }, { 0x12057c, 1, RI_ALL_ONLINE }, - { 0x12058c, 1, RI_ALL_ONLINE }, { 0x120608, 1, RI_E1H_ONLINE }, - { 0x120808, 1, RI_E1_ONLINE }, { 0x12080c, 2, RI_ALL_ONLINE }, + { 0x105000, 256, RI_ALL_ONLINE }, { 0x108094, 1, RI_E1E1H_ONLINE }, + { 0x1201b0, 2, RI_ALL_ONLINE }, { 0x12032c, 1, RI_ALL_ONLINE }, + { 0x12036c, 3, RI_ALL_ONLINE }, { 0x120408, 2, RI_ALL_ONLINE }, + { 0x120414, 15, RI_ALL_ONLINE }, { 0x120478, 2, RI_ALL_ONLINE }, + { 0x12052c, 1, RI_ALL_ONLINE }, { 0x120564, 3, RI_ALL_ONLINE }, + { 0x12057c, 1, RI_ALL_ONLINE }, { 0x12058c, 1, RI_ALL_ONLINE }, + { 0x120608, 1, RI_E1HE2_ONLINE }, { 0x120738, 1, RI_E2_ONLINE }, + { 0x120778, 2, RI_E2_ONLINE }, { 0x120808, 3, RI_ALL_ONLINE }, { 0x120818, 1, RI_ALL_ONLINE }, { 0x120820, 1, RI_ALL_ONLINE }, { 0x120828, 1, RI_ALL_ONLINE }, { 0x120830, 1, RI_ALL_ONLINE }, { 0x120838, 1, RI_ALL_ONLINE }, { 0x120840, 1, RI_ALL_ONLINE }, @@ -462,48 +580,50 @@ static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = { { 0x1208d8, 1, RI_ALL_ONLINE }, { 0x1208e0, 1, RI_ALL_ONLINE }, { 0x1208e8, 1, RI_ALL_ONLINE }, { 0x1208f0, 1, RI_ALL_ONLINE }, { 0x1208f8, 1, RI_ALL_ONLINE }, { 0x120900, 1, RI_ALL_ONLINE }, - { 0x120908, 1, RI_ALL_ONLINE }, { 0x14005c, 2, 
RI_ALL_ONLINE }, - { 0x1400d0, 2, RI_ALL_ONLINE }, { 0x1400e0, 1, RI_ALL_ONLINE }, - { 0x1401c8, 1, RI_ALL_ONLINE }, { 0x140200, 6, RI_ALL_ONLINE }, - { 0x16101c, 1, RI_ALL_ONLINE }, { 0x16102c, 1, RI_ALL_ONLINE }, - { 0x164014, 2, RI_ALL_ONLINE }, { 0x1640f0, 1, RI_ALL_ONLINE }, - { 0x166290, 1, RI_ALL_ONLINE }, { 0x1662a0, 1, RI_ALL_ONLINE }, - { 0x1662b0, 1, RI_ALL_ONLINE }, { 0x166548, 1, RI_ALL_ONLINE }, - { 0x166550, 1, RI_ALL_ONLINE }, { 0x166558, 1, RI_ALL_ONLINE }, - { 0x168000, 1, RI_ALL_ONLINE }, { 0x168008, 1, RI_ALL_ONLINE }, - { 0x168010, 1, RI_ALL_ONLINE }, { 0x168018, 1, RI_ALL_ONLINE }, - { 0x168028, 2, RI_ALL_ONLINE }, { 0x168058, 4, RI_ALL_ONLINE }, - { 0x168070, 1, RI_ALL_ONLINE }, { 0x168238, 1, RI_ALL_ONLINE }, - { 0x1682d0, 2, RI_ALL_ONLINE }, { 0x1682e0, 1, RI_ALL_ONLINE }, - { 0x168300, 67, RI_ALL_ONLINE }, { 0x168410, 2, RI_ALL_ONLINE }, + { 0x120908, 1, RI_ALL_ONLINE }, { 0x120940, 5, RI_E2_ONLINE }, + { 0x130030, 1, RI_E2_ONLINE }, { 0x13004c, 3, RI_E2_ONLINE }, + { 0x130064, 2, RI_E2_ONLINE }, { 0x13009c, 1, RI_E2_ONLINE }, + { 0x130130, 1, RI_E2_ONLINE }, { 0x13016c, 1, RI_E2_ONLINE }, + { 0x130300, 1, RI_E2_ONLINE }, { 0x130480, 1, RI_E2_ONLINE }, + { 0x14005c, 2, RI_ALL_ONLINE }, { 0x1400d0, 2, RI_ALL_ONLINE }, + { 0x1400e0, 1, RI_ALL_ONLINE }, { 0x1401c8, 1, RI_ALL_ONLINE }, + { 0x140200, 6, RI_ALL_ONLINE }, { 0x16101c, 1, RI_ALL_ONLINE }, + { 0x16102c, 1, RI_ALL_ONLINE }, { 0x164014, 2, RI_ALL_ONLINE }, + { 0x1640f0, 1, RI_ALL_ONLINE }, { 0x166290, 1, RI_ALL_ONLINE }, + { 0x1662a0, 1, RI_ALL_ONLINE }, { 0x1662b0, 1, RI_ALL_ONLINE }, + { 0x166548, 1, RI_ALL_ONLINE }, { 0x166550, 1, RI_ALL_ONLINE }, + { 0x166558, 1, RI_ALL_ONLINE }, { 0x168000, 1, RI_ALL_ONLINE }, + { 0x168008, 1, RI_ALL_ONLINE }, { 0x168010, 1, RI_ALL_ONLINE }, + { 0x168018, 1, RI_ALL_ONLINE }, { 0x168028, 2, RI_ALL_ONLINE }, + { 0x168058, 4, RI_ALL_ONLINE }, { 0x168070, 1, RI_ALL_ONLINE }, + { 0x168238, 1, RI_ALL_ONLINE }, { 0x1682d0, 2, RI_ALL_ONLINE }, + { 0x1682e0, 1, RI_ALL_ONLINE }, { 0x168300, 2, RI_E1E1H_ONLINE }, + { 0x168308, 65, RI_ALL_ONLINE }, { 0x168410, 2, RI_ALL_ONLINE }, { 0x168438, 1, RI_ALL_ONLINE }, { 0x168448, 1, RI_ALL_ONLINE }, { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16e200, 128, RI_E1H_ONLINE }, - { 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 70, RI_E1H_ONLINE }, - { 0x1700a4, 1, RI_ALL_ONLINE }, { 0x1700ac, 2, RI_ALL_ONLINE }, - { 0x1700c0, 1, RI_ALL_ONLINE }, { 0x170174, 1, RI_ALL_ONLINE }, - { 0x170184, 1, RI_ALL_ONLINE }, { 0x1800f4, 1, RI_ALL_ONLINE }, - { 0x180104, 1, RI_ALL_ONLINE }, { 0x180114, 1, RI_ALL_ONLINE }, - { 0x180124, 1, RI_ALL_ONLINE }, { 0x18026c, 1, RI_ALL_ONLINE }, - { 0x1802a0, 1, RI_ALL_ONLINE }, { 0x1a1000, 1, RI_ALL_ONLINE }, - { 0x1aa000, 1, RI_E1H_ONLINE }, { 0x1b8000, 1, RI_ALL_ONLINE }, - { 0x1b8040, 1, RI_ALL_ONLINE }, { 0x1b8080, 1, RI_ALL_ONLINE }, - { 0x1b80c0, 1, RI_ALL_ONLINE }, { 0x200104, 1, RI_ALL_ONLINE }, - { 0x200114, 1, RI_ALL_ONLINE }, { 0x200124, 1, RI_ALL_ONLINE }, - { 0x200134, 1, RI_ALL_ONLINE }, { 0x20026c, 1, RI_ALL_ONLINE }, - { 0x2002a0, 1, RI_ALL_ONLINE }, { 0x221000, 1, RI_ALL_ONLINE }, - { 0x227000, 1, RI_E1H_ONLINE }, { 0x238000, 1, RI_ALL_ONLINE }, - { 0x238040, 1, RI_ALL_ONLINE }, { 0x238080, 1, RI_ALL_ONLINE }, - { 0x2380c0, 1, RI_ALL_ONLINE }, { 0x280104, 1, RI_ALL_ONLINE }, - { 0x280114, 1, RI_ALL_ONLINE }, { 0x280124, 1, RI_ALL_ONLINE }, - { 0x280134, 1, RI_ALL_ONLINE }, { 0x28026c, 1, RI_ALL_ONLINE }, - { 0x2802a0, 1, RI_ALL_ONLINE }, { 0x2a1000, 1, RI_ALL_ONLINE }, - { 0x2a9000, 1, RI_E1H_ONLINE }, { 
0x2b8000, 1, RI_ALL_ONLINE }, - { 0x2b8040, 1, RI_ALL_ONLINE }, { 0x2b8080, 1, RI_ALL_ONLINE }, - { 0x2b80c0, 1, RI_ALL_ONLINE }, { 0x300104, 1, RI_ALL_ONLINE }, + { 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 64, RI_E1H_ONLINE }, + { 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 4, RI_E1H_ONLINE }, + { 0x16e6fc, 4, RI_E2_ONLINE }, { 0x1700a4, 1, RI_ALL_ONLINE }, + { 0x1700ac, 2, RI_ALL_ONLINE }, { 0x1700c0, 1, RI_ALL_ONLINE }, + { 0x170174, 1, RI_ALL_ONLINE }, { 0x170184, 1, RI_ALL_ONLINE }, + { 0x1800f4, 1, RI_ALL_ONLINE }, { 0x180104, 1, RI_ALL_ONLINE }, + { 0x180114, 1, RI_ALL_ONLINE }, { 0x180124, 1, RI_ALL_ONLINE }, + { 0x18026c, 1, RI_ALL_ONLINE }, { 0x1802a0, 1, RI_ALL_ONLINE }, + { 0x1b8000, 1, RI_ALL_ONLINE }, { 0x1b8040, 1, RI_ALL_ONLINE }, + { 0x1b8080, 1, RI_ALL_ONLINE }, { 0x1b80c0, 1, RI_ALL_ONLINE }, + { 0x200104, 1, RI_ALL_ONLINE }, { 0x200114, 1, RI_ALL_ONLINE }, + { 0x200124, 1, RI_ALL_ONLINE }, { 0x200134, 1, RI_ALL_ONLINE }, + { 0x20026c, 1, RI_ALL_ONLINE }, { 0x2002a0, 1, RI_ALL_ONLINE }, + { 0x238000, 1, RI_ALL_ONLINE }, { 0x238040, 1, RI_ALL_ONLINE }, + { 0x238080, 1, RI_ALL_ONLINE }, { 0x2380c0, 1, RI_ALL_ONLINE }, + { 0x280104, 1, RI_ALL_ONLINE }, { 0x280114, 1, RI_ALL_ONLINE }, + { 0x280124, 1, RI_ALL_ONLINE }, { 0x280134, 1, RI_ALL_ONLINE }, + { 0x28026c, 1, RI_ALL_ONLINE }, { 0x2802a0, 1, RI_ALL_ONLINE }, + { 0x2b8000, 1, RI_ALL_ONLINE }, { 0x2b8040, 1, RI_ALL_ONLINE }, + { 0x2b8080, 1, RI_ALL_ONLINE }, { 0x300104, 1, RI_ALL_ONLINE }, { 0x300114, 1, RI_ALL_ONLINE }, { 0x300124, 1, RI_ALL_ONLINE }, { 0x300134, 1, RI_ALL_ONLINE }, { 0x30026c, 1, RI_ALL_ONLINE }, - { 0x3002a0, 1, RI_ALL_ONLINE }, { 0x321000, 1, RI_ALL_ONLINE }, - { 0x328960, 1, RI_E1H_ONLINE }, { 0x338000, 1, RI_ALL_ONLINE }, + { 0x3002a0, 1, RI_ALL_ONLINE }, { 0x338000, 1, RI_ALL_ONLINE }, { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE }, { 0x3380c0, 1, RI_ALL_ONLINE } }; @@ -515,7 +635,6 @@ static const struct wreg_addr wreg_addrs_e1[WREGS_COUNT_E1] = { { 0x1b0c00, 192, 1, read_reg_e1_0, RI_E1_OFFLINE } }; - #define WREGS_COUNT_E1H 1 static const u32 read_reg_e1h_0[] = { 0x1b1040, 0x1b1000 }; @@ -530,22 +649,53 @@ static const struct wreg_addr wreg_addrs_e2[WREGS_COUNT_E2] = { { 0x1b0c00, 128, 2, read_reg_e2_0, RI_E2_OFFLINE } }; -static const struct dump_sign dump_sign_all = { 0x49aa93ee, 0x40835, 0x22 }; - +static const struct dump_sign dump_sign_all = { 0x4d18b0a4, 0x60010, 0x3a }; #define TIMER_REGS_COUNT_E1 2 -static const u32 timer_status_regs_e1[TIMER_REGS_COUNT_E1] = - { 0x164014, 0x164018 }; -static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] = - { 0x1640d0, 0x1640d4 }; +static const u32 timer_status_regs_e1[TIMER_REGS_COUNT_E1] = { + 0x164014, 0x164018 }; +static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] = { + 0x1640d0, 0x1640d4 }; #define TIMER_REGS_COUNT_E1H 2 -static const u32 timer_status_regs_e1h[TIMER_REGS_COUNT_E1H] = - { 0x164014, 0x164018 }; -static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] = - { 0x1640d0, 0x1640d4 }; +static const u32 timer_status_regs_e1h[TIMER_REGS_COUNT_E1H] = { + 0x164014, 0x164018 }; +static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] = { + 0x1640d0, 0x1640d4 }; + +#define TIMER_REGS_COUNT_E2 2 + +static const u32 timer_status_regs_e2[TIMER_REGS_COUNT_E2] = { + 0x164014, 0x164018 }; +static const u32 timer_scan_regs_e2[TIMER_REGS_COUNT_E2] = { + 0x1640d0, 0x1640d4 }; + +#define PAGE_MODE_VALUES_E1 0 + +#define PAGE_READ_REGS_E1 0 + +#define PAGE_WRITE_REGS_E1 0 + +static const u32 page_vals_e1[] = { 0 }; + 
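
A minimal sketch of how such paged register ranges can be walked when building the dump, mirroring the size calculation over PAGE_MODE_VALUES_E2 / PAGE_WRITE_REGS_E2 / PAGE_READ_REGS_E2 in bnx2x_get_regs_len() later in this patch. The helper name is invented for illustration; the page_vals_e2[] and page_write_regs_e2[] arrays and the addr/size/info fields of struct reg_addr are assumed by analogy with the E1/E1H stubs and the register tables above, and this is not the driver's actual bnx2x_read_pages_regs_e2().

/*
 * Illustrative sketch only, under the assumptions stated above:
 * select each page value through each page-write register, then
 * read back every online paged register range into the dump buffer.
 */
static void example_read_pages_regs_e2(struct bnx2x *bp, u32 *p)
{
	int i, j, k, n;

	for (i = 0; i < PAGE_MODE_VALUES_E2; i++) {
		for (j = 0; j < PAGE_WRITE_REGS_E2; j++) {
			/* select the page */
			REG_WR(bp, page_write_regs_e2[j], page_vals_e2[i]);

			/* dump every online paged range for this page */
			for (k = 0; k < PAGE_READ_REGS_E2; k++) {
				if (!IS_E2_ONLINE(page_read_regs_e2[k].info))
					continue;
				for (n = 0; n < page_read_regs_e2[k].size; n++)
					*p++ = REG_RD(bp,
					    page_read_regs_e2[k].addr + n * 4);
			}
		}
	}
}
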
+static const u32 page_write_regs_e1[] = { 0 }; + +static const struct reg_addr page_read_regs_e1[] = { { 0x0, 0, RI_E1_ONLINE } }; + +#define PAGE_MODE_VALUES_E1H 0 + +#define PAGE_READ_REGS_E1H 0 + +#define PAGE_WRITE_REGS_E1H 0 + +static const u32 page_vals_e1h[] = { 0 }; + +static const u32 page_write_regs_e1h[] = { 0 }; + +static const struct reg_addr page_read_regs_e1h[] = { + { 0x0, 0, RI_E1H_ONLINE } }; #define PAGE_MODE_VALUES_E2 2 diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c index 99c672d..5b44a8b 100644 --- a/drivers/net/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/bnx2x/bnx2x_ethtool.c @@ -24,6 +24,7 @@ #include "bnx2x.h" #include "bnx2x_cmn.h" #include "bnx2x_dump.h" +#include "bnx2x_init.h" /* Note: in the format strings below %s is replaced by the queue-name which is * either its index or 'fcoe' for the fcoe queue. Make sure the format string @@ -472,7 +473,7 @@ static int bnx2x_get_regs_len(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); int regdump_len = 0; - int i; + int i, j, k; if (CHIP_IS_E1(bp)) { for (i = 0; i < REGS_COUNT; i++) @@ -502,6 +503,15 @@ static int bnx2x_get_regs_len(struct net_device *dev) if (IS_E2_ONLINE(wreg_addrs_e2[i].info)) regdump_len += wreg_addrs_e2[i].size * (1 + wreg_addrs_e2[i].read_regs_count); + + for (i = 0; i < PAGE_MODE_VALUES_E2; i++) + for (j = 0; j < PAGE_WRITE_REGS_E2; j++) { + for (k = 0; k < PAGE_READ_REGS_E2; k++) + if (IS_E2_ONLINE(page_read_regs_e2[k]. + info)) + regdump_len += + page_read_regs_e2[k].size; + } } regdump_len *= 4; regdump_len += sizeof(struct dump_hdr); @@ -539,6 +549,12 @@ static void bnx2x_get_regs(struct net_device *dev, if (!netif_running(bp->dev)) return; + /* Disable parity attentions as long as following dump may + * cause false alarms by reading never written registers. We + * will re-enable parity attentions right after the dump. 
+ */ + bnx2x_disable_blocks_parity(bp); + dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1; dump_hdr.dump_sign = dump_sign_all; dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR); @@ -580,6 +596,10 @@ static void bnx2x_get_regs(struct net_device *dev, bnx2x_read_pages_regs_e2(bp, p); } + /* Re-enable parity attentions */ + bnx2x_clear_blocks_parity(bp); + if (CHIP_PARITY_ENABLED(bp)) + bnx2x_enable_blocks_parity(bp); } #define PHY_FW_VER_LEN 20 diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h index a9d5487..5a268e9 100644 --- a/drivers/net/bnx2x/bnx2x_init.h +++ b/drivers/net/bnx2x/bnx2x_init.h @@ -192,5 +192,225 @@ struct src_ent { u64 next; }; +/**************************************************************************** +* Parity configuration +****************************************************************************/ +#define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2) \ +{ \ + block##_REG_##block##_PRTY_MASK, \ + block##_REG_##block##_PRTY_STS_CLR, \ + en_mask, {m1, m1h, m2}, #block \ +} + +#define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2) \ +{ \ + block##_REG_##block##_PRTY_MASK_0, \ + block##_REG_##block##_PRTY_STS_CLR_0, \ + en_mask, {m1, m1h, m2}, #block"_0" \ +} + +#define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2) \ +{ \ + block##_REG_##block##_PRTY_MASK_1, \ + block##_REG_##block##_PRTY_STS_CLR_1, \ + en_mask, {m1, m1h, m2}, #block"_1" \ +} + +static const struct { + u32 mask_addr; + u32 sts_clr_addr; + u32 en_mask; /* Mask to enable parity attentions */ + struct { + u32 e1; /* 57710 */ + u32 e1h; /* 57711 */ + u32 e2; /* 57712 */ + } reg_mask; /* Register mask (all valid bits) */ + char name[7]; /* Block's longest name is 6 characters long + * (name + suffix) + */ +} bnx2x_blocks_parity_data[] = { + /* bit 19 masked */ + /* REG_WR(bp, PXP_REG_PXP_PRTY_MASK, 0x80000); */ + /* bit 5,18,20-31 */ + /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */ + /* bit 5 */ + /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_1, 0x20); */ + /* REG_WR(bp, HC_REG_HC_PRTY_MASK, 0x0); */ + /* REG_WR(bp, MISC_REG_MISC_PRTY_MASK, 0x0); */ + + /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't + * want to handle "system kill" flow at the moment. 
+ */ + BLOCK_PRTY_INFO(PXP, 0x3ffffff, 0x3ffffff, 0x3ffffff, 0x3ffffff), + BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff), + BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff), + BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0), + BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff), + BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1), + BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff), + BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3), + {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, + GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0, + {0xf, 0xf, 0xf}, "UPB"}, + {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, + GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0, + {0xf, 0xf, 0xf}, "XPB"}, + BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7), + BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f), + BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0xf), + BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1), + BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf), + BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf), + BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff), + BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff), + BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff), + BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff), + BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff), + BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff), + BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f), + BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff), + BLOCK_PRTY_INFO_1(USEM, 0, 0x3, 0x1f, 0x1f), + BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff), + BLOCK_PRTY_INFO_1(CSEM, 0, 0x3, 0x1f, 0x1f), + BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff), + BLOCK_PRTY_INFO_1(XSEM, 0, 0x3, 0x1f, 0x3f), +}; + + +/* [28] MCP Latched rom_parity + * [29] MCP Latched ump_rx_parity + * [30] MCP Latched ump_tx_parity + * [31] MCP Latched scpad_parity + */ +#define MISC_AEU_ENABLE_MCP_PRTY_BITS \ + (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY) + +/* Below registers control the MCP parity attention output. When + * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are + * enabled, when cleared - disabled. 
+ */ +static const u32 mcp_attn_ctl_regs[] = { + MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0, + MISC_REG_AEU_ENABLE4_NIG_0, + MISC_REG_AEU_ENABLE4_PXP_0, + MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0, + MISC_REG_AEU_ENABLE4_NIG_1, + MISC_REG_AEU_ENABLE4_PXP_1 +}; + +static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable) +{ + int i; + u32 reg_val; + + for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) { + reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]); + + if (enable) + reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS; + else + reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS; + + REG_WR(bp, mcp_attn_ctl_regs[i], reg_val); + } +} + +static inline u32 bnx2x_parity_reg_mask(struct bnx2x *bp, int idx) +{ + if (CHIP_IS_E1(bp)) + return bnx2x_blocks_parity_data[idx].reg_mask.e1; + else if (CHIP_IS_E1H(bp)) + return bnx2x_blocks_parity_data[idx].reg_mask.e1h; + else + return bnx2x_blocks_parity_data[idx].reg_mask.e2; +} + +static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) { + u32 dis_mask = bnx2x_parity_reg_mask(bp, i); + + if (dis_mask) { + REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr, + dis_mask); + DP(NETIF_MSG_HW, "Setting parity mask " + "for %s to\t\t0x%x\n", + bnx2x_blocks_parity_data[i].name, dis_mask); + } + } + + /* Disable MCP parity attentions */ + bnx2x_set_mcp_parity(bp, false); +} + +/** + * Clear the parity error status registers. + */ +static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp) +{ + int i; + u32 reg_val, mcp_aeu_bits = + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY | + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY; + + /* Clear SEM_FAST parities */ + REG_WR(bp, XSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); + REG_WR(bp, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); + REG_WR(bp, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); + REG_WR(bp, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1); + + for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) { + u32 reg_mask = bnx2x_parity_reg_mask(bp, i); + + if (reg_mask) { + reg_val = REG_RD(bp, bnx2x_blocks_parity_data[i]. 
+ sts_clr_addr); + if (reg_val & reg_mask) + DP(NETIF_MSG_HW, + "Parity errors in %s: 0x%x\n", + bnx2x_blocks_parity_data[i].name, + reg_val & reg_mask); + } + } + + /* Check if there were parity attentions in MCP */ + reg_val = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_MCP); + if (reg_val & mcp_aeu_bits) + DP(NETIF_MSG_HW, "Parity error in MCP: 0x%x\n", + reg_val & mcp_aeu_bits); + + /* Clear parity attentions in MCP: + * [7] clears Latched rom_parity + * [8] clears Latched ump_rx_parity + * [9] clears Latched ump_tx_parity + * [10] clears Latched scpad_parity (both ports) + */ + REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x780); +} + +static inline void bnx2x_enable_blocks_parity(struct bnx2x *bp) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) { + u32 reg_mask = bnx2x_parity_reg_mask(bp, i); + + if (reg_mask) + REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr, + bnx2x_blocks_parity_data[i].en_mask & reg_mask); + } + + /* Enable MCP parity attentions */ + bnx2x_set_mcp_parity(bp, true); +} + + #endif /* BNX2X_INIT_H */ diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c index 489a551..84e1af4 100644 --- a/drivers/net/bnx2x/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c @@ -3152,7 +3152,6 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) #define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1) #define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK) #define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS -#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) /* * should be run under rtnl lock @@ -3527,7 +3526,7 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) try to handle this event */ bnx2x_acquire_alr(bp); - if (bnx2x_chk_parity_attn(bp)) { + if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) { bp->recovery_state = BNX2X_RECOVERY_INIT; bnx2x_set_reset_in_progress(bp); schedule_delayed_work(&bp->reset_task, 0); @@ -4754,7 +4753,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) return 0; /* OK */ } -static void enable_blocks_attention(struct bnx2x *bp) +static void bnx2x_enable_blocks_attention(struct bnx2x *bp) { REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); if (CHIP_IS_E2(bp)) @@ -4808,53 +4807,9 @@ static void enable_blocks_attention(struct bnx2x *bp) REG_WR(bp, CDU_REG_CDU_INT_MASK, 0); REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0); /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */ - REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */ + REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */ } -static const struct { - u32 addr; - u32 mask; -} bnx2x_parity_mask[] = { - {PXP_REG_PXP_PRTY_MASK, 0x3ffffff}, - {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff}, - {PXP2_REG_PXP2_PRTY_MASK_1, 0x7f}, - {HC_REG_HC_PRTY_MASK, 0x7}, - {MISC_REG_MISC_PRTY_MASK, 0x1}, - {QM_REG_QM_PRTY_MASK, 0x0}, - {DORQ_REG_DORQ_PRTY_MASK, 0x0}, - {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0}, - {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0}, - {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */ - {CDU_REG_CDU_PRTY_MASK, 0x0}, - {CFC_REG_CFC_PRTY_MASK, 0x0}, - {DBG_REG_DBG_PRTY_MASK, 0x0}, - {DMAE_REG_DMAE_PRTY_MASK, 0x0}, - {BRB1_REG_BRB1_PRTY_MASK, 0x0}, - {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */ - {TSDM_REG_TSDM_PRTY_MASK, 0x18}, /* bit 3,4 */ - {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */ - {USDM_REG_USDM_PRTY_MASK, 0x38}, /* bit 3,4,5 */ - {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */ - {TSEM_REG_TSEM_PRTY_MASK_0, 0x0}, - {TSEM_REG_TSEM_PRTY_MASK_1, 0x0}, - {USEM_REG_USEM_PRTY_MASK_0, 0x0}, - {USEM_REG_USEM_PRTY_MASK_1, 
0x0}, - {CSEM_REG_CSEM_PRTY_MASK_0, 0x0}, - {CSEM_REG_CSEM_PRTY_MASK_1, 0x0}, - {XSEM_REG_XSEM_PRTY_MASK_0, 0x0}, - {XSEM_REG_XSEM_PRTY_MASK_1, 0x0} -}; - -static void enable_blocks_parity(struct bnx2x *bp) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++) - REG_WR(bp, bnx2x_parity_mask[i].addr, - bnx2x_parity_mask[i].mask); -} - - static void bnx2x_reset_common(struct bnx2x *bp) { /* reset_common */ @@ -5350,9 +5305,9 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) /* clear PXP2 attentions */ REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0); - enable_blocks_attention(bp); - if (CHIP_PARITY_SUPPORTED(bp)) - enable_blocks_parity(bp); + bnx2x_enable_blocks_attention(bp); + if (CHIP_PARITY_ENABLED(bp)) + bnx2x_enable_blocks_parity(bp); if (!BP_NOMCP(bp)) { /* In E2 2-PORT mode, same ext phy is used for the two paths */ @@ -8751,13 +8706,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) dev_err(&bp->pdev->dev, "MCP disabled, " "must load devices in order!\n"); - /* Set multi queue mode */ - if ((multi_mode != ETH_RSS_MODE_DISABLED) && - ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) { - dev_err(&bp->pdev->dev, "Multi disabled since int_mode " - "requested is not MSI-X\n"); - multi_mode = ETH_RSS_MODE_DISABLED; - } bp->multi_mode = multi_mode; bp->int_mode = int_mode; @@ -9560,9 +9508,15 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev) /* Delete all NAPI objects */ bnx2x_del_all_napi(bp); + /* Power on: we can't let PCI layer write to us while we are in D3 */ + bnx2x_set_power_state(bp, PCI_D0); + /* Disable MSI/MSI-X */ bnx2x_disable_msi(bp); + /* Power off */ + bnx2x_set_power_state(bp, PCI_D3hot); + /* Make sure RESET task is not scheduled before continuing */ cancel_delayed_work_sync(&bp->reset_task); diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h index bfd875b..38ef7ca 100644 --- a/drivers/net/bnx2x/bnx2x_reg.h +++ b/drivers/net/bnx2x/bnx2x_reg.h @@ -18,6 +18,8 @@ * WR - Write Clear (write 1 to clear the bit) * */ +#ifndef BNX2X_REG_H +#define BNX2X_REG_H #define ATC_ATC_INT_STS_REG_ADDRESS_ERROR (0x1<<0) #define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS (0x1<<2) @@ -39,6 +41,8 @@ #define BRB1_REG_BRB1_PRTY_MASK 0x60138 /* [R 4] Parity register #0 read */ #define BRB1_REG_BRB1_PRTY_STS 0x6012c +/* [RC 4] Parity register #0 read clear */ +#define BRB1_REG_BRB1_PRTY_STS_CLR 0x60130 /* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At * address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address * BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. Warning - @@ -132,8 +136,12 @@ #define CCM_REG_CCM_INT_MASK 0xd01e4 /* [R 11] Interrupt register #0 read */ #define CCM_REG_CCM_INT_STS 0xd01d8 +/* [RW 27] Parity mask register #0 read/write */ +#define CCM_REG_CCM_PRTY_MASK 0xd01f4 /* [R 27] Parity register #0 read */ #define CCM_REG_CCM_PRTY_STS 0xd01e8 +/* [RC 27] Parity register #0 read clear */ +#define CCM_REG_CCM_PRTY_STS_CLR 0xd01ec /* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). 
Is used to determine the number of the AG context REG-pairs written back; @@ -350,6 +358,8 @@ #define CDU_REG_CDU_PRTY_MASK 0x10104c /* [R 5] Parity register #0 read */ #define CDU_REG_CDU_PRTY_STS 0x101040 +/* [RC 5] Parity register #0 read clear */ +#define CDU_REG_CDU_PRTY_STS_CLR 0x101044 /* [RC 32] logging of error data in case of a CDU load error: {expected_cid[15:0]; xpected_type[2:0]; xpected_region[2:0]; ctive_error; ype_error; ctual_active; ctual_compressed_context}; */ @@ -381,6 +391,8 @@ #define CFC_REG_CFC_PRTY_MASK 0x104118 /* [R 4] Parity register #0 read */ #define CFC_REG_CFC_PRTY_STS 0x10410c +/* [RC 4] Parity register #0 read clear */ +#define CFC_REG_CFC_PRTY_STS_CLR 0x104110 /* [RW 21] CID cam access (21:1 - Data; alid - 0) */ #define CFC_REG_CID_CAM 0x104800 #define CFC_REG_CONTROL0 0x104028 @@ -466,6 +478,8 @@ #define CSDM_REG_CSDM_PRTY_MASK 0xc22bc /* [R 11] Parity register #0 read */ #define CSDM_REG_CSDM_PRTY_STS 0xc22b0 +/* [RC 11] Parity register #0 read clear */ +#define CSDM_REG_CSDM_PRTY_STS_CLR 0xc22b4 #define CSDM_REG_ENABLE_IN1 0xc2238 #define CSDM_REG_ENABLE_IN2 0xc223c #define CSDM_REG_ENABLE_OUT1 0xc2240 @@ -556,6 +570,9 @@ /* [R 32] Parity register #0 read */ #define CSEM_REG_CSEM_PRTY_STS_0 0x200124 #define CSEM_REG_CSEM_PRTY_STS_1 0x200134 +/* [RC 32] Parity register #0 read clear */ +#define CSEM_REG_CSEM_PRTY_STS_CLR_0 0x200128 +#define CSEM_REG_CSEM_PRTY_STS_CLR_1 0x200138 #define CSEM_REG_ENABLE_IN 0x2000a4 #define CSEM_REG_ENABLE_OUT 0x2000a8 /* [RW 32] This address space contains all registers and memories that are @@ -648,6 +665,8 @@ #define DBG_REG_DBG_PRTY_MASK 0xc0a8 /* [R 1] Parity register #0 read */ #define DBG_REG_DBG_PRTY_STS 0xc09c +/* [RC 1] Parity register #0 read clear */ +#define DBG_REG_DBG_PRTY_STS_CLR 0xc0a0 /* [RW 1] When set the DMAE will process the commands as in E1.5. 1.The * function that is used is always SRC-PCI; 2.VF_Valid = 0; 3.VFID=0; * 4.Completion function=0; 5.Error handling=0 */ @@ -668,6 +687,8 @@ #define DMAE_REG_DMAE_PRTY_MASK 0x102064 /* [R 4] Parity register #0 read */ #define DMAE_REG_DMAE_PRTY_STS 0x102058 +/* [RC 4] Parity register #0 read clear */ +#define DMAE_REG_DMAE_PRTY_STS_CLR 0x10205c /* [RW 1] Command 0 go. */ #define DMAE_REG_GO_C0 0x102080 /* [RW 1] Command 1 go. */ @@ -734,6 +755,8 @@ #define DORQ_REG_DORQ_PRTY_MASK 0x170190 /* [R 2] Parity register #0 read */ #define DORQ_REG_DORQ_PRTY_STS 0x170184 +/* [RC 2] Parity register #0 read clear */ +#define DORQ_REG_DORQ_PRTY_STS_CLR 0x170188 /* [RW 8] The address to write the DPM CID to STORM. */ #define DORQ_REG_DPM_CID_ADDR 0x170044 /* [RW 5] The DPM mode CID extraction offset. */ @@ -842,8 +865,12 @@ /* [R 1] data availble for error memory. If this bit is clear do not red * from error_handling_memory. 
*/ #define IGU_REG_ERROR_HANDLING_DATA_VALID 0x130130 +/* [RW 11] Parity mask register #0 read/write */ +#define IGU_REG_IGU_PRTY_MASK 0x1300a8 /* [R 11] Parity register #0 read */ #define IGU_REG_IGU_PRTY_STS 0x13009c +/* [RC 11] Parity register #0 read clear */ +#define IGU_REG_IGU_PRTY_STS_CLR 0x1300a0 /* [R 4] Debug: int_handle_fsm */ #define IGU_REG_INT_HANDLE_FSM 0x130050 #define IGU_REG_LEADING_EDGE_LATCH 0x130134 @@ -1501,6 +1528,8 @@ #define MISC_REG_MISC_PRTY_MASK 0xa398 /* [R 1] Parity register #0 read */ #define MISC_REG_MISC_PRTY_STS 0xa38c +/* [RC 1] Parity register #0 read clear */ +#define MISC_REG_MISC_PRTY_STS_CLR 0xa390 #define MISC_REG_NIG_WOL_P0 0xa270 #define MISC_REG_NIG_WOL_P1 0xa274 /* [R 1] If set indicate that the pcie_rst_b was asserted without perst @@ -2082,6 +2111,10 @@ #define PBF_REG_PBF_INT_MASK 0x1401d4 /* [R 5] Interrupt register #0 read */ #define PBF_REG_PBF_INT_STS 0x1401c8 +/* [RW 20] Parity mask register #0 read/write */ +#define PBF_REG_PBF_PRTY_MASK 0x1401e4 +/* [RC 20] Parity register #0 read clear */ +#define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc #define PB_REG_CONTROL 0 /* [RW 2] Interrupt mask register #0 read/write */ #define PB_REG_PB_INT_MASK 0x28 @@ -2091,6 +2124,8 @@ #define PB_REG_PB_PRTY_MASK 0x38 /* [R 4] Parity register #0 read */ #define PB_REG_PB_PRTY_STS 0x2c +/* [RC 4] Parity register #0 read clear */ +#define PB_REG_PB_PRTY_STS_CLR 0x30 #define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR (0x1<<0) #define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW (0x1<<8) #define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR (0x1<<1) @@ -2446,6 +2481,8 @@ #define PRS_REG_PRS_PRTY_MASK 0x401a4 /* [R 8] Parity register #0 read */ #define PRS_REG_PRS_PRTY_STS 0x40198 +/* [RC 8] Parity register #0 read clear */ +#define PRS_REG_PRS_PRTY_STS_CLR 0x4019c /* [RW 8] Context region for pure acknowledge packets. Used in CFC load request message */ #define PRS_REG_PURE_REGIONS 0x40024 @@ -2599,6 +2636,9 @@ /* [R 32] Parity register #0 read */ #define PXP2_REG_PXP2_PRTY_STS_0 0x12057c #define PXP2_REG_PXP2_PRTY_STS_1 0x12058c +/* [RC 32] Parity register #0 read clear */ +#define PXP2_REG_PXP2_PRTY_STS_CLR_0 0x120580 +#define PXP2_REG_PXP2_PRTY_STS_CLR_1 0x120590 /* [R 1] Debug only: The 'almost full' indication from each fifo (gives indication about backpressure) */ #define PXP2_REG_RD_ALMOST_FULL_0 0x120424 @@ -3001,6 +3041,8 @@ #define PXP_REG_PXP_PRTY_MASK 0x103094 /* [R 26] Parity register #0 read */ #define PXP_REG_PXP_PRTY_STS 0x103088 +/* [RC 27] Parity register #0 read clear */ +#define PXP_REG_PXP_PRTY_STS_CLR 0x10308c /* [RW 4] The activity counter initial increment value sent in the load request */ #define QM_REG_ACTCTRINITVAL_0 0x168040 @@ -3157,6 +3199,8 @@ #define QM_REG_QM_PRTY_MASK 0x168454 /* [R 12] Parity register #0 read */ #define QM_REG_QM_PRTY_STS 0x168448 +/* [RC 12] Parity register #0 read clear */ +#define QM_REG_QM_PRTY_STS_CLR 0x16844c /* [R 32] Current queues in pipeline: Queues from 32 to 63 */ #define QM_REG_QSTATUS_HIGH 0x16802c /* [R 32] Current queues in pipeline: Queues from 96 to 127 */ @@ -3442,6 +3486,8 @@ #define QM_REG_WRRWEIGHTS_9 0x168848 /* [R 6] Keep the fill level of the fifo from write client 1 */ #define QM_REG_XQM_WRC_FIFOLVL 0x168000 +/* [W 1] reset to parity interrupt */ +#define SEM_FAST_REG_PARITY_RST 0x18840 #define SRC_REG_COUNTFREE0 0x40500 /* [RW 1] If clr the searcher is compatible to E1 A0 - support only two ports. If set the searcher support 8 functions. 
*/ @@ -3470,6 +3516,8 @@ #define SRC_REG_SRC_PRTY_MASK 0x404c8 /* [R 3] Parity register #0 read */ #define SRC_REG_SRC_PRTY_STS 0x404bc +/* [RC 3] Parity register #0 read clear */ +#define SRC_REG_SRC_PRTY_STS_CLR 0x404c0 /* [R 4] Used to read the value of the XX protection CAM occupancy counter. */ #define TCM_REG_CAM_OCCUP 0x5017c /* [RW 1] CDU AG read Interface enable. If 0 - the request input is @@ -3596,8 +3644,12 @@ #define TCM_REG_TCM_INT_MASK 0x501dc /* [R 11] Interrupt register #0 read */ #define TCM_REG_TCM_INT_STS 0x501d0 +/* [RW 27] Parity mask register #0 read/write */ +#define TCM_REG_TCM_PRTY_MASK 0x501ec /* [R 27] Parity register #0 read */ #define TCM_REG_TCM_PRTY_STS 0x501e0 +/* [RC 27] Parity register #0 read clear */ +#define TCM_REG_TCM_PRTY_STS_CLR 0x501e4 /* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). Is used to determine the number of the AG context REG-pairs written back; @@ -3755,6 +3807,10 @@ #define TM_REG_TM_INT_MASK 0x1640fc /* [R 1] Interrupt register #0 read */ #define TM_REG_TM_INT_STS 0x1640f0 +/* [RW 7] Parity mask register #0 read/write */ +#define TM_REG_TM_PRTY_MASK 0x16410c +/* [RC 7] Parity register #0 read clear */ +#define TM_REG_TM_PRTY_STS_CLR 0x164104 /* [RW 8] The event id for aggregated interrupt 0 */ #define TSDM_REG_AGG_INT_EVENT_0 0x42038 #define TSDM_REG_AGG_INT_EVENT_1 0x4203c @@ -3835,6 +3891,8 @@ #define TSDM_REG_TSDM_PRTY_MASK 0x422bc /* [R 11] Parity register #0 read */ #define TSDM_REG_TSDM_PRTY_STS 0x422b0 +/* [RC 11] Parity register #0 read clear */ +#define TSDM_REG_TSDM_PRTY_STS_CLR 0x422b4 /* [RW 5] The number of time_slots in the arbitration cycle */ #define TSEM_REG_ARB_CYCLE_SIZE 0x180034 /* [RW 3] The source that is associated with arbitration element 0. Source @@ -3914,6 +3972,9 @@ #define TSEM_REG_SLOW_EXT_STORE_EMPTY 0x1802a0 /* [RW 8] List of free threads . There is a bit per thread. */ #define TSEM_REG_THREADS_LIST 0x1802e4 +/* [RC 32] Parity register #0 read clear */ +#define TSEM_REG_TSEM_PRTY_STS_CLR_0 0x180118 +#define TSEM_REG_TSEM_PRTY_STS_CLR_1 0x180128 /* [RW 3] The arbitration scheme of time_slot 0 */ #define TSEM_REG_TS_0_AS 0x180038 /* [RW 3] The arbitration scheme of time_slot 10 */ @@ -4116,6 +4177,8 @@ #define UCM_REG_UCM_INT_STS 0xe01c8 /* [R 27] Parity register #0 read */ #define UCM_REG_UCM_PRTY_STS 0xe01d8 +/* [RC 27] Parity register #0 read clear */ +#define UCM_REG_UCM_PRTY_STS_CLR 0xe01dc /* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). Is used to determine the number of the AG context REG-pairs written back; @@ -4292,6 +4355,8 @@ #define USDM_REG_USDM_PRTY_MASK 0xc42c0 /* [R 11] Parity register #0 read */ #define USDM_REG_USDM_PRTY_STS 0xc42b4 +/* [RC 11] Parity register #0 read clear */ +#define USDM_REG_USDM_PRTY_STS_CLR 0xc42b8 /* [RW 5] The number of time_slots in the arbitration cycle */ #define USEM_REG_ARB_CYCLE_SIZE 0x300034 /* [RW 3] The source that is associated with arbitration element 0. Source @@ -4421,6 +4486,9 @@ /* [R 32] Parity register #0 read */ #define USEM_REG_USEM_PRTY_STS_0 0x300124 #define USEM_REG_USEM_PRTY_STS_1 0x300134 +/* [RC 32] Parity register #0 read clear */ +#define USEM_REG_USEM_PRTY_STS_CLR_0 0x300128 +#define USEM_REG_USEM_PRTY_STS_CLR_1 0x300138 /* [W 7] VF or PF ID for reset error bit. 
Values 0-63 reset error bit for 64 * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */ #define USEM_REG_VFPF_ERR_NUM 0x300380 @@ -4797,6 +4865,8 @@ #define XSDM_REG_XSDM_PRTY_MASK 0x1662bc /* [R 11] Parity register #0 read */ #define XSDM_REG_XSDM_PRTY_STS 0x1662b0 +/* [RC 11] Parity register #0 read clear */ +#define XSDM_REG_XSDM_PRTY_STS_CLR 0x1662b4 /* [RW 5] The number of time_slots in the arbitration cycle */ #define XSEM_REG_ARB_CYCLE_SIZE 0x280034 /* [RW 3] The source that is associated with arbitration element 0. Source @@ -4929,6 +4999,9 @@ /* [R 32] Parity register #0 read */ #define XSEM_REG_XSEM_PRTY_STS_0 0x280124 #define XSEM_REG_XSEM_PRTY_STS_1 0x280134 +/* [RC 32] Parity register #0 read clear */ +#define XSEM_REG_XSEM_PRTY_STS_CLR_0 0x280128 +#define XSEM_REG_XSEM_PRTY_STS_CLR_1 0x280138 #define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0) #define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1) #define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0) @@ -6316,3 +6389,4 @@ static inline u8 calc_crc8(u32 data, u8 crc) } +#endif /* BNX2X_REG_H */ diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c index 6e4d9b1..bda60d5 100644 --- a/drivers/net/bnx2x/bnx2x_stats.c +++ b/drivers/net/bnx2x/bnx2x_stats.c @@ -158,6 +158,11 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp) spin_lock_bh(&bp->stats_lock); + if (bp->stats_pending) { + spin_unlock_bh(&bp->stats_lock); + return; + } + ramrod_data.drv_counter = bp->stats_counter++; ramrod_data.collect_port = bp->port.pmf ? 1 : 0; for_each_eth_queue(bp, i) diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c index 3c403f8..56166ae 100644 --- a/drivers/net/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/cxgb4vf/cxgb4vf_main.c @@ -749,13 +749,19 @@ static int cxgb4vf_open(struct net_device *dev) netif_set_real_num_tx_queues(dev, pi->nqsets); err = netif_set_real_num_rx_queues(dev, pi->nqsets); if (err) - return err; - set_bit(pi->port_id, &adapter->open_device_map); + goto err_unwind; err = link_start(dev); if (err) - return err; + goto err_unwind; + netif_tx_start_all_queues(dev); + set_bit(pi->port_id, &adapter->open_device_map); return 0; + +err_unwind: + if (adapter->open_device_map == 0) + adapter_down(adapter); + return err; } /* @@ -764,13 +770,12 @@ static int cxgb4vf_open(struct net_device *dev) */ static int cxgb4vf_stop(struct net_device *dev) { - int ret; struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; netif_tx_stop_all_queues(dev); netif_carrier_off(dev); - ret = t4vf_enable_vi(adapter, pi->viid, false, false); + t4vf_enable_vi(adapter, pi->viid, false, false); pi->link_cfg.link_ok = 0; clear_bit(pi->port_id, &adapter->open_device_map); diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c index e4bec78..0f51c80 100644 --- a/drivers/net/cxgb4vf/t4vf_hw.c +++ b/drivers/net/cxgb4vf/t4vf_hw.c @@ -147,9 +147,20 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, /* * Write the command array into the Mailbox Data register array and * transfer ownership of the mailbox to the firmware. + * + * For the VFs, the Mailbox Data "registers" are actually backed by + * T4's "MA" interface rather than PL Registers (as is the case for + * the PFs). Because these are in different coherency domains, the + * write to the VF's PL-register-backed Mailbox Control can race in + * front of the writes to the MA-backed VF Mailbox Data "registers". 
+ * So we need to do a read-back on at least one byte of the VF Mailbox + * Data registers before doing the write to the VF Mailbox Control + * register. */ for (i = 0, p = cmd; i < size; i += 8) t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++)); + t4_read_reg(adapter, mbox_data); /* flush write */ + t4_write_reg(adapter, mbox_ctl, MBMSGVALID | MBOWNER(MBOX_OWNER_FW)); t4_read_reg(adapter, mbox_ctl); /* flush write */ diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c index 77d08e6..aed223b 100644 --- a/drivers/net/e1000/e1000_hw.c +++ b/drivers/net/e1000/e1000_hw.c @@ -130,10 +130,15 @@ static s32 e1000_set_phy_type(struct e1000_hw *hw) if (hw->mac_type == e1000_82541 || hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547 || - hw->mac_type == e1000_82547_rev_2) { + hw->mac_type == e1000_82547_rev_2) hw->phy_type = e1000_phy_igp; - break; - } + break; + case RTL8211B_PHY_ID: + hw->phy_type = e1000_phy_8211; + break; + case RTL8201N_PHY_ID: + hw->phy_type = e1000_phy_8201; + break; default: /* Should never have loaded on this device */ hw->phy_type = e1000_phy_undefined; @@ -318,6 +323,9 @@ s32 e1000_set_mac_type(struct e1000_hw *hw) case E1000_DEV_ID_82547GI: hw->mac_type = e1000_82547_rev_2; break; + case E1000_DEV_ID_INTEL_CE4100_GBE: + hw->mac_type = e1000_ce4100; + break; default: /* Should never have loaded on this device */ return -E1000_ERR_MAC_TYPE; @@ -372,6 +380,9 @@ void e1000_set_media_type(struct e1000_hw *hw) case e1000_82542_rev2_1: hw->media_type = e1000_media_type_fiber; break; + case e1000_ce4100: + hw->media_type = e1000_media_type_copper; + break; default: status = er32(STATUS); if (status & E1000_STATUS_TBIMODE) { @@ -460,6 +471,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw) /* Reset is performed on a shadow of the control register */ ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST)); break; + case e1000_ce4100: default: ew32(CTRL, (ctrl | E1000_CTRL_RST)); break; @@ -952,6 +964,67 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw) } /** + * e1000_copper_link_rtl_setup - Copper link setup for e1000_phy_rtl series. + * @hw: Struct containing variables accessed by shared code + * + * Commits changes to PHY configuration by calling e1000_phy_reset(). 
+ */ +static s32 e1000_copper_link_rtl_setup(struct e1000_hw *hw) +{ + s32 ret_val; + + /* SW reset the PHY so all changes take effect */ + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + return E1000_SUCCESS; +} + +static s32 gbe_dhg_phy_setup(struct e1000_hw *hw) +{ + s32 ret_val; + u32 ctrl_aux; + + switch (hw->phy_type) { + case e1000_phy_8211: + ret_val = e1000_copper_link_rtl_setup(hw); + if (ret_val) { + e_dbg("e1000_copper_link_rtl_setup failed!\n"); + return ret_val; + } + break; + case e1000_phy_8201: + /* Set RMII mode */ + ctrl_aux = er32(CTL_AUX); + ctrl_aux |= E1000_CTL_AUX_RMII; + ew32(CTL_AUX, ctrl_aux); + E1000_WRITE_FLUSH(); + + /* Disable the J/K bits required for receive */ + ctrl_aux = er32(CTL_AUX); + ctrl_aux |= 0x4; + ctrl_aux &= ~0x2; + ew32(CTL_AUX, ctrl_aux); + E1000_WRITE_FLUSH(); + ret_val = e1000_copper_link_rtl_setup(hw); + + if (ret_val) { + e_dbg("e1000_copper_link_rtl_setup failed!\n"); + return ret_val; + } + break; + default: + e_dbg("Error Resetting the PHY\n"); + return E1000_ERR_PHY_TYPE; + } + + return E1000_SUCCESS; +} + +/** * e1000_copper_link_preconfig - early configuration for copper * @hw: Struct containing variables accessed by shared code * @@ -1286,6 +1359,10 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) if (hw->autoneg_advertised == 0) hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; + /* IFE/RTL8201N PHY only supports 10/100 */ + if (hw->phy_type == e1000_phy_8201) + hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL; + e_dbg("Reconfiguring auto-neg advertisement params\n"); ret_val = e1000_phy_setup_autoneg(hw); if (ret_val) { @@ -1341,7 +1418,7 @@ static s32 e1000_copper_link_postconfig(struct e1000_hw *hw) s32 ret_val; e_dbg("e1000_copper_link_postconfig"); - if (hw->mac_type >= e1000_82544) { + if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) { e1000_config_collision_dist(hw); } else { ret_val = e1000_config_mac_to_phy(hw); @@ -1395,6 +1472,12 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw) ret_val = e1000_copper_link_mgp_setup(hw); if (ret_val) return ret_val; + } else { + ret_val = gbe_dhg_phy_setup(hw); + if (ret_val) { + e_dbg("gbe_dhg_phy_setup failed!\n"); + return ret_val; + } } if (hw->autoneg) { @@ -1461,10 +1544,11 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) return ret_val; /* Read the MII 1000Base-T Control Register (Address 9). */ - ret_val = - e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); if (ret_val) return ret_val; + else if (hw->phy_type == e1000_phy_8201) + mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK; /* Need to parse both autoneg_advertised and fc and set up * the appropriate PHY registers. 
First we will parse for @@ -1577,9 +1661,14 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); - ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); - if (ret_val) - return ret_val; + if (hw->phy_type == e1000_phy_8201) { + mii_1000t_ctrl_reg = 0; + } else { + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } return E1000_SUCCESS; } @@ -1860,7 +1949,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw) /* 82544 or newer MAC, Auto Speed Detection takes care of * MAC speed/duplex configuration.*/ - if (hw->mac_type >= e1000_82544) + if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) return E1000_SUCCESS; /* Read the Device Control Register and set the bits to Force Speed @@ -1870,27 +1959,49 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw) ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS); - /* Set up duplex in the Device Control and Transmit Control - * registers depending on negotiated values. - */ - ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); - if (ret_val) - return ret_val; + switch (hw->phy_type) { + case e1000_phy_8201: + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; - if (phy_data & M88E1000_PSSR_DPLX) - ctrl |= E1000_CTRL_FD; - else - ctrl &= ~E1000_CTRL_FD; + if (phy_data & RTL_PHY_CTRL_FD) + ctrl |= E1000_CTRL_FD; + else + ctrl &= ~E1000_CTRL_FD; - e1000_config_collision_dist(hw); + if (phy_data & RTL_PHY_CTRL_SPD_100) + ctrl |= E1000_CTRL_SPD_100; + else + ctrl |= E1000_CTRL_SPD_10; - /* Set up speed in the Device Control register depending on - * negotiated values. - */ - if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) - ctrl |= E1000_CTRL_SPD_1000; - else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS) - ctrl |= E1000_CTRL_SPD_100; + e1000_config_collision_dist(hw); + break; + default: + /* Set up duplex in the Device Control and Transmit Control + * registers depending on negotiated values. + */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & M88E1000_PSSR_DPLX) + ctrl |= E1000_CTRL_FD; + else + ctrl &= ~E1000_CTRL_FD; + + e1000_config_collision_dist(hw); + + /* Set up speed in the Device Control register depending on + * negotiated values. + */ + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) + ctrl |= E1000_CTRL_SPD_1000; + else if ((phy_data & M88E1000_PSSR_SPEED) == + M88E1000_PSSR_100MBS) + ctrl |= E1000_CTRL_SPD_100; + } /* Write the configured values back to the Device Control Reg. */ ew32(CTRL, ctrl); @@ -2401,7 +2512,8 @@ s32 e1000_check_for_link(struct e1000_hw *hw) * speed/duplex on the MAC to the current PHY speed/duplex * settings. */ - if (hw->mac_type >= e1000_82544) + if ((hw->mac_type >= e1000_82544) && + (hw->mac_type != e1000_ce4100)) e1000_config_collision_dist(hw); else { ret_val = e1000_config_mac_to_phy(hw); @@ -2738,7 +2850,7 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, { u32 i; u32 mdic = 0; - const u32 phy_addr = 1; + const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; e_dbg("e1000_read_phy_reg_ex"); @@ -2752,28 +2864,61 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, * Control register. The MAC will take care of interfacing with the * PHY to retrieve the desired data. 
*/ - mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | - (phy_addr << E1000_MDIC_PHY_SHIFT) | - (E1000_MDIC_OP_READ)); + if (hw->mac_type == e1000_ce4100) { + mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (INTEL_CE_GBE_MDIC_OP_READ) | + (INTEL_CE_GBE_MDIC_GO)); - ew32(MDIC, mdic); + writel(mdic, E1000_MDIO_CMD); - /* Poll the ready bit to see if the MDI read completed */ - for (i = 0; i < 64; i++) { - udelay(50); - mdic = er32(MDIC); - if (mdic & E1000_MDIC_READY) - break; - } - if (!(mdic & E1000_MDIC_READY)) { - e_dbg("MDI Read did not complete\n"); - return -E1000_ERR_PHY; - } - if (mdic & E1000_MDIC_ERROR) { - e_dbg("MDI Error\n"); - return -E1000_ERR_PHY; + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 64; i++) { + udelay(50); + mdic = readl(E1000_MDIO_CMD); + if (!(mdic & INTEL_CE_GBE_MDIC_GO)) + break; + } + + if (mdic & INTEL_CE_GBE_MDIC_GO) { + e_dbg("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + + mdic = readl(E1000_MDIO_STS); + if (mdic & INTEL_CE_GBE_MDIC_READ_ERROR) { + e_dbg("MDI Read Error\n"); + return -E1000_ERR_PHY; + } + *phy_data = (u16) mdic; + } else { + mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + ew32(MDIC, mdic); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 64; i++) { + udelay(50); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { + e_dbg("MDI Error\n"); + return -E1000_ERR_PHY; + } + *phy_data = (u16) mdic; } - *phy_data = (u16) mdic; } else { /* We must first send a preamble through the MDIO pin to signal the * beginning of an MII instruction. This is done by sending 32 @@ -2840,7 +2985,7 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, { u32 i; u32 mdic = 0; - const u32 phy_addr = 1; + const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; e_dbg("e1000_write_phy_reg_ex"); @@ -2850,27 +2995,54 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, } if (hw->mac_type > e1000_82543) { - /* Set up Op-code, Phy Address, register address, and data intended - * for the PHY register in the MDI Control register. The MAC will take - * care of interfacing with the PHY to send the desired data. + /* Set up Op-code, Phy Address, register address, and data + * intended for the PHY register in the MDI Control register. + * The MAC will take care of interfacing with the PHY to send + * the desired data. 
*/ - mdic = (((u32) phy_data) | - (reg_addr << E1000_MDIC_REG_SHIFT) | - (phy_addr << E1000_MDIC_PHY_SHIFT) | - (E1000_MDIC_OP_WRITE)); + if (hw->mac_type == e1000_ce4100) { + mdic = (((u32) phy_data) | + (reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (INTEL_CE_GBE_MDIC_OP_WRITE) | + (INTEL_CE_GBE_MDIC_GO)); - ew32(MDIC, mdic); + writel(mdic, E1000_MDIO_CMD); - /* Poll the ready bit to see if the MDI read completed */ - for (i = 0; i < 641; i++) { - udelay(5); - mdic = er32(MDIC); - if (mdic & E1000_MDIC_READY) - break; - } - if (!(mdic & E1000_MDIC_READY)) { - e_dbg("MDI Write did not complete\n"); - return -E1000_ERR_PHY; + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 640; i++) { + udelay(5); + mdic = readl(E1000_MDIO_CMD); + if (!(mdic & INTEL_CE_GBE_MDIC_GO)) + break; + } + if (mdic & INTEL_CE_GBE_MDIC_GO) { + e_dbg("MDI Write did not complete\n"); + return -E1000_ERR_PHY; + } + } else { + mdic = (((u32) phy_data) | + (reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); + + ew32(MDIC, mdic); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 641; i++) { + udelay(5); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Write did not complete\n"); + return -E1000_ERR_PHY; + } } } else { /* We'll need to use the SW defined pins to shift the write command @@ -3048,6 +3220,11 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw) if (hw->phy_id == M88E1011_I_PHY_ID) match = true; break; + case e1000_ce4100: + if ((hw->phy_id == RTL8211B_PHY_ID) || + (hw->phy_id == RTL8201N_PHY_ID)) + match = true; + break; case e1000_82541: case e1000_82541_rev_2: case e1000_82547: @@ -3291,6 +3468,9 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info) if (hw->phy_type == e1000_phy_igp) return e1000_phy_igp_get_info(hw, phy_info); + else if ((hw->phy_type == e1000_phy_8211) || + (hw->phy_type == e1000_phy_8201)) + return E1000_SUCCESS; else return e1000_phy_m88_get_info(hw, phy_info); } @@ -3742,6 +3922,12 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, e_dbg("e1000_read_eeprom"); + if (hw->mac_type == e1000_ce4100) { + GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words, + data); + return E1000_SUCCESS; + } + /* If eeprom is not yet detected, do so now */ if (eeprom->word_size == 0) e1000_init_eeprom_params(hw); @@ -3904,6 +4090,12 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, e_dbg("e1000_write_eeprom"); + if (hw->mac_type == e1000_ce4100) { + GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words, + data); + return E1000_SUCCESS; + } + /* If eeprom is not yet detected, do so now */ if (eeprom->word_size == 0) e1000_init_eeprom_params(hw); diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h index ecd9f6c..f5514a0 100644 --- a/drivers/net/e1000/e1000_hw.h +++ b/drivers/net/e1000/e1000_hw.h @@ -52,6 +52,7 @@ typedef enum { e1000_82545, e1000_82545_rev_3, e1000_82546, + e1000_ce4100, e1000_82546_rev_3, e1000_82541, e1000_82541_rev_2, @@ -209,9 +210,11 @@ typedef enum { } e1000_1000t_rx_status; typedef enum { - e1000_phy_m88 = 0, - e1000_phy_igp, - e1000_phy_undefined = 0xFF + e1000_phy_m88 = 0, + e1000_phy_igp, + e1000_phy_8211, + e1000_phy_8201, + e1000_phy_undefined = 0xFF } e1000_phy_type; typedef enum { @@ -442,6 +445,7 @@ void e1000_io_write(struct e1000_hw *hw, unsigned 
long port, u32 value); #define E1000_DEV_ID_82547EI 0x1019 #define E1000_DEV_ID_82547EI_MOBILE 0x101A #define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 +#define E1000_DEV_ID_INTEL_CE4100_GBE 0x2E6E #define NODE_ADDRESS_SIZE 6 #define ETH_LENGTH_OF_ADDRESS 6 @@ -808,6 +812,16 @@ struct e1000_ffvt_entry { #define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ #define E1000_FLA 0x0001C /* Flash Access - RW */ #define E1000_MDIC 0x00020 /* MDI Control - RW */ + +extern void __iomem *ce4100_gbe_mdio_base_virt; +#define INTEL_CE_GBE_MDIO_RCOMP_BASE (ce4100_gbe_mdio_base_virt) +#define E1000_MDIO_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0) +#define E1000_MDIO_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 4) +#define E1000_MDIO_DRV (INTEL_CE_GBE_MDIO_RCOMP_BASE + 8) +#define E1000_MDC_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0xC) +#define E1000_RCOMP_CTL (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x20) +#define E1000_RCOMP_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x24) + #define E1000_SCTL 0x00024 /* SerDes Control - RW */ #define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */ #define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ @@ -820,6 +834,34 @@ struct e1000_ffvt_entry { #define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ #define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ #define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ + +/* Auxiliary Control Register. This register is CE4100 specific, + * RMII/RGMII function is switched by this register - RW + * Following are bits definitions of the Auxiliary Control Register + */ +#define E1000_CTL_AUX 0x000E0 +#define E1000_CTL_AUX_END_SEL_SHIFT 10 +#define E1000_CTL_AUX_ENDIANESS_SHIFT 8 +#define E1000_CTL_AUX_RGMII_RMII_SHIFT 0 + +/* descriptor and packet transfer use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_DES_PKT (0x0 << E1000_CTL_AUX_END_SEL_SHIFT) +/* descriptor use CTL_AUX.ENDIANESS, packet use default */ +#define E1000_CTL_AUX_DES (0x1 << E1000_CTL_AUX_END_SEL_SHIFT) +/* descriptor use default, packet use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_PKT (0x2 << E1000_CTL_AUX_END_SEL_SHIFT) +/* all use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_ALL (0x3 << E1000_CTL_AUX_END_SEL_SHIFT) + +#define E1000_CTL_AUX_RGMII (0x0 << E1000_CTL_AUX_RGMII_RMII_SHIFT) +#define E1000_CTL_AUX_RMII (0x1 << E1000_CTL_AUX_RGMII_RMII_SHIFT) + +/* LW little endian, Byte big endian */ +#define E1000_CTL_AUX_LWLE_BBE (0x0 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWLE_BLE (0x1 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWBE_BBE (0x2 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWBE_BLE (0x3 << E1000_CTL_AUX_ENDIANESS_SHIFT) + #define E1000_RCTL 0x00100 /* RX Control - RW */ #define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */ #define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */ @@ -1011,6 +1053,7 @@ struct e1000_ffvt_entry { * in more current versions of the 8254x. Despite the difference in location, * the registers function in the same manner. 
*/ +#define E1000_82542_CTL_AUX E1000_CTL_AUX #define E1000_82542_CTRL E1000_CTRL #define E1000_82542_CTRL_DUP E1000_CTRL_DUP #define E1000_82542_STATUS E1000_STATUS @@ -1571,6 +1614,11 @@ struct e1000_hw { #define E1000_MDIC_INT_EN 0x20000000 #define E1000_MDIC_ERROR 0x40000000 +#define INTEL_CE_GBE_MDIC_OP_WRITE 0x04000000 +#define INTEL_CE_GBE_MDIC_OP_READ 0x00000000 +#define INTEL_CE_GBE_MDIC_GO 0x80000000 +#define INTEL_CE_GBE_MDIC_READ_ERROR 0x80000000 + #define E1000_KUMCTRLSTA_MASK 0x0000FFFF #define E1000_KUMCTRLSTA_OFFSET 0x001F0000 #define E1000_KUMCTRLSTA_OFFSET_SHIFT 16 @@ -2871,6 +2919,11 @@ struct e1000_host_command_info { #define M88E1111_I_PHY_ID 0x01410CC0 #define L1LXT971A_PHY_ID 0x001378E0 +#define RTL8211B_PHY_ID 0x001CC910 +#define RTL8201N_PHY_ID 0x8200 +#define RTL_PHY_CTRL_FD 0x0100 /* Full duplex.0=half; 1=full */ +#define RTL_PHY_CTRL_SPD_100 0x200000 /* Force 100Mb */ + /* Bits... * 15-5: page * 4-0: register offset diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 340e12d..4ff88a6 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -28,6 +28,12 @@ #include "e1000.h" #include <net/ip6_checksum.h> +#include <linux/io.h> + +/* Intel Media SOC GbE MDIO physical base address */ +static unsigned long ce4100_gbe_mdio_base_phy; +/* Intel Media SOC GbE MDIO virtual base address */ +void __iomem *ce4100_gbe_mdio_base_virt; char e1000_driver_name[] = "e1000"; static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; @@ -79,6 +85,7 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { INTEL_E1000_ETHERNET_DEVICE(0x108A), INTEL_E1000_ETHERNET_DEVICE(0x1099), INTEL_E1000_ETHERNET_DEVICE(0x10B5), + INTEL_E1000_ETHERNET_DEVICE(0x2E6E), /* required last entry */ {0,} }; @@ -459,6 +466,7 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter) case e1000_82545: case e1000_82545_rev_3: case e1000_82546: + case e1000_ce4100: case e1000_82546_rev_3: case e1000_82541: case e1000_82541_rev_2: @@ -573,6 +581,7 @@ void e1000_reset(struct e1000_adapter *adapter) case e1000_82545: case e1000_82545_rev_3: case e1000_82546: + case e1000_ce4100: case e1000_82546_rev_3: pba = E1000_PBA_48K; break; @@ -894,6 +903,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, static int global_quad_port_a = 0; /* global ksp3 port a indication */ int i, err, pci_using_dac; u16 eeprom_data = 0; + u16 tmp = 0; u16 eeprom_apme_mask = E1000_EEPROM_APME; int bars, need_ioport; @@ -996,6 +1006,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev, goto err_sw_init; err = -EIO; + if (hw->mac_type == e1000_ce4100) { + ce4100_gbe_mdio_base_phy = pci_resource_start(pdev, BAR_1); + ce4100_gbe_mdio_base_virt = ioremap(ce4100_gbe_mdio_base_phy, + pci_resource_len(pdev, BAR_1)); + + if (!ce4100_gbe_mdio_base_virt) + goto err_mdio_ioremap; + } if (hw->mac_type >= e1000_82543) { netdev->features = NETIF_F_SG | @@ -1135,6 +1153,20 @@ static int __devinit e1000_probe(struct pci_dev *pdev, adapter->wol = adapter->eeprom_wol; device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + /* Auto detect PHY address */ + if (hw->mac_type == e1000_ce4100) { + for (i = 0; i < 32; i++) { + hw->phy_addr = i; + e1000_read_phy_reg(hw, PHY_ID2, &tmp); + if (tmp == 0 || tmp == 0xFF) { + if (i == 31) + goto err_eeprom; + continue; + } else + break; + } + } + /* reset the hardware with the new settings */ e1000_reset(adapter); @@ -1171,6 +1203,8 @@ err_eeprom: kfree(adapter->rx_ring); err_dma: err_sw_init: +err_mdio_ioremap: + 
iounmap(ce4100_gbe_mdio_base_virt); iounmap(hw->hw_addr); err_ioremap: free_netdev(netdev); @@ -1409,6 +1443,7 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start, /* First rev 82545 and 82546 need to not allow any memory * write location to cross 64k boundary due to errata 23 */ if (hw->mac_type == e1000_82545 || + hw->mac_type == e1000_ce4100 || hw->mac_type == e1000_82546) { return ((begin ^ (end - 1)) >> 16) != 0 ? false : true; } diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h index edd1c75..55c1711 100644 --- a/drivers/net/e1000/e1000_osdep.h +++ b/drivers/net/e1000/e1000_osdep.h @@ -34,12 +34,21 @@ #ifndef _E1000_OSDEP_H_ #define _E1000_OSDEP_H_ -#include <linux/types.h> -#include <linux/pci.h> -#include <linux/delay.h> #include <asm/io.h> -#include <linux/interrupt.h> -#include <linux/sched.h> + +#define CONFIG_RAM_BASE 0x60000 +#define GBE_CONFIG_OFFSET 0x0 + +#define GBE_CONFIG_RAM_BASE \ + ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET)) + +#define GBE_CONFIG_BASE_VIRT phys_to_virt(GBE_CONFIG_RAM_BASE) + +#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \ + (iowrite16_rep(base + offset, data, count)) + +#define GBE_CONFIG_FLASH_READ(base, offset, count, data) \ + (ioread16_rep(base + (offset << 1), data, count)) #define er32(reg) \ (readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \ diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c index e57e409..cb6c7b1 100644 --- a/drivers/net/e1000e/82571.c +++ b/drivers/net/e1000e/82571.c @@ -78,6 +78,8 @@ static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw); static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw); static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw); static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw); +static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active); +static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active); /** * e1000_init_phy_params_82571 - Init PHY func ptrs. @@ -113,6 +115,8 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw) phy->type = e1000_phy_bm; phy->ops.acquire = e1000_get_hw_semaphore_82574; phy->ops.release = e1000_put_hw_semaphore_82574; + phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574; break; default: return -E1000_ERR_PHY; @@ -121,29 +125,36 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw) /* This can only be done after all function pointers are setup. 
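/*
 * Illustration only, not from the patch: the e1000_check_64k_bound() hunk
 * above extends the errata-23 test to the CE4100 MAC.  The test itself,
 * ((begin ^ (end - 1)) >> 16) != 0, is true exactly when the first and last
 * byte of the buffer sit in different 64 KB pages, as this toy program shows.
 */
#include <stdio.h>

static int crosses_64k(unsigned long begin, unsigned long len)
{
	unsigned long end = begin + len;

	return ((begin ^ (end - 1)) >> 16) != 0;
}

int main(void)
{
	printf("%d\n", crosses_64k(0x0000fff0UL, 0x20)); /* 1: spans 0xffff/0x10000 */
	printf("%d\n", crosses_64k(0x00010000UL, 0x20)); /* 0: stays in one page */
	return 0;
}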
*/ ret_val = e1000_get_phy_id_82571(hw); + if (ret_val) { + e_dbg("Error getting PHY ID\n"); + return ret_val; + } /* Verify phy id */ switch (hw->mac.type) { case e1000_82571: case e1000_82572: if (phy->id != IGP01E1000_I_PHY_ID) - return -E1000_ERR_PHY; + ret_val = -E1000_ERR_PHY; break; case e1000_82573: if (phy->id != M88E1111_I_PHY_ID) - return -E1000_ERR_PHY; + ret_val = -E1000_ERR_PHY; break; case e1000_82574: case e1000_82583: if (phy->id != BME1000_E_PHY_ID_R2) - return -E1000_ERR_PHY; + ret_val = -E1000_ERR_PHY; break; default: - return -E1000_ERR_PHY; + ret_val = -E1000_ERR_PHY; break; } - return 0; + if (ret_val) + e_dbg("PHY ID unknown: type = 0x%08x\n", phy->id); + + return ret_val; } /** @@ -649,6 +660,58 @@ static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw) } /** + * e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. + * LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active) +{ + u16 data = er32(POEMB); + + if (active) + data |= E1000_PHY_CTRL_D0A_LPLU; + else + data &= ~E1000_PHY_CTRL_D0A_LPLU; + + ew32(POEMB, data); + return 0; +} + +/** + * e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * The low power link up (lplu) state is set to the power management level D3 + * when active is true, else clear lplu for D3. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active) +{ + u16 data = er32(POEMB); + + if (!active) { + data &= ~E1000_PHY_CTRL_NOND0A_LPLU; + } else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) || + (hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= E1000_PHY_CTRL_NOND0A_LPLU; + } + + ew32(POEMB, data); + return 0; +} + +/** * e1000_acquire_nvm_82571 - Request for access to the EEPROM * @hw: pointer to the HW structure * @@ -956,7 +1019,7 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active) **/ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) { - u32 ctrl, ctrl_ext, icr; + u32 ctrl, ctrl_ext; s32 ret_val; /* @@ -1040,7 +1103,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) /* Clear any pending interrupt events. 
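/*
 * Illustration only, not from the patch: the new 82574 LPLU callbacks above
 * are plain read-modify-write accesses to POEMB.  For D3 the bit is only set
 * when the advertised link modes include the low speeds (all speeds, 10/100,
 * or 10 only).  The sketch below reproduces that decision with placeholder
 * register storage and a placeholder bit value.
 */
#include <stdint.h>
#include <stdbool.h>

#define LPLU_NOND0A_BIT	0x04u		/* placeholder for the POEMB LPLU bit */

static uint32_t poemb;			/* stand-in for er32/ew32(POEMB) */

static void set_d3_lplu_sketch(bool active, bool low_speeds_advertised)
{
	uint32_t data = poemb;

	if (!active)
		data &= ~LPLU_NOND0A_BIT;
	else if (low_speeds_advertised)
		data |= LPLU_NOND0A_BIT;

	poemb = data;
}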
*/ ew32(IMC, 0xffffffff); - icr = er32(ICR); + er32(ICR); if (hw->mac.type == e1000_82571) { /* Install any alternate MAC address into RAR0 */ diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index 2c913b8..5255be7 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h @@ -38,6 +38,7 @@ #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/pci-aspm.h> +#include <linux/crc32.h> #include "hw.h" @@ -496,6 +497,8 @@ extern void e1000e_free_tx_resources(struct e1000_adapter *adapter); extern void e1000e_update_stats(struct e1000_adapter *adapter); extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); +extern void e1000e_get_hw_control(struct e1000_adapter *adapter); +extern void e1000e_release_hw_control(struct e1000_adapter *adapter); extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state); extern unsigned int copybreak; diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c index b18c644..e45a61c 100644 --- a/drivers/net/e1000e/es2lan.c +++ b/drivers/net/e1000e/es2lan.c @@ -784,7 +784,7 @@ static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed, **/ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) { - u32 ctrl, icr; + u32 ctrl; s32 ret_val; /* @@ -818,7 +818,7 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) /* Clear any pending interrupt events. */ ew32(IMC, 0xffffffff); - icr = er32(ICR); + er32(ICR); ret_val = e1000_check_alt_mac_addr_generic(hw); diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c index affcacf..f8ed03d 100644 --- a/drivers/net/e1000e/ethtool.c +++ b/drivers/net/e1000e/ethtool.c @@ -624,20 +624,24 @@ static void e1000_get_drvinfo(struct net_device *netdev, struct e1000_adapter *adapter = netdev_priv(netdev); char firmware_version[32]; - strncpy(drvinfo->driver, e1000e_driver_name, 32); - strncpy(drvinfo->version, e1000e_driver_version, 32); + strncpy(drvinfo->driver, e1000e_driver_name, + sizeof(drvinfo->driver) - 1); + strncpy(drvinfo->version, e1000e_driver_version, + sizeof(drvinfo->version) - 1); /* * EEPROM image version # is reported as firmware version # for * PCI-E controllers */ - sprintf(firmware_version, "%d.%d-%d", + snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d", (adapter->eeprom_vers & 0xF000) >> 12, (adapter->eeprom_vers & 0x0FF0) >> 4, (adapter->eeprom_vers & 0x000F)); - strncpy(drvinfo->fw_version, firmware_version, 32); - strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); + strncpy(drvinfo->fw_version, firmware_version, + sizeof(drvinfo->fw_version) - 1); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info) - 1); drvinfo->regdump_len = e1000_get_regs_len(netdev); drvinfo->eedump_len = e1000_get_eeprom_len(netdev); } @@ -1704,6 +1708,19 @@ static void e1000_diag_test(struct net_device *netdev, bool if_running = netif_running(netdev); set_bit(__E1000_TESTING, &adapter->state); + + if (!if_running) { + /* Get control of and reset hardware */ + if (adapter->flags & FLAG_HAS_AMT) + e1000e_get_hw_control(adapter); + + e1000e_power_up_phy(adapter); + + adapter->hw.phy.autoneg_wait_to_complete = 1; + e1000e_reset(adapter); + adapter->hw.phy.autoneg_wait_to_complete = 0; + } + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { /* Offline tests */ @@ -1717,8 +1734,6 @@ static void e1000_diag_test(struct net_device *netdev, if (if_running) /* indicate we're in test mode */ 
dev_close(netdev); - else - e1000e_reset(adapter); if (e1000_reg_test(adapter, &data[0])) eth_test->flags |= ETH_TEST_FL_FAILED; @@ -1732,8 +1747,6 @@ static void e1000_diag_test(struct net_device *netdev, eth_test->flags |= ETH_TEST_FL_FAILED; e1000e_reset(adapter); - /* make sure the phy is powered up */ - e1000e_power_up_phy(adapter); if (e1000_loopback_test(adapter, &data[3])) eth_test->flags |= ETH_TEST_FL_FAILED; @@ -1755,28 +1768,29 @@ static void e1000_diag_test(struct net_device *netdev, if (if_running) dev_open(netdev); } else { - if (!if_running && (adapter->flags & FLAG_HAS_AMT)) { - clear_bit(__E1000_TESTING, &adapter->state); - dev_open(netdev); - set_bit(__E1000_TESTING, &adapter->state); - } + /* Online tests */ e_info("online testing starting\n"); - /* Online tests */ - if (e1000_link_test(adapter, &data[4])) - eth_test->flags |= ETH_TEST_FL_FAILED; - /* Online tests aren't run; pass by default */ + /* register, eeprom, intr and loopback tests not run online */ data[0] = 0; data[1] = 0; data[2] = 0; data[3] = 0; - if (!if_running && (adapter->flags & FLAG_HAS_AMT)) - dev_close(netdev); + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; clear_bit(__E1000_TESTING, &adapter->state); } + + if (!if_running) { + e1000e_reset(adapter); + + if (adapter->flags & FLAG_HAS_AMT) + e1000e_release_hw_control(adapter); + } + msleep_interruptible(4 * 1000); } diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h index ba302a5..e774380 100644 --- a/drivers/net/e1000e/hw.h +++ b/drivers/net/e1000e/hw.h @@ -83,6 +83,7 @@ enum e1e_registers { E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */ E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */ E1000_PHY_CTRL = 0x00F10, /* PHY Control Register in CSR */ +#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */ E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */ E1000_PBS = 0x01008, /* Packet Buffer Size */ E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */ diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index d86cc08..5328a292 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c @@ -1395,22 +1395,6 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw) } } -static u32 e1000_calc_rx_da_crc(u8 mac[]) -{ - u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */ - u32 i, j, mask, crc; - - crc = 0xffffffff; - for (i = 0; i < 6; i++) { - crc = crc ^ mac[i]; - for (j = 8; j > 0; j--) { - mask = (crc & 1) * (-1); - crc = (crc >> 1) ^ (poly & mask); - } - } - return ~crc; -} - /** * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation * with 82579 PHY @@ -1453,8 +1437,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) mac_addr[4] = (addr_high & 0xFF); mac_addr[5] = ((addr_high >> 8) & 0xFF); - ew32(PCH_RAICC(i), - e1000_calc_rx_da_crc(mac_addr)); + ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr)); } /* Write Rx addresses to the PHY */ @@ -2977,7 +2960,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) { struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u16 reg; - u32 ctrl, icr, kab; + u32 ctrl, kab; s32 ret_val; /* @@ -3067,7 +3050,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) ew32(CRC_OFFSET, 0x65656565); ew32(IMC, 0xffffffff); - icr = er32(ICR); + er32(ICR); kab = er32(KABGTXD); kab |= E1000_KABGTXD_BGSQLBIAS; @@ -3118,7 +3101,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) * Reset the phy after disabling host wakeup to 
reset the Rx buffer. */ if (hw->phy.type == e1000_phy_82578) { - hw->phy.ops.read_reg(hw, BM_WUC, &i); + e1e_rphy(hw, BM_WUC, &i); ret_val = e1000_phy_hw_reset_ich8lan(hw); if (ret_val) return ret_val; @@ -3276,9 +3259,8 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) (hw->phy.type == e1000_phy_82577)) { ew32(FCRTV_PCH, hw->fc.refresh_time); - ret_val = hw->phy.ops.write_reg(hw, - PHY_REG(BM_PORT_CTRL_PAGE, 27), - hw->fc.pause_time); + ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27), + hw->fc.pause_time); if (ret_val) return ret_val; } @@ -3342,8 +3324,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) return ret_val; break; case e1000_phy_ife: - ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, - ®_data); + ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, ®_data); if (ret_val) return ret_val; @@ -3361,8 +3342,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) reg_data |= IFE_PMC_AUTO_MDIX; break; } - ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, - reg_data); + ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data); if (ret_val) return ret_val; break; @@ -3646,7 +3626,8 @@ static s32 e1000_led_off_ich8lan(struct e1000_hw *hw) { if (hw->phy.type == e1000_phy_ife) return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, - (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF)); + (IFE_PSCL_PROBE_MODE | + IFE_PSCL_PROBE_LEDS_OFF)); ew32(LEDCTL, hw->mac.ledctl_mode1); return 0; @@ -3660,8 +3641,7 @@ static s32 e1000_led_off_ich8lan(struct e1000_hw *hw) **/ static s32 e1000_setup_led_pchlan(struct e1000_hw *hw) { - return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, - (u16)hw->mac.ledctl_mode1); + return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1); } /** @@ -3672,8 +3652,7 @@ static s32 e1000_setup_led_pchlan(struct e1000_hw *hw) **/ static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw) { - return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, - (u16)hw->mac.ledctl_default); + return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default); } /** @@ -3704,7 +3683,7 @@ static s32 e1000_led_on_pchlan(struct e1000_hw *hw) } } - return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data); + return e1e_wphy(hw, HV_LED_CONFIG, data); } /** @@ -3735,7 +3714,7 @@ static s32 e1000_led_off_pchlan(struct e1000_hw *hw) } } - return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data); + return e1e_wphy(hw, HV_LED_CONFIG, data); } /** @@ -3844,20 +3823,20 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) if ((hw->phy.type == e1000_phy_82578) || (hw->phy.type == e1000_phy_82579) || (hw->phy.type == e1000_phy_82577)) { - hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data); - hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data); - hw->phy.ops.read_reg(hw, HV_ECOL_UPPER, &phy_data); - hw->phy.ops.read_reg(hw, HV_ECOL_LOWER, &phy_data); - hw->phy.ops.read_reg(hw, HV_MCC_UPPER, &phy_data); - hw->phy.ops.read_reg(hw, HV_MCC_LOWER, &phy_data); - hw->phy.ops.read_reg(hw, HV_LATECOL_UPPER, &phy_data); - hw->phy.ops.read_reg(hw, HV_LATECOL_LOWER, &phy_data); - hw->phy.ops.read_reg(hw, HV_COLC_UPPER, &phy_data); - hw->phy.ops.read_reg(hw, HV_COLC_LOWER, &phy_data); - hw->phy.ops.read_reg(hw, HV_DC_UPPER, &phy_data); - hw->phy.ops.read_reg(hw, HV_DC_LOWER, &phy_data); - hw->phy.ops.read_reg(hw, HV_TNCRS_UPPER, &phy_data); - hw->phy.ops.read_reg(hw, HV_TNCRS_LOWER, &phy_data); + e1e_rphy(hw, HV_SCC_UPPER, &phy_data); + e1e_rphy(hw, HV_SCC_LOWER, &phy_data); + e1e_rphy(hw, HV_ECOL_UPPER, &phy_data); + e1e_rphy(hw, HV_ECOL_LOWER, &phy_data); + e1e_rphy(hw, 
HV_MCC_UPPER, &phy_data); + e1e_rphy(hw, HV_MCC_LOWER, &phy_data); + e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data); + e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data); + e1e_rphy(hw, HV_COLC_UPPER, &phy_data); + e1e_rphy(hw, HV_COLC_LOWER, &phy_data); + e1e_rphy(hw, HV_DC_UPPER, &phy_data); + e1e_rphy(hw, HV_DC_LOWER, &phy_data); + e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data); + e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data); } } diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c index 7e55170..ff28721 100644 --- a/drivers/net/e1000e/lib.c +++ b/drivers/net/e1000e/lib.c @@ -1135,7 +1135,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg); if (ret_val) return ret_val; - ret_val = e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg); + ret_val = + e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg); if (ret_val) return ret_val; diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index fe50242..fa5b604 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -1980,15 +1980,15 @@ static void e1000_irq_enable(struct e1000_adapter *adapter) } /** - * e1000_get_hw_control - get control of the h/w from f/w + * e1000e_get_hw_control - get control of the h/w from f/w * @adapter: address of board private structure * - * e1000_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit. + * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit. * For ASF and Pass Through versions of f/w this means that * the driver is loaded. For AMT version (only with 82573) * of the f/w this means that the network i/f is open. **/ -static void e1000_get_hw_control(struct e1000_adapter *adapter) +void e1000e_get_hw_control(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 ctrl_ext; @@ -2005,16 +2005,16 @@ static void e1000_get_hw_control(struct e1000_adapter *adapter) } /** - * e1000_release_hw_control - release control of the h/w to f/w + * e1000e_release_hw_control - release control of the h/w to f/w * @adapter: address of board private structure * - * e1000_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit. + * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit. * For ASF and Pass Through versions of f/w this means that the * driver is no longer loaded. For AMT version (only with 82573) i * of the f/w this means that the network i/f is closed. 
* **/ -static void e1000_release_hw_control(struct e1000_adapter *adapter) +void e1000e_release_hw_control(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 ctrl_ext; @@ -2445,7 +2445,7 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && (vid == adapter->mng_vlan_id)) { /* release control to f/w */ - e1000_release_hw_control(adapter); + e1000e_release_hw_control(adapter); return; } @@ -2734,6 +2734,9 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); else ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); + + if (ret_val) + e_dbg("failed to enable jumbo frame workaround mode\n"); } /* Program MC offset vector base */ @@ -3184,7 +3187,6 @@ void e1000e_reset(struct e1000_adapter *adapter) ew32(PBA, pba); } - /* * flow control settings * @@ -3272,7 +3274,7 @@ void e1000e_reset(struct e1000_adapter *adapter) * that the network interface is in control */ if (adapter->flags & FLAG_HAS_AMT) - e1000_get_hw_control(adapter); + e1000e_get_hw_control(adapter); ew32(WUC, 0); @@ -3285,6 +3287,13 @@ void e1000e_reset(struct e1000_adapter *adapter) ew32(VET, ETH_P_8021Q); e1000e_reset_adaptive(hw); + + if (!netif_running(adapter->netdev) && + !test_bit(__E1000_TESTING, &adapter->state)) { + e1000_power_down_phy(adapter); + return; + } + e1000_get_phy_info(hw); if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && @@ -3570,7 +3579,7 @@ static int e1000_open(struct net_device *netdev) * interface is now open and reset the part to a known state. */ if (adapter->flags & FLAG_HAS_AMT) { - e1000_get_hw_control(adapter); + e1000e_get_hw_control(adapter); e1000e_reset(adapter); } @@ -3634,7 +3643,7 @@ static int e1000_open(struct net_device *netdev) return 0; err_req_irq: - e1000_release_hw_control(adapter); + e1000e_release_hw_control(adapter); e1000_power_down_phy(adapter); e1000e_free_rx_resources(adapter); err_setup_rx: @@ -3689,8 +3698,9 @@ static int e1000_close(struct net_device *netdev) * If AMT is enabled, let the firmware know that the network * interface is now closed */ - if (adapter->flags & FLAG_HAS_AMT) - e1000_release_hw_control(adapter); + if ((adapter->flags & FLAG_HAS_AMT) && + !test_bit(__E1000_TESTING, &adapter->state)) + e1000e_release_hw_control(adapter); if ((adapter->flags & FLAG_HAS_ERT) || (adapter->hw.mac.type == e1000_pch2lan)) @@ -5209,7 +5219,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, * Release control of h/w to f/w. If f/w is AMT enabled, this * would have already happened in close and is redundant. */ - e1000_release_hw_control(adapter); + e1000e_release_hw_control(adapter); pci_disable_device(pdev); @@ -5366,7 +5376,7 @@ static int __e1000_resume(struct pci_dev *pdev) * under the control of the driver. */ if (!(adapter->flags & FLAG_HAS_AMT)) - e1000_get_hw_control(adapter); + e1000e_get_hw_control(adapter); return 0; } @@ -5613,7 +5623,7 @@ static void e1000_io_resume(struct pci_dev *pdev) * under the control of the driver. 
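/*
 * Illustration only, not from the patch: e1000e_get_hw_control() and
 * e1000e_release_hw_control(), renamed and exported in this series so the
 * ethtool offline self-test can borrow the hardware, boil down to setting or
 * clearing a DRV_LOAD ownership bit that the ASF/AMT firmware watches.  The
 * bit position below is a placeholder.
 */
#include <stdint.h>

#define DRV_LOAD_BIT	0x10000000u	/* placeholder for CTRL_EXT.DRV_LOAD */

static uint32_t ctrl_ext;		/* stand-in for er32/ew32(CTRL_EXT) */

static void get_hw_control_sketch(void)
{
	ctrl_ext |= DRV_LOAD_BIT;	/* driver owns the NIC */
}

static void release_hw_control_sketch(void)
{
	ctrl_ext &= ~DRV_LOAD_BIT;	/* hand the NIC back to firmware */
}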
*/ if (!(adapter->flags & FLAG_HAS_AMT)) - e1000_get_hw_control(adapter); + e1000e_get_hw_control(adapter); } @@ -5636,7 +5646,7 @@ static void e1000_print_device_info(struct e1000_adapter *adapter) ret_val = e1000_read_pba_string_generic(hw, pba_str, E1000_PBANUM_LENGTH); if (ret_val) - strcpy(pba_str, "Unknown"); + strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1); e_info("MAC: %d, PHY: %d, PBA No: %s\n", hw->mac.type, hw->phy.type, pba_str); } @@ -5963,9 +5973,9 @@ static int __devinit e1000_probe(struct pci_dev *pdev, * under the control of the driver. */ if (!(adapter->flags & FLAG_HAS_AMT)) - e1000_get_hw_control(adapter); + e1000e_get_hw_control(adapter); - strcpy(netdev->name, "eth%d"); + strncpy(netdev->name, "eth%d", sizeof(netdev->name) - 1); err = register_netdev(netdev); if (err) goto err_register; @@ -5982,12 +5992,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev, err_register: if (!(adapter->flags & FLAG_HAS_AMT)) - e1000_release_hw_control(adapter); + e1000e_release_hw_control(adapter); err_eeprom: if (!e1000_check_reset_block(&adapter->hw)) e1000_phy_hw_reset(&adapter->hw); err_hw_init: - kfree(adapter->tx_ring); kfree(adapter->rx_ring); err_sw_init: @@ -6053,7 +6062,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev) * Release control of h/w to f/w. If f/w is AMT enabled, this * would have already happened in close and is redundant. */ - e1000_release_hw_control(adapter); + e1000e_release_hw_control(adapter); e1000e_reset_interrupt_capability(adapter); kfree(adapter->tx_ring); diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c index 1781efe..a640f1c 100644 --- a/drivers/net/e1000e/phy.c +++ b/drivers/net/e1000e/phy.c @@ -637,12 +637,11 @@ s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data) **/ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) { - struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data; /* Enable CRS on TX. This must be set for half-duplex operation. */ - ret_val = phy->ops.read_reg(hw, I82577_CFG_REG, &phy_data); + ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data); if (ret_val) goto out; @@ -651,7 +650,7 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) /* Enable downshift */ phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; - ret_val = phy->ops.write_reg(hw, I82577_CFG_REG, phy_data); + ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data); out: return ret_val; @@ -774,16 +773,14 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) } if (phy->type == e1000_phy_82578) { - ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, - &phy_data); + ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); if (ret_val) return ret_val; /* 82578 PHY - set the downshift count to 1x. */ phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE; phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK; - ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, - phy_data); + ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); if (ret_val) return ret_val; } @@ -1319,9 +1316,8 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) * We didn't get link. * Reset the DSP and cross our fingers. 
*/ - ret_val = e1e_wphy(hw, - M88E1000_PHY_PAGE_SELECT, - 0x001d); + ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT, + 0x001d); if (ret_val) return ret_val; ret_val = e1000e_phy_reset_dsp(hw); @@ -3071,12 +3067,12 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw) goto out; /* Do not apply workaround if in PHY loopback bit 14 set */ - hw->phy.ops.read_reg(hw, PHY_CONTROL, &data); + e1e_rphy(hw, PHY_CONTROL, &data); if (data & PHY_CONTROL_LB) goto out; /* check if link is up and at 1Gbps */ - ret_val = hw->phy.ops.read_reg(hw, BM_CS_STATUS, &data); + ret_val = e1e_rphy(hw, BM_CS_STATUS, &data); if (ret_val) goto out; @@ -3092,14 +3088,12 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw) mdelay(200); /* flush the packets in the fifo buffer */ - ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL, - HV_MUX_DATA_CTRL_GEN_TO_MAC | - HV_MUX_DATA_CTRL_FORCE_SPEED); + ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC | + HV_MUX_DATA_CTRL_FORCE_SPEED); if (ret_val) goto out; - ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL, - HV_MUX_DATA_CTRL_GEN_TO_MAC); + ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC); out: return ret_val; @@ -3119,7 +3113,7 @@ s32 e1000_check_polarity_82577(struct e1000_hw *hw) s32 ret_val; u16 data; - ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); + ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data); if (!ret_val) phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY) @@ -3142,13 +3136,13 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) u16 phy_data; bool link; - ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); if (ret_val) goto out; e1000e_phy_force_speed_duplex_setup(hw, &phy_data); - ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); if (ret_val) goto out; @@ -3212,7 +3206,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw) if (ret_val) goto out; - ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); + ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data); if (ret_val) goto out; @@ -3224,7 +3218,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw) if (ret_val) goto out; - ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); + ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data); if (ret_val) goto out; @@ -3258,7 +3252,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw) s32 ret_val; u16 phy_data, length; - ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data); + ret_val = e1e_rphy(hw, I82577_PHY_DIAG_STATUS, &phy_data); if (ret_val) goto out; diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h index a724a2d..6c7257b 100644 --- a/drivers/net/ehea/ehea.h +++ b/drivers/net/ehea/ehea.h @@ -40,7 +40,7 @@ #include <asm/io.h> #define DRV_NAME "ehea" -#define DRV_VERSION "EHEA_0106" +#define DRV_VERSION "EHEA_0107" /* eHEA capability flags */ #define DLPAR_PORT_ADD_REM 1 diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 1032b5b..f75d314 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -437,7 +437,7 @@ static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a) } } /* Ring doorbell */ - ehea_update_rq1a(pr->qp, i); + ehea_update_rq1a(pr->qp, i - 1); } static int ehea_refill_rq_def(struct ehea_port_res *pr, @@ -1329,9 +1329,7 @@ static int ehea_fill_port_res(struct ehea_port_res *pr) int ret; struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr; - 
ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1 - - init_attr->act_nr_rwqes_rq2 - - init_attr->act_nr_rwqes_rq3 - 1); + ehea_init_fill_rq1(pr, pr->rq1_skba.len); ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1); diff --git a/drivers/net/fec.c b/drivers/net/fec.c index cce32d4..2a71373 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c @@ -17,6 +17,8 @@ * * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be) * Copyright (c) 2004-2006 Macq Electronique SA. + * + * Copyright (C) 2010 Freescale Semiconductor, Inc. */ #include <linux/module.h> @@ -45,29 +47,41 @@ #include <asm/cacheflush.h> -#ifndef CONFIG_ARCH_MXC +#ifndef CONFIG_ARM #include <asm/coldfire.h> #include <asm/mcfsim.h> #endif #include "fec.h" -#ifdef CONFIG_ARCH_MXC -#include <mach/hardware.h> +#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) #define FEC_ALIGNMENT 0xf #else #define FEC_ALIGNMENT 0x3 #endif -/* - * Define the fixed address of the FEC hardware. - */ -#if defined(CONFIG_M5272) +#define DRIVER_NAME "fec" + +/* Controller is ENET-MAC */ +#define FEC_QUIRK_ENET_MAC (1 << 0) +/* Controller needs driver to swap frame */ +#define FEC_QUIRK_SWAP_FRAME (1 << 1) -static unsigned char fec_mac_default[] = { - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +static struct platform_device_id fec_devtype[] = { + { + .name = DRIVER_NAME, + .driver_data = 0, + }, { + .name = "imx28-fec", + .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME, + } }; +static unsigned char macaddr[ETH_ALEN]; +module_param_array(macaddr, byte, NULL, 0); +MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); + +#if defined(CONFIG_M5272) /* * Some hardware gets it MAC address out of local flash memory. * if this is non-zero then assume it is the address to get MAC from. @@ -133,7 +147,8 @@ static unsigned char fec_mac_default[] = { * account when setting it. */ #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ - defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC) + defined(CONFIG_M520x) || defined(CONFIG_M532x) || \ + defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) #define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) #else #define OPT_FRAME_SIZE 0 @@ -186,7 +201,6 @@ struct fec_enet_private { int mii_timeout; uint phy_speed; phy_interface_t phy_interface; - int index; int link; int full_duplex; struct completion mdio_done; @@ -213,10 +227,23 @@ static void fec_stop(struct net_device *dev); /* Transmitter timeout */ #define TX_TIMEOUT (2 * HZ) +static void *swap_buffer(void *bufaddr, int len) +{ + int i; + unsigned int *buf = bufaddr; + + for (i = 0; i < (len + 3) / 4; i++, buf++) + *buf = cpu_to_be32(*buf); + + return bufaddr; +} + static netdev_tx_t fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct fec_enet_private *fep = netdev_priv(dev); + const struct platform_device_id *id_entry = + platform_get_device_id(fep->pdev); struct bufdesc *bdp; void *bufaddr; unsigned short status; @@ -261,6 +288,14 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) bufaddr = fep->tx_bounce[index]; } + /* + * Some design made an incorrect assumption on endian mode of + * the system that it's running on. As the result, driver has to + * swap every frame going to and coming from the controller. 
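/*
 * Illustration only, not from the patch: with FEC_QUIRK_SWAP_FRAME set the
 * driver rewrites every frame one 32-bit word at a time, rounding the length
 * up to a word boundary, exactly what swap_buffer() above does with
 * cpu_to_be32().  The standalone demo below shows the effect on a
 * little-endian host; swap32() is a local stand-in for cpu_to_be32().
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t swap32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00) |
	       ((v << 8) & 0x00ff0000) | (v << 24);
}

static void swap_frame(void *bufaddr, int len)
{
	uint32_t *buf = bufaddr;
	int i;

	for (i = 0; i < (len + 3) / 4; i++)
		buf[i] = swap32(buf[i]);
}

int main(void)
{
	uint32_t frame[2] = { 0x33221100u, 0x77665544u };

	swap_frame(frame, sizeof(frame));
	printf("%08x %08x\n", frame[0], frame[1]);	/* 00112233 44556677 */
	return 0;
}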
+ */ + if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) + swap_buffer(bufaddr, skb->len); + /* Save skb pointer */ fep->tx_skbuff[fep->skb_cur] = skb; @@ -429,6 +464,8 @@ static void fec_enet_rx(struct net_device *dev) { struct fec_enet_private *fep = netdev_priv(dev); + const struct platform_device_id *id_entry = + platform_get_device_id(fep->pdev); struct bufdesc *bdp; unsigned short status; struct sk_buff *skb; @@ -492,6 +529,9 @@ fec_enet_rx(struct net_device *dev) dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen, DMA_FROM_DEVICE); + if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) + swap_buffer(data, pkt_len); + /* This does 16 byte alignment, exactly what we need. * The packet length includes FCS, but we don't want to * include that when passing upstream as it messes up @@ -538,37 +578,50 @@ rx_processing_done: } /* ------------------------------------------------------------------------- */ -#ifdef CONFIG_M5272 static void __inline__ fec_get_mac(struct net_device *dev) { struct fec_enet_private *fep = netdev_priv(dev); + struct fec_platform_data *pdata = fep->pdev->dev.platform_data; unsigned char *iap, tmpaddr[ETH_ALEN]; - if (FEC_FLASHMAC) { - /* - * Get MAC address from FLASH. - * If it is all 1's or 0's, use the default. - */ - iap = (unsigned char *)FEC_FLASHMAC; - if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) && - (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0)) - iap = fec_mac_default; - if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) && - (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff)) - iap = fec_mac_default; - } else { - *((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW); - *((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16); + /* + * try to get mac address in following order: + * + * 1) module parameter via kernel command line in form + * fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0 + */ + iap = macaddr; + + /* + * 2) from flash or fuse (via platform data) + */ + if (!is_valid_ether_addr(iap)) { +#ifdef CONFIG_M5272 + if (FEC_FLASHMAC) + iap = (unsigned char *)FEC_FLASHMAC; +#else + if (pdata) + memcpy(iap, pdata->mac, ETH_ALEN); +#endif + } + + /* + * 3) FEC mac registers set by bootloader + */ + if (!is_valid_ether_addr(iap)) { + *((unsigned long *) &tmpaddr[0]) = + be32_to_cpu(readl(fep->hwp + FEC_ADDR_LOW)); + *((unsigned short *) &tmpaddr[4]) = + be16_to_cpu(readl(fep->hwp + FEC_ADDR_HIGH) >> 16); iap = &tmpaddr[0]; } memcpy(dev->dev_addr, iap, ETH_ALEN); - /* Adjust MAC if using default MAC address */ - if (iap == fec_mac_default) - dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index; + /* Adjust MAC if using macaddr */ + if (iap == macaddr) + dev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id; } -#endif /* ------------------------------------------------------------------------- */ @@ -651,8 +704,8 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, fep->mii_timeout = 0; init_completion(&fep->mdio_done); - /* start a read op */ - writel(FEC_MMFR_ST | FEC_MMFR_OP_READ | + /* start a write op */ + writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE | FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | FEC_MMFR_TA | FEC_MMFR_DATA(value), fep->hwp + FEC_MII_DATA); @@ -681,6 +734,7 @@ static int fec_enet_mii_probe(struct net_device *dev) char mdio_bus_id[MII_BUS_ID_SIZE]; char phy_name[MII_BUS_ID_SIZE + 3]; int phy_id; + int dev_id = fep->pdev->id; fep->phy_dev = NULL; @@ -692,6 +746,8 @@ static int fec_enet_mii_probe(struct net_device *dev) continue; if 
(fep->mii_bus->phy_map[phy_id]->phy_id == 0) continue; + if (dev_id--) + continue; strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); break; } @@ -729,10 +785,35 @@ static int fec_enet_mii_probe(struct net_device *dev) static int fec_enet_mii_init(struct platform_device *pdev) { + static struct mii_bus *fec0_mii_bus; struct net_device *dev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(dev); + const struct platform_device_id *id_entry = + platform_get_device_id(fep->pdev); int err = -ENXIO, i; + /* + * The dual fec interfaces are not equivalent with enet-mac. + * Here are the differences: + * + * - fec0 supports MII & RMII modes while fec1 only supports RMII + * - fec0 acts as the 1588 time master while fec1 is slave + * - external phys can only be configured by fec0 + * + * That is to say fec1 can not work independently. It only works + * when fec0 is working. The reason behind this design is that the + * second interface is added primarily for Switch mode. + * + * Because of the last point above, both phys are attached on fec0 + * mdio interface in board design, and need to be configured by + * fec0 mii_bus. + */ + if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && pdev->id) { + /* fec1 uses fec0 mii_bus */ + fep->mii_bus = fec0_mii_bus; + return 0; + } + fep->mii_timeout = 0; /* @@ -769,6 +850,10 @@ static int fec_enet_mii_init(struct platform_device *pdev) if (mdiobus_register(fep->mii_bus)) goto err_out_free_mdio_irq; + /* save fec0 mii_bus */ + if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) + fec0_mii_bus = fep->mii_bus; + return 0; err_out_free_mdio_irq: @@ -1067,9 +1152,8 @@ static const struct net_device_ops fec_netdev_ops = { /* * XXX: We need to clean up on failure exits here. * - * index is only used in legacy code */ -static int fec_enet_init(struct net_device *dev, int index) +static int fec_enet_init(struct net_device *dev) { struct fec_enet_private *fep = netdev_priv(dev); struct bufdesc *cbd_base; @@ -1086,26 +1170,11 @@ static int fec_enet_init(struct net_device *dev, int index) spin_lock_init(&fep->hw_lock); - fep->index = index; fep->hwp = (void __iomem *)dev->base_addr; fep->netdev = dev; - /* Set the Ethernet address */ -#ifdef CONFIG_M5272 + /* Get the Ethernet address */ fec_get_mac(dev); -#else - { - unsigned long l; - l = readl(fep->hwp + FEC_ADDR_LOW); - dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24); - dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16); - dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8); - dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0); - l = readl(fep->hwp + FEC_ADDR_HIGH); - dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24); - dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16); - } -#endif /* Set receive and transmit descriptor base. */ fep->rx_bd_base = cbd_base; @@ -1156,12 +1225,25 @@ static void fec_restart(struct net_device *dev, int duplex) { struct fec_enet_private *fep = netdev_priv(dev); + const struct platform_device_id *id_entry = + platform_get_device_id(fep->pdev); int i; + u32 val, temp_mac[2]; /* Whack a reset. We should wait for this. */ writel(1, fep->hwp + FEC_ECNTRL); udelay(10); + /* + * enet-mac reset will reset mac address registers too, + * so need to reconfigure it. + */ + if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { + memcpy(&temp_mac, dev->dev_addr, ETH_ALEN); + writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW); + writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH); + } + /* Clear any outstanding interrupt. 
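/*
 * Illustration only, not from the patch: because the external PHYs can only
 * be configured through fec0, fec_enet_mii_init() above keeps the bus
 * registered by fec0 in a static pointer and hands that same bus to fec1.
 * The sketch below shows just that sharing pattern; the struct and the call
 * are invented stand-ins, not the kernel MDIO API.
 */
struct mii_bus_sketch { int id; };

static struct mii_bus_sketch *fec0_bus;		/* remembered by instance 0 */

static struct mii_bus_sketch *mii_init_sketch(int instance,
					      struct mii_bus_sketch *own_bus)
{
	if (instance != 0)
		return fec0_bus;	/* fec1 reuses the bus fec0 registered */

	fec0_bus = own_bus;		/* fec0 registers and remembers its bus */
	return fec0_bus;
}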
*/ writel(0xffc00000, fep->hwp + FEC_IEVENT); @@ -1208,20 +1290,45 @@ fec_restart(struct net_device *dev, int duplex) /* Set MII speed */ writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); -#ifdef FEC_MIIGSK_ENR - if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) { - /* disable the gasket and wait */ - writel(0, fep->hwp + FEC_MIIGSK_ENR); - while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4) - udelay(1); + /* + * The phy interface and speed need to get configured + * differently on enet-mac. + */ + if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { + val = readl(fep->hwp + FEC_R_CNTRL); - /* configure the gasket: RMII, 50 MHz, no loopback, no echo */ - writel(1, fep->hwp + FEC_MIIGSK_CFGR); + /* MII or RMII */ + if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) + val |= (1 << 8); + else + val &= ~(1 << 8); - /* re-enable the gasket */ - writel(2, fep->hwp + FEC_MIIGSK_ENR); - } + /* 10M or 100M */ + if (fep->phy_dev && fep->phy_dev->speed == SPEED_100) + val &= ~(1 << 9); + else + val |= (1 << 9); + + writel(val, fep->hwp + FEC_R_CNTRL); + } else { +#ifdef FEC_MIIGSK_ENR + if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) { + /* disable the gasket and wait */ + writel(0, fep->hwp + FEC_MIIGSK_ENR); + while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4) + udelay(1); + + /* + * configure the gasket: + * RMII, 50 MHz, no loopback, no echo + */ + writel(1, fep->hwp + FEC_MIIGSK_CFGR); + + /* re-enable the gasket */ + writel(2, fep->hwp + FEC_MIIGSK_ENR); + } #endif + } /* And last, enable the transmit and receive processing */ writel(2, fep->hwp + FEC_ECNTRL); @@ -1316,7 +1423,7 @@ fec_probe(struct platform_device *pdev) } clk_enable(fep->clk); - ret = fec_enet_init(ndev, 0); + ret = fec_enet_init(ndev); if (ret) goto failed_init; @@ -1380,8 +1487,10 @@ fec_suspend(struct device *dev) if (ndev) { fep = netdev_priv(ndev); - if (netif_running(ndev)) - fec_enet_close(ndev); + if (netif_running(ndev)) { + fec_stop(ndev); + netif_device_detach(ndev); + } clk_disable(fep->clk); } return 0; @@ -1396,8 +1505,10 @@ fec_resume(struct device *dev) if (ndev) { fep = netdev_priv(ndev); clk_enable(fep->clk); - if (netif_running(ndev)) - fec_enet_open(ndev); + if (netif_running(ndev)) { + fec_restart(ndev, fep->full_duplex); + netif_device_attach(ndev); + } } return 0; } @@ -1414,12 +1525,13 @@ static const struct dev_pm_ops fec_pm_ops = { static struct platform_driver fec_driver = { .driver = { - .name = "fec", + .name = DRIVER_NAME, .owner = THIS_MODULE, #ifdef CONFIG_PM .pm = &fec_pm_ops, #endif }, + .id_table = fec_devtype, .probe = fec_probe, .remove = __devexit_p(fec_drv_remove), }; diff --git a/drivers/net/fec.h b/drivers/net/fec.h index 2c48b25..ace318d 100644 --- a/drivers/net/fec.h +++ b/drivers/net/fec.h @@ -14,7 +14,8 @@ /****************************************************************************/ #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ - defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC) + defined(CONFIG_M520x) || defined(CONFIG_M532x) || \ + defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) /* * Just figures, Motorola would have to change the offsets for * registers in the same peripheral device on different models @@ -78,7 +79,7 @@ /* * Define the buffer descriptor structure. 
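/*
 * Illustration only, not from the patch: on the ENET-MAC variant the MII/RMII
 * choice and the 10/100 speed are bits in FEC_R_CNTRL (bit 8 and bit 9 in the
 * fec_restart() hunk above) instead of the external MIIGSK gasket.  The
 * sketch below repeats that read-modify-write with the register access
 * stubbed out.
 */
#include <stdint.h>
#include <stdbool.h>

static uint32_t r_cntrl;			/* stand-in for FEC_R_CNTRL */

static void enet_mac_config_sketch(bool rmii, bool speed_100)
{
	uint32_t val = r_cntrl;			/* readl(fep->hwp + FEC_R_CNTRL) */

	if (rmii)
		val |= (1 << 8);		/* RMII */
	else
		val &= ~(1 << 8);		/* MII */

	if (speed_100)
		val &= ~(1 << 9);		/* 100 Mbit: bit 9 cleared */
	else
		val |= (1 << 9);		/* 10 Mbit: bit 9 set */

	r_cntrl = val;				/* writel(val, ...) */
}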
*/ -#ifdef CONFIG_ARCH_MXC +#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) struct bufdesc { unsigned short cbd_datlen; /* Data length */ unsigned short cbd_sc; /* Control and status info */ diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index cd2d72d..af09296 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c @@ -3949,6 +3949,7 @@ static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) writel(flags, base + NvRegWakeUpFlags); spin_unlock_irq(&np->lock); } + device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled); return 0; } @@ -5488,14 +5489,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i /* set mac address */ nv_copy_mac_to_hw(dev); - /* Workaround current PCI init glitch: wakeup bits aren't - * being set from PCI PM capability. - */ - device_init_wakeup(&pci_dev->dev, 1); - /* disable WOL */ writel(0, base + NvRegWakeUpFlags); np->wolenabled = 0; + device_set_wakeup_enable(&pci_dev->dev, false); if (id->driver_data & DEV_HAS_POWER_CNTRL) { @@ -5746,8 +5743,9 @@ static void __devexit nv_remove(struct pci_dev *pci_dev) } #ifdef CONFIG_PM -static int nv_suspend(struct pci_dev *pdev, pm_message_t state) +static int nv_suspend(struct device *device) { + struct pci_dev *pdev = to_pci_dev(device); struct net_device *dev = pci_get_drvdata(pdev); struct fe_priv *np = netdev_priv(dev); u8 __iomem *base = get_hwbase(dev); @@ -5763,25 +5761,17 @@ static int nv_suspend(struct pci_dev *pdev, pm_message_t state) for (i = 0; i <= np->register_size/sizeof(u32); i++) np->saved_config_space[i] = readl(base + i*sizeof(u32)); - pci_save_state(pdev); - pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled); - pci_disable_device(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } -static int nv_resume(struct pci_dev *pdev) +static int nv_resume(struct device *device) { + struct pci_dev *pdev = to_pci_dev(device); struct net_device *dev = pci_get_drvdata(pdev); struct fe_priv *np = netdev_priv(dev); u8 __iomem *base = get_hwbase(dev); int i, rc = 0; - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - /* ack any pending wake events, disable PME */ - pci_enable_wake(pdev, PCI_D0, 0); - /* restore non-pci configuration space */ for (i = 0; i <= np->register_size/sizeof(u32); i++) writel(np->saved_config_space[i], base+i*sizeof(u32)); @@ -5800,6 +5790,9 @@ static int nv_resume(struct pci_dev *pdev) return rc; } +static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume); +#define NV_PM_OPS (&nv_pm_ops) + static void nv_shutdown(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); @@ -5822,15 +5815,13 @@ static void nv_shutdown(struct pci_dev *pdev) * only put the device into D3 if we really go for poweroff. 
*/ if (system_state == SYSTEM_POWER_OFF) { - if (pci_enable_wake(pdev, PCI_D3cold, np->wolenabled)) - pci_enable_wake(pdev, PCI_D3hot, np->wolenabled); + pci_wake_from_d3(pdev, np->wolenabled); pci_set_power_state(pdev, PCI_D3hot); } } #else -#define nv_suspend NULL +#define NV_PM_OPS NULL #define nv_shutdown NULL -#define nv_resume NULL #endif /* CONFIG_PM */ static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = { @@ -6002,9 +5993,8 @@ static struct pci_driver driver = { .id_table = pci_tbl, .probe = nv_probe, .remove = __devexit_p(nv_remove), - .suspend = nv_suspend, - .resume = nv_resume, .shutdown = nv_shutdown, + .driver.pm = NV_PM_OPS, }; static int __init init_nic(void) diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c index 4e7d1d0..7d9ced0 100644 --- a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c @@ -396,7 +396,7 @@ static unsigned char *add_mcs(unsigned char *bits, int bitrate, while (p) { if (p->bitrate == bitrate) { memcpy(p->bits, bits, YAM_FPGA_SIZE); - return p->bits; + goto out; } p = p->next; } @@ -411,7 +411,7 @@ static unsigned char *add_mcs(unsigned char *bits, int bitrate, p->bitrate = bitrate; p->next = yam_data; yam_data = p; - + out: release_firmware(fw); return p->bits; } diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 3ae30b8..3b8c924 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -508,6 +508,8 @@ extern void ixgbe_free_rx_resources(struct ixgbe_ring *); extern void ixgbe_free_tx_resources(struct ixgbe_ring *); extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *); extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *); +extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, + struct ixgbe_ring *); extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); @@ -524,26 +526,13 @@ extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc); extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc); extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, - struct ixgbe_atr_input *input, + union ixgbe_atr_hash_dword input, + union ixgbe_atr_hash_dword common, u8 queue); extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, - struct ixgbe_atr_input *input, + union ixgbe_atr_input *input, struct ixgbe_atr_input_masks *input_masks, u16 soft_id, u8 queue); -extern s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, - u16 vlan_id); -extern s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, - u32 src_addr); -extern s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, - u32 dst_addr); -extern s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, - u16 src_port); -extern s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, - u16 dst_port); -extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, - u16 flex_byte); -extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, - u8 l4type); extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, struct ixgbe_ring *ring); extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter, diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c index bfd3c22..8d316d9 100644 --- a/drivers/net/ixgbe/ixgbe_82599.c +++ 
b/drivers/net/ixgbe/ixgbe_82599.c @@ -1003,7 +1003,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) udelay(10); } if (i >= IXGBE_FDIRCMD_CMD_POLL) { - hw_dbg(hw ,"Flow Director previous command isn't complete, " + hw_dbg(hw, "Flow Director previous command isn't complete, " "aborting table re-initialization.\n"); return IXGBE_ERR_FDIR_REINIT_FAILED; } @@ -1113,13 +1113,10 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc) /* Move the flexible bytes to use the ethertype - shift 6 words */ fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); - fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS; /* Prime the keys for hashing */ - IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, - htonl(IXGBE_ATR_BUCKET_HASH_KEY)); - IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, - htonl(IXGBE_ATR_SIGNATURE_HASH_KEY)); + IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY); /* * Poll init-done after we write the register. Estimated times: @@ -1209,10 +1206,8 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc) fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); /* Prime the keys for hashing */ - IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, - htonl(IXGBE_ATR_BUCKET_HASH_KEY)); - IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, - htonl(IXGBE_ATR_SIGNATURE_HASH_KEY)); + IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY); /* * Poll init-done after we write the register. Estimated times: @@ -1251,8 +1246,8 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc) * @stream: input bitstream to compute the hash on * @key: 32-bit hash key **/ -static u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input, - u32 key) +static u32 ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input, + u32 key) { /* * The algorithm is as follows: @@ -1272,410 +1267,250 @@ static u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input, * To simplify for programming, the algorithm is implemented * in software this way: * - * Key[31:0], Stream[335:0] + * key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0] + * + * for (i = 0; i < 352; i+=32) + * hi_hash_dword[31:0] ^= Stream[(i+31):i]; + * + * lo_hash_dword[15:0] ^= Stream[15:0]; + * lo_hash_dword[15:0] ^= hi_hash_dword[31:16]; + * lo_hash_dword[31:16] ^= hi_hash_dword[15:0]; * - * tmp_key[11 * 32 - 1:0] = 11{Key[31:0] = key concatenated 11 times - * int_key[350:0] = tmp_key[351:1] - * int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321] + * hi_hash_dword[31:0] ^= Stream[351:320]; * - * hash[15:0] = 0; - * for (i = 0; i < 351; i++) { - * if (int_key[i]) - * hash ^= int_stream[(i + 15):i]; + * if(key[0]) + * hash[15:0] ^= Stream[15:0]; + * + * for (i = 0; i < 16; i++) { + * if (key[i]) + * hash[15:0] ^= lo_hash_dword[(i+15):i]; + * if (key[i + 16]) + * hash[15:0] ^= hi_hash_dword[(i+15):i]; * } + * */ + __be32 common_hash_dword = 0; + u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; + u32 hash_result = 0; + u8 i; - union { - u64 fill[6]; - u32 key[11]; - u8 key_stream[44]; - } tmp_key; + /* record the flow_vm_vlan bits as they are a key part to the hash */ + flow_vm_vlan = ntohl(atr_input->dword_stream[0]); - u8 *stream = (u8 *)atr_input; - u8 int_key[44]; /* upper-most bit unused */ - u8 hash_str[46]; /* upper-most 2 bits unused */ - u16 hash_result = 0; - int i, j, k, h; + /* generate common hash dword */ + for (i = 10; i; i -= 2) + common_hash_dword ^= 
atr_input->dword_stream[i] ^ + atr_input->dword_stream[i - 1]; - /* - * Initialize the fill member to prevent warnings - * on some compilers - */ - tmp_key.fill[0] = 0; + hi_hash_dword = ntohl(common_hash_dword); - /* First load the temporary key stream */ - for (i = 0; i < 6; i++) { - u64 fillkey = ((u64)key << 32) | key; - tmp_key.fill[i] = fillkey; - } + /* low dword is word swapped version of common */ + lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); - /* - * Set the interim key for the hashing. Bit 352 is unused, so we must - * shift and compensate when building the key. - */ + /* apply flow ID/VM pool/VLAN ID bits to hash words */ + hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); - int_key[0] = tmp_key.key_stream[0] >> 1; - for (i = 1, j = 0; i < 44; i++) { - unsigned int this_key = tmp_key.key_stream[j] << 7; - j++; - int_key[i] = (u8)(this_key | (tmp_key.key_stream[j] >> 1)); - } - - /* - * Set the interim bit string for the hashing. Bits 368 and 367 are - * unused, so shift and compensate when building the string. - */ - hash_str[0] = (stream[40] & 0x7f) >> 1; - for (i = 1, j = 40; i < 46; i++) { - unsigned int this_str = stream[j] << 7; - j++; - if (j > 41) - j = 0; - hash_str[i] = (u8)(this_str | (stream[j] >> 1)); - } + /* Process bits 0 and 16 */ + if (key & 0x0001) hash_result ^= lo_hash_dword; + if (key & 0x00010000) hash_result ^= hi_hash_dword; /* - * Now compute the hash. i is the index into hash_str, j is into our - * key stream, k is counting the number of bits, and h interates within - * each byte. + * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to + * delay this because bit 0 of the stream should not be processed + * so we do not add the vlan until after bit 0 was processed */ - for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) { - for (h = 0; h < 8 && k < 351; h++, k++) { - if (int_key[j] & (1 << h)) { - /* - * Key bit is set, XOR in the current 16-bit - * string. 
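[Editor's note] A standalone illustration of the reworked hash the new comment above describes: the 11-dword input stream is folded into a single 32-bit value plus its word-swapped copy, and every set key bit XORs a shifted window into a 15-bit result. This is a simplified userspace sketch, not the driver code; in particular it skips the separate mixing of dword 0 (VM pool / flow type / VLAN), which the driver delays so that key bit 0 never sees the VLAN field.

#include <stdint.h>

static uint32_t atr_hash_fold(const uint32_t dword_stream[11], uint32_t key)
{
        uint32_t hi = 0, lo, hash = 0;
        int i;

        for (i = 1; i < 11; i++)        /* fold dwords 1..10 together */
                hi ^= dword_stream[i];
        lo = (hi >> 16) | (hi << 16);   /* word-swapped copy */

        for (i = 0; i < 16; i++) {
                if (key & (1u << i))
                        hash ^= lo >> i;
                if (key & (1u << (i + 16)))
                        hash ^= hi >> i;
        }
        return hash & 0x7fff;           /* IXGBE_ATR_HASH_MASK */
}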
Example of processing: - * h = 0, - * tmp = (hash_str[i - 2] & 0 << 16) | - * (hash_str[i - 1] & 0xff << 8) | - * (hash_str[i] & 0xff >> 0) - * So tmp = hash_str[15 + k:k], since the - * i + 2 clause rolls off the 16-bit value - * h = 7, - * tmp = (hash_str[i - 2] & 0x7f << 9) | - * (hash_str[i - 1] & 0xff << 1) | - * (hash_str[i] & 0x80 >> 7) - */ - int tmp = (hash_str[i] >> h); - tmp |= (hash_str[i - 1] << (8 - h)); - tmp |= (int)(hash_str[i - 2] & ((1 << h) - 1)) - << (16 - h); - hash_result ^= (u16)tmp; - } - } - } - - return hash_result; -} - -/** - * ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream - * @input: input stream to modify - * @vlan: the VLAN id to load - **/ -s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan) -{ - input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan >> 8; - input->byte_stream[IXGBE_ATR_VLAN_OFFSET] = vlan & 0xff; - - return 0; -} - -/** - * ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address - * @input: input stream to modify - * @src_addr: the IP address to load - **/ -s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr) -{ - input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] = src_addr >> 24; - input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] = - (src_addr >> 16) & 0xff; - input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] = - (src_addr >> 8) & 0xff; - input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET] = src_addr & 0xff; - - return 0; -} - -/** - * ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address - * @input: input stream to modify - * @dst_addr: the IP address to load - **/ -s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr) -{ - input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] = dst_addr >> 24; - input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] = - (dst_addr >> 16) & 0xff; - input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] = - (dst_addr >> 8) & 0xff; - input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET] = dst_addr & 0xff; - - return 0; -} + lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); -/** - * ixgbe_atr_set_src_port_82599 - Sets the source port - * @input: input stream to modify - * @src_port: the source port to load - **/ -s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port) -{ - input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8; - input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff; - - return 0; -} - -/** - * ixgbe_atr_set_dst_port_82599 - Sets the destination port - * @input: input stream to modify - * @dst_port: the destination port to load - **/ -s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port) -{ - input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port >> 8; - input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] = dst_port & 0xff; - - return 0; -} - -/** - * ixgbe_atr_set_flex_byte_82599 - Sets the flexible bytes - * @input: input stream to modify - * @flex_bytes: the flexible bytes to load - **/ -s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte) -{ - input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] = flex_byte >> 8; - input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET] = flex_byte & 0xff; - - return 0; -} - -/** - * ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type - * @input: input stream to modify - * @l4type: the layer 4 type value to load - **/ -s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type) -{ - input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET] = l4type; - - return 0; -} - -/** - * 
ixgbe_atr_get_vlan_id_82599 - Gets the VLAN id from the ATR input stream - * @input: input stream to search - * @vlan: the VLAN id to load - **/ -static s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan) -{ - *vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET]; - *vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8; - - return 0; -} - -/** - * ixgbe_atr_get_src_ipv4_82599 - Gets the source IPv4 address - * @input: input stream to search - * @src_addr: the IP address to load - **/ -static s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input, - u32 *src_addr) -{ - *src_addr = input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET]; - *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] << 8; - *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] << 16; - *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] << 24; - - return 0; -} -/** - * ixgbe_atr_get_dst_ipv4_82599 - Gets the destination IPv4 address - * @input: input stream to search - * @dst_addr: the IP address to load - **/ -static s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input, - u32 *dst_addr) -{ - *dst_addr = input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET]; - *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] << 8; - *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] << 16; - *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] << 24; + /* process the remaining 30 bits in the key 2 bits at a time */ + for (i = 15; i; i-- ) { + if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i; + if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i; + } - return 0; + return hash_result & IXGBE_ATR_HASH_MASK; } -/** - * ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address - * @input: input stream to search - * @src_addr_1: the first 4 bytes of the IP address to load - * @src_addr_2: the second 4 bytes of the IP address to load - * @src_addr_3: the third 4 bytes of the IP address to load - * @src_addr_4: the fourth 4 bytes of the IP address to load - **/ -static s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input, - u32 *src_addr_1, u32 *src_addr_2, - u32 *src_addr_3, u32 *src_addr_4) -{ - *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12]; - *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] << 8; - *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] << 16; - *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] << 24; - - *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8]; - *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] << 8; - *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] << 16; - *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] << 24; - - *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4]; - *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] << 8; - *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] << 16; - *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] << 24; - - *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET]; - *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] << 8; - *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] << 16; - *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] << 24; - - return 0; -} +/* + * These defines allow us to quickly generate all of the necessary instructions + * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION + * for values 0 
through 15 + */ +#define IXGBE_ATR_COMMON_HASH_KEY \ + (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY) +#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \ +do { \ + u32 n = (_n); \ + if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \ + common_hash ^= lo_hash_dword >> n; \ + else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ + bucket_hash ^= lo_hash_dword >> n; \ + else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \ + sig_hash ^= lo_hash_dword << (16 - n); \ + if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \ + common_hash ^= hi_hash_dword >> n; \ + else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ + bucket_hash ^= hi_hash_dword >> n; \ + else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \ + sig_hash ^= hi_hash_dword << (16 - n); \ +} while (0); /** - * ixgbe_atr_get_src_port_82599 - Gets the source port - * @input: input stream to modify - * @src_port: the source port to load + * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash + * @stream: input bitstream to compute the hash on * - * Even though the input is given in big-endian, the FDIRPORT registers - * expect the ports to be programmed in little-endian. Hence the need to swap - * endianness when retrieving the data. This can be confusing since the - * internal hash engine expects it to be big-endian. + * This function is almost identical to the function above but contains + * several optomizations such as unwinding all of the loops, letting the + * compiler work out all of the conditional ifs since the keys are static + * defines, and computing two keys at once since the hashed dword stream + * will be the same for both keys. **/ -static s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input, - u16 *src_port) +static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, + union ixgbe_atr_hash_dword common) { - *src_port = input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] << 8; - *src_port |= input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1]; + u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; + u32 sig_hash = 0, bucket_hash = 0, common_hash = 0; - return 0; -} + /* record the flow_vm_vlan bits as they are a key part to the hash */ + flow_vm_vlan = ntohl(input.dword); -/** - * ixgbe_atr_get_dst_port_82599 - Gets the destination port - * @input: input stream to modify - * @dst_port: the destination port to load - * - * Even though the input is given in big-endian, the FDIRPORT registers - * expect the ports to be programmed in little-endian. Hence the need to swap - * endianness when retrieving the data. This can be confusing since the - * internal hash engine expects it to be big-endian. 
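[Editor's note] The IXGBE_COMPUTE_SIG_HASH_ITERATION() macro above exploits the fact that a key bit set in both the bucket key and the signature key contributes the same XOR term to both hashes, so those terms are accumulated once in common_hash and folded into each result afterwards. A small userspace check of that identity (illustrative only; the key values are the ones this patch defines, and the final 16-bit shift of the signature half is left out):

#include <assert.h>
#include <stdint.h>

#define BUCKET_KEY 0x3DAD14E2u          /* IXGBE_ATR_BUCKET_HASH_KEY */
#define SIG_KEY    0x174D3614u          /* IXGBE_ATR_SIGNATURE_HASH_KEY */

static uint32_t hash_one(uint32_t lo, uint32_t hi, uint32_t key)
{
        uint32_t h = 0;
        int i;

        for (i = 0; i < 16; i++) {
                if (key & (1u << i))
                        h ^= lo >> i;
                if (key & (1u << (i + 16)))
                        h ^= hi >> i;
        }
        return h & 0x7fff;
}

int main(void)
{
        uint32_t lo = 0x12345678u, hi = 0x9abcdef0u;
        uint32_t common = hash_one(lo, hi, BUCKET_KEY & SIG_KEY);
        uint32_t bucket = hash_one(lo, hi, BUCKET_KEY & ~SIG_KEY);
        uint32_t sig    = hash_one(lo, hi, SIG_KEY & ~BUCKET_KEY);

        /* folding the shared terms back in reproduces the full-key hashes */
        assert((bucket ^ common) == hash_one(lo, hi, BUCKET_KEY));
        assert((sig ^ common)    == hash_one(lo, hi, SIG_KEY));
        return 0;
}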
- **/ -static s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input, - u16 *dst_port) -{ - *dst_port = input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] << 8; - *dst_port |= input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1]; + /* generate common hash dword */ + hi_hash_dword = ntohl(common.dword); - return 0; -} + /* low dword is word swapped version of common */ + lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); -/** - * ixgbe_atr_get_flex_byte_82599 - Gets the flexible bytes - * @input: input stream to modify - * @flex_bytes: the flexible bytes to load - **/ -static s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input, - u16 *flex_byte) -{ - *flex_byte = input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET]; - *flex_byte |= input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] << 8; + /* apply flow ID/VM pool/VLAN ID bits to hash words */ + hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); - return 0; -} + /* Process bits 0 and 16 */ + IXGBE_COMPUTE_SIG_HASH_ITERATION(0); -/** - * ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type - * @input: input stream to modify - * @l4type: the layer 4 type value to load - **/ -static s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input, - u8 *l4type) -{ - *l4type = input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET]; + /* + * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to + * delay this because bit 0 of the stream should not be processed + * so we do not add the vlan until after bit 0 was processed + */ + lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); + + /* Process remaining 30 bit of the key */ + IXGBE_COMPUTE_SIG_HASH_ITERATION(1); + IXGBE_COMPUTE_SIG_HASH_ITERATION(2); + IXGBE_COMPUTE_SIG_HASH_ITERATION(3); + IXGBE_COMPUTE_SIG_HASH_ITERATION(4); + IXGBE_COMPUTE_SIG_HASH_ITERATION(5); + IXGBE_COMPUTE_SIG_HASH_ITERATION(6); + IXGBE_COMPUTE_SIG_HASH_ITERATION(7); + IXGBE_COMPUTE_SIG_HASH_ITERATION(8); + IXGBE_COMPUTE_SIG_HASH_ITERATION(9); + IXGBE_COMPUTE_SIG_HASH_ITERATION(10); + IXGBE_COMPUTE_SIG_HASH_ITERATION(11); + IXGBE_COMPUTE_SIG_HASH_ITERATION(12); + IXGBE_COMPUTE_SIG_HASH_ITERATION(13); + IXGBE_COMPUTE_SIG_HASH_ITERATION(14); + IXGBE_COMPUTE_SIG_HASH_ITERATION(15); + + /* combine common_hash result with signature and bucket hashes */ + bucket_hash ^= common_hash; + bucket_hash &= IXGBE_ATR_HASH_MASK; - return 0; + sig_hash ^= common_hash << 16; + sig_hash &= IXGBE_ATR_HASH_MASK << 16; + + /* return completed signature hash */ + return sig_hash ^ bucket_hash; } /** * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter * @hw: pointer to hardware structure - * @stream: input bitstream + * @input: unique input dword + * @common: compressed common input dword * @queue: queue index to direct traffic to **/ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, - struct ixgbe_atr_input *input, + union ixgbe_atr_hash_dword input, + union ixgbe_atr_hash_dword common, u8 queue) { u64 fdirhashcmd; - u64 fdircmd; - u32 fdirhash; - u16 bucket_hash, sig_hash; - u8 l4type; - - bucket_hash = ixgbe_atr_compute_hash_82599(input, - IXGBE_ATR_BUCKET_HASH_KEY); - - /* bucket_hash is only 15 bits */ - bucket_hash &= IXGBE_ATR_HASH_MASK; - - sig_hash = ixgbe_atr_compute_hash_82599(input, - IXGBE_ATR_SIGNATURE_HASH_KEY); - - /* Get the l4type in order to program FDIRCMD properly */ - /* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */ - ixgbe_atr_get_l4type_82599(input, &l4type); + u32 fdircmd; /* - * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 
32-bits - * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH. + * Get the flow_type in order to program FDIRCMD properly + * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */ - fdirhash = sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash; - - fdircmd = (IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | - IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN); - - switch (l4type & IXGBE_ATR_L4TYPE_MASK) { - case IXGBE_ATR_L4TYPE_TCP: - fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP; - break; - case IXGBE_ATR_L4TYPE_UDP: - fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP; - break; - case IXGBE_ATR_L4TYPE_SCTP: - fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP; + switch (input.formatted.flow_type) { + case IXGBE_ATR_FLOW_TYPE_TCPV4: + case IXGBE_ATR_FLOW_TYPE_UDPV4: + case IXGBE_ATR_FLOW_TYPE_SCTPV4: + case IXGBE_ATR_FLOW_TYPE_TCPV6: + case IXGBE_ATR_FLOW_TYPE_UDPV6: + case IXGBE_ATR_FLOW_TYPE_SCTPV6: break; default: - hw_dbg(hw, "Error on l4type input\n"); + hw_dbg(hw, " Error on flow type input\n"); return IXGBE_ERR_CONFIG; } - if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) - fdircmd |= IXGBE_FDIRCMD_IPV6; + /* configure FDIRCMD register */ + fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | + IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; + fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; + fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; - fdircmd |= ((u64)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT); - fdirhashcmd = ((fdircmd << 32) | fdirhash); + /* + * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits + * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH. + */ + fdirhashcmd = (u64)fdircmd << 32; + fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common); IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); + hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd); + return 0; } /** + * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks + * @input_mask: mask to be bit swapped + * + * The source and destination port masks for flow director are bit swapped + * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc. In order to + * generate a correctly swapped value we need to bit swap the mask and that + * is what is accomplished by this function. + **/ +static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks) +{ + u32 mask = ntohs(input_masks->dst_port_mask); + mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT; + mask |= ntohs(input_masks->src_port_mask); + mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1); + mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2); + mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4); + return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8); +} + +/* + * These two macros are meant to address the fact that we have registers + * that are either all or in part big-endian. As a result on big-endian + * systems we will end up byte swapping the value to little-endian before + * it is byte swapped again and written to the hardware in the original + * big-endian format. 
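[Editor's note] ixgbe_get_fdirtcpm_82599() above is the classic mask-and-shift bit reversal; since there is no final half-word swap, each 16-bit port mask is reversed in place while the destination mask stays in the high half. A standalone check against a naive per-bit loop (illustrative only, not driver code):

#include <assert.h>
#include <stdint.h>

static uint32_t rev_each_half(uint32_t m)
{
        m = ((m & 0x55555555u) << 1) | ((m & 0xAAAAAAAAu) >> 1);
        m = ((m & 0x33333333u) << 2) | ((m & 0xCCCCCCCCu) >> 2);
        m = ((m & 0x0F0F0F0Fu) << 4) | ((m & 0xF0F0F0F0u) >> 4);
        return ((m & 0x00FF00FFu) << 8) | ((m & 0xFF00FF00u) >> 8);
}

static uint16_t rev16_naive(uint16_t v)
{
        uint16_t r = 0;
        int i;

        for (i = 0; i < 16; i++)
                if (v & (1u << i))
                        r |= (uint16_t)(1u << (15 - i));
        return r;
}

int main(void)
{
        uint32_t m = 0x12345678u;       /* arbitrary test pattern */
        uint32_t want = ((uint32_t)rev16_naive(m >> 16) << 16) |
                        rev16_naive(m & 0xFFFF);

        assert(rev_each_half(m) == want);
        return 0;
}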
+ */ +#define IXGBE_STORE_AS_BE32(_value) \ + (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \ + (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24)) + +#define IXGBE_WRITE_REG_BE32(a, reg, value) \ + IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value))) + +#define IXGBE_STORE_AS_BE16(_value) \ + (((u16)(_value) >> 8) | ((u16)(_value) << 8)) + +/** * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter * @hw: pointer to hardware structure * @input: input bitstream @@ -1687,135 +1522,139 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, * hardware writes must be protected from one another. **/ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, - struct ixgbe_atr_input *input, + union ixgbe_atr_input *input, struct ixgbe_atr_input_masks *input_masks, u16 soft_id, u8 queue) { - u32 fdircmd = 0; u32 fdirhash; - u32 src_ipv4 = 0, dst_ipv4 = 0; - u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4; - u16 src_port, dst_port, vlan_id, flex_bytes; - u16 bucket_hash; - u8 l4type; - u8 fdirm = 0; - - /* Get our input values */ - ixgbe_atr_get_l4type_82599(input, &l4type); + u32 fdircmd; + u32 fdirport, fdirtcpm; + u32 fdirvlan; + /* start with VLAN, flex bytes, VM pool, and IPv6 destination masked */ + u32 fdirm = IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP | IXGBE_FDIRM_FLEX | + IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6; /* - * Check l4type formatting, and bail out before we touch the hardware + * Check flow_type formatting, and bail out before we touch the hardware * if there's a configuration issue */ - switch (l4type & IXGBE_ATR_L4TYPE_MASK) { - case IXGBE_ATR_L4TYPE_TCP: - fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP; - break; - case IXGBE_ATR_L4TYPE_UDP: - fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP; - break; - case IXGBE_ATR_L4TYPE_SCTP: - fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP; + switch (input->formatted.flow_type) { + case IXGBE_ATR_FLOW_TYPE_IPV4: + /* use the L4 protocol mask for raw IPv4/IPv6 traffic */ + fdirm |= IXGBE_FDIRM_L4P; + case IXGBE_ATR_FLOW_TYPE_SCTPV4: + if (input_masks->dst_port_mask || input_masks->src_port_mask) { + hw_dbg(hw, " Error on src/dst port mask\n"); + return IXGBE_ERR_CONFIG; + } + case IXGBE_ATR_FLOW_TYPE_TCPV4: + case IXGBE_ATR_FLOW_TYPE_UDPV4: break; default: - hw_dbg(hw, "Error on l4type input\n"); + hw_dbg(hw, " Error on flow type input\n"); return IXGBE_ERR_CONFIG; } - bucket_hash = ixgbe_atr_compute_hash_82599(input, - IXGBE_ATR_BUCKET_HASH_KEY); - - /* bucket_hash is only 15 bits */ - bucket_hash &= IXGBE_ATR_HASH_MASK; - - ixgbe_atr_get_vlan_id_82599(input, &vlan_id); - ixgbe_atr_get_src_port_82599(input, &src_port); - ixgbe_atr_get_dst_port_82599(input, &dst_port); - ixgbe_atr_get_flex_byte_82599(input, &flex_bytes); - - fdirhash = soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash; - - /* Now figure out if we're IPv4 or IPv6 */ - if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) { - /* IPv6 */ - ixgbe_atr_get_src_ipv6_82599(input, &src_ipv6_1, &src_ipv6_2, - &src_ipv6_3, &src_ipv6_4); - - IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), src_ipv6_1); - IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), src_ipv6_2); - IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), src_ipv6_3); - /* The last 4 bytes is the same register as IPv4 */ - IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv6_4); - - fdircmd |= IXGBE_FDIRCMD_IPV6; - fdircmd |= IXGBE_FDIRCMD_IPv6DMATCH; - } else { - /* IPv4 */ - ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4); - IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4); - } - - ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4); - 
IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, dst_ipv4); - - IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id | - (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT))); - IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port | - (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT))); - /* - * Program the relevant mask registers. L4type cannot be - * masked out in this implementation. + * Program the relevant mask registers. If src/dst_port or src/dst_addr + * are zero, then assume a full mask for that field. Also assume that + * a VLAN of 0 is unspecified, so mask that out as well. L4type + * cannot be masked out in this implementation. * * This also assumes IPv4 only. IPv6 masking isn't supported at this * point in time. */ - IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask); - IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask); - - switch (l4type & IXGBE_ATR_L4TYPE_MASK) { - case IXGBE_ATR_L4TYPE_TCP: - IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, input_masks->src_port_mask); - IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, - (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) | - (input_masks->dst_port_mask << 16))); + + /* Program FDIRM */ + switch (ntohs(input_masks->vlan_id_mask) & 0xEFFF) { + case 0xEFFF: + /* Unmask VLAN ID - bit 0 and fall through to unmask prio */ + fdirm &= ~IXGBE_FDIRM_VLANID; + case 0xE000: + /* Unmask VLAN prio - bit 1 */ + fdirm &= ~IXGBE_FDIRM_VLANP; break; - case IXGBE_ATR_L4TYPE_UDP: - IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, input_masks->src_port_mask); - IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, - (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) | - (input_masks->src_port_mask << 16))); + case 0x0FFF: + /* Unmask VLAN ID - bit 0 */ + fdirm &= ~IXGBE_FDIRM_VLANID; break; - default: - /* this already would have failed above */ + case 0x0000: + /* do nothing, vlans already masked */ break; + default: + hw_dbg(hw, " Error on VLAN mask\n"); + return IXGBE_ERR_CONFIG; } - /* Program the last mask register, FDIRM */ - if (input_masks->vlan_id_mask) - /* Mask both VLAN and VLANP - bits 0 and 1 */ - fdirm |= 0x3; - - if (input_masks->data_mask) - /* Flex bytes need masking, so mask the whole thing - bit 4 */ - fdirm |= 0x10; + if (input_masks->flex_mask & 0xFFFF) { + if ((input_masks->flex_mask & 0xFFFF) != 0xFFFF) { + hw_dbg(hw, " Error on flexible byte mask\n"); + return IXGBE_ERR_CONFIG; + } + /* Unmask Flex Bytes - bit 4 */ + fdirm &= ~IXGBE_FDIRM_FLEX; + } /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ - fdirm |= 0x24; - IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); - fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW; - fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE; - fdircmd |= IXGBE_FDIRCMD_LAST; - fdircmd |= IXGBE_FDIRCMD_QUEUE_EN; - fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; + /* store the TCP/UDP port masks, bit reversed from port layout */ + fdirtcpm = ixgbe_get_fdirtcpm_82599(input_masks); + + /* write both the same so that UDP and TCP use the same mask */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm); + IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm); + + /* store source and destination IP masks (big-enian) */ + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, + ~input_masks->src_ip_mask[0]); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, + ~input_masks->dst_ip_mask[0]); + + /* Apply masks to input data */ + input->formatted.vlan_id &= input_masks->vlan_id_mask; + input->formatted.flex_bytes &= input_masks->flex_mask; + input->formatted.src_port &= input_masks->src_port_mask; + input->formatted.dst_port &= input_masks->dst_port_mask; + input->formatted.src_ip[0] &= input_masks->src_ip_mask[0]; + input->formatted.dst_ip[0] &= 
input_masks->dst_ip_mask[0]; + + /* record vlan (little-endian) and flex_bytes(big-endian) */ + fdirvlan = + IXGBE_STORE_AS_BE16(ntohs(input->formatted.flex_bytes)); + fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT; + fdirvlan |= ntohs(input->formatted.vlan_id); + IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan); + + /* record source and destination port (little-endian)*/ + fdirport = ntohs(input->formatted.dst_port); + fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT; + fdirport |= ntohs(input->formatted.src_port); + IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport); + + /* record the first 32 bits of the destination address (big-endian) */ + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]); + + /* record the source address (big-endian) */ + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]); + + /* configure FDIRCMD register */ + fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | + IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; + fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; + fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; + + /* we only want the bucket hash so drop the upper 16 bits */ + fdirhash = ixgbe_atr_compute_hash_82599(input, + IXGBE_ATR_BUCKET_HASH_KEY); + fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); return 0; } + /** * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register * @hw: pointer to hardware structure diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 23ff23e..2002ea8 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -1477,9 +1477,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); reg_ctl &= ~IXGBE_RXCTRL_RXEN; IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl); - reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx)); - reg_ctl &= ~IXGBE_RXDCTL_ENABLE; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx), reg_ctl); + ixgbe_disable_rx_queue(adapter, rx_ring); /* now Tx */ reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx)); @@ -2279,10 +2277,11 @@ static int ixgbe_set_rx_ntuple(struct net_device *dev, struct ethtool_rx_ntuple *cmd) { struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ethtool_rx_ntuple_flow_spec fs = cmd->fs; - struct ixgbe_atr_input input_struct; + struct ethtool_rx_ntuple_flow_spec *fs = &cmd->fs; + union ixgbe_atr_input input_struct; struct ixgbe_atr_input_masks input_masks; int target_queue; + int err; if (adapter->hw.mac.type == ixgbe_mac_82598EB) return -EOPNOTSUPP; @@ -2291,67 +2290,122 @@ static int ixgbe_set_rx_ntuple(struct net_device *dev, * Don't allow programming if the action is a queue greater than * the number of online Tx queues. 
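[Editor's note] The IXGBE_STORE_AS_BE32() helper introduced a little earlier in this ixgbe_82599.c hunk is an open-coded 32-bit byte swap; paired with ntohl() in IXGBE_WRITE_REG_BE32() it keeps the partly big-endian filter registers holding network-order bytes on either host endianness, as the comment there explains. A quick standalone check of the swap itself (illustrative only):

#include <assert.h>
#include <stdint.h>

#define STORE_AS_BE32(_value) \
        (((uint32_t)(_value) >> 24) | (((uint32_t)(_value) & 0x00FF0000) >> 8) | \
         (((uint32_t)(_value) & 0x0000FF00) << 8) | ((uint32_t)(_value) << 24))

int main(void)
{
        assert(STORE_AS_BE32(0x11223344u) == 0x44332211u);
        /* swapping twice is the identity */
        assert(STORE_AS_BE32(STORE_AS_BE32(0xDEADBEEFu)) == 0xDEADBEEFu);
        return 0;
}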
*/ - if ((fs.action >= adapter->num_tx_queues) || - (fs.action < ETHTOOL_RXNTUPLE_ACTION_DROP)) + if ((fs->action >= adapter->num_tx_queues) || + (fs->action < ETHTOOL_RXNTUPLE_ACTION_DROP)) return -EINVAL; - memset(&input_struct, 0, sizeof(struct ixgbe_atr_input)); + memset(&input_struct, 0, sizeof(union ixgbe_atr_input)); memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks)); - input_masks.src_ip_mask = fs.m_u.tcp_ip4_spec.ip4src; - input_masks.dst_ip_mask = fs.m_u.tcp_ip4_spec.ip4dst; - input_masks.src_port_mask = fs.m_u.tcp_ip4_spec.psrc; - input_masks.dst_port_mask = fs.m_u.tcp_ip4_spec.pdst; - input_masks.vlan_id_mask = fs.vlan_tag_mask; - /* only use the lowest 2 bytes for flex bytes */ - input_masks.data_mask = (fs.data_mask & 0xffff); - - switch (fs.flow_type) { + /* record flow type */ + switch (fs->flow_type) { + case IPV4_FLOW: + input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4; + break; case TCP_V4_FLOW: - ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_TCP); + input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; break; case UDP_V4_FLOW: - ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_UDP); + input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; break; case SCTP_V4_FLOW: - ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_SCTP); + input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; break; default: return -1; } - /* Mask bits from the inputs based on user-supplied mask */ - ixgbe_atr_set_src_ipv4_82599(&input_struct, - (fs.h_u.tcp_ip4_spec.ip4src & ~fs.m_u.tcp_ip4_spec.ip4src)); - ixgbe_atr_set_dst_ipv4_82599(&input_struct, - (fs.h_u.tcp_ip4_spec.ip4dst & ~fs.m_u.tcp_ip4_spec.ip4dst)); - /* 82599 expects these to be byte-swapped for perfect filtering */ - ixgbe_atr_set_src_port_82599(&input_struct, - ((ntohs(fs.h_u.tcp_ip4_spec.psrc)) & ~fs.m_u.tcp_ip4_spec.psrc)); - ixgbe_atr_set_dst_port_82599(&input_struct, - ((ntohs(fs.h_u.tcp_ip4_spec.pdst)) & ~fs.m_u.tcp_ip4_spec.pdst)); - - /* VLAN and Flex bytes are either completely masked or not */ - if (!fs.vlan_tag_mask) - ixgbe_atr_set_vlan_id_82599(&input_struct, fs.vlan_tag); - - if (!input_masks.data_mask) - /* make sure we only use the first 2 bytes of user data */ - ixgbe_atr_set_flex_byte_82599(&input_struct, - (fs.data & 0xffff)); + /* copy vlan tag minus the CFI bit */ + if ((fs->vlan_tag & 0xEFFF) || (~fs->vlan_tag_mask & 0xEFFF)) { + input_struct.formatted.vlan_id = htons(fs->vlan_tag & 0xEFFF); + if (!fs->vlan_tag_mask) { + input_masks.vlan_id_mask = htons(0xEFFF); + } else { + switch (~fs->vlan_tag_mask & 0xEFFF) { + /* all of these are valid vlan-mask values */ + case 0xEFFF: + case 0xE000: + case 0x0FFF: + case 0x0000: + input_masks.vlan_id_mask = + htons(~fs->vlan_tag_mask); + break; + /* exit with error if vlan-mask is invalid */ + default: + e_err(drv, "Partial VLAN ID or " + "priority mask in vlan-mask is not " + "supported by hardware\n"); + return -1; + } + } + } + + /* make sure we only use the first 2 bytes of user data */ + if ((fs->data & 0xFFFF) || (~fs->data_mask & 0xFFFF)) { + input_struct.formatted.flex_bytes = htons(fs->data & 0xFFFF); + if (!(fs->data_mask & 0xFFFF)) { + input_masks.flex_mask = 0xFFFF; + } else if (~fs->data_mask & 0xFFFF) { + e_err(drv, "Partial user-def-mask is not " + "supported by hardware\n"); + return -1; + } + } + + /* + * Copy input into formatted structures + * + * These assignments are based on the following logic + * If neither input or mask are set assume value is masked out. 
+ * If input is set, but mask is not mask should default to accept all. + * If input is not set, but mask is set then mask likely results in 0. + * If input is set and mask is set then assign both. + */ + if (fs->h_u.tcp_ip4_spec.ip4src || ~fs->m_u.tcp_ip4_spec.ip4src) { + input_struct.formatted.src_ip[0] = fs->h_u.tcp_ip4_spec.ip4src; + if (!fs->m_u.tcp_ip4_spec.ip4src) + input_masks.src_ip_mask[0] = 0xFFFFFFFF; + else + input_masks.src_ip_mask[0] = + ~fs->m_u.tcp_ip4_spec.ip4src; + } + if (fs->h_u.tcp_ip4_spec.ip4dst || ~fs->m_u.tcp_ip4_spec.ip4dst) { + input_struct.formatted.dst_ip[0] = fs->h_u.tcp_ip4_spec.ip4dst; + if (!fs->m_u.tcp_ip4_spec.ip4dst) + input_masks.dst_ip_mask[0] = 0xFFFFFFFF; + else + input_masks.dst_ip_mask[0] = + ~fs->m_u.tcp_ip4_spec.ip4dst; + } + if (fs->h_u.tcp_ip4_spec.psrc || ~fs->m_u.tcp_ip4_spec.psrc) { + input_struct.formatted.src_port = fs->h_u.tcp_ip4_spec.psrc; + if (!fs->m_u.tcp_ip4_spec.psrc) + input_masks.src_port_mask = 0xFFFF; + else + input_masks.src_port_mask = ~fs->m_u.tcp_ip4_spec.psrc; + } + if (fs->h_u.tcp_ip4_spec.pdst || ~fs->m_u.tcp_ip4_spec.pdst) { + input_struct.formatted.dst_port = fs->h_u.tcp_ip4_spec.pdst; + if (!fs->m_u.tcp_ip4_spec.pdst) + input_masks.dst_port_mask = 0xFFFF; + else + input_masks.dst_port_mask = ~fs->m_u.tcp_ip4_spec.pdst; + } /* determine if we need to drop or route the packet */ - if (fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP) + if (fs->action == ETHTOOL_RXNTUPLE_ACTION_DROP) target_queue = MAX_RX_QUEUES - 1; else - target_queue = fs.action; + target_queue = fs->action; spin_lock(&adapter->fdir_perfect_lock); - ixgbe_fdir_add_perfect_filter_82599(&adapter->hw, &input_struct, - &input_masks, 0, target_queue); + err = ixgbe_fdir_add_perfect_filter_82599(&adapter->hw, + &input_struct, + &input_masks, 0, + target_queue); spin_unlock(&adapter->fdir_perfect_lock); - return 0; + return err ? 
-1 : 0; } static const struct ethtool_ops ixgbe_ethtool_ops = { diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 38ab4f3..a060610 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -3024,6 +3024,36 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, } } +void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + int wait_loop = IXGBE_MAX_RX_DESC_POLL; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); + rxdctl &= ~IXGBE_RXDCTL_ENABLE; + + /* write value back with RXDCTL.ENABLE bit cleared */ + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); + + if (hw->mac.type == ixgbe_mac_82598EB && + !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) + return; + + /* the hardware may take up to 100us to really disable the rx queue */ + do { + udelay(10); + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); + } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE)); + + if (!wait_loop) { + e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within " + "the polling period\n", reg_idx); + } +} + void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, struct ixgbe_ring *ring) { @@ -3034,9 +3064,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, /* disable queue to avoid issues while updating state */ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), - rxdctl & ~IXGBE_RXDCTL_ENABLE); - IXGBE_WRITE_FLUSH(hw); + ixgbe_disable_rx_queue(adapter, ring); IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32))); IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32)); @@ -4064,7 +4092,11 @@ void ixgbe_down(struct ixgbe_adapter *adapter) rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); - IXGBE_WRITE_FLUSH(hw); + /* disable all enabled rx queues */ + for (i = 0; i < adapter->num_rx_queues; i++) + /* this call also flushes the previous write */ + ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]); + msleep(10); netif_tx_stop_all_queues(netdev); @@ -4789,6 +4821,12 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; + if (adapter->flags & (IXGBE_FLAG_FDIR_HASH_CAPABLE | + IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) { + e_err(probe, + "Flow Director is not supported while multiple " + "queues are disabled. 
Disabling Flow Director\n"); + } adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; adapter->atr_sample_rate = 0; @@ -5094,16 +5132,11 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; - if (dev->features & NETIF_F_NTUPLE) { - /* Flow Director perfect filter enabled */ - adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; - adapter->atr_sample_rate = 0; - spin_lock_init(&adapter->fdir_perfect_lock); - } else { - /* Flow Director hash filters enabled */ - adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; - adapter->atr_sample_rate = 20; - } + /* n-tuple support exists, always init our spinlock */ + spin_lock_init(&adapter->fdir_perfect_lock); + /* Flow Director hash filters enabled */ + adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; + adapter->atr_sample_rate = 20; adapter->ring_feature[RING_F_FDIR].indices = IXGBE_MAX_FDIR_INDICES; adapter->fdir_pballoc = 0; @@ -6474,38 +6507,92 @@ static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring, writel(i, tx_ring->tail); } -static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, - u8 queue, u32 tx_flags, __be16 protocol) +static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb, + u32 tx_flags, __be16 protocol) { - struct ixgbe_atr_input atr_input; - struct iphdr *iph = ip_hdr(skb); - struct ethhdr *eth = (struct ethhdr *)skb->data; + struct ixgbe_q_vector *q_vector = ring->q_vector; + union ixgbe_atr_hash_dword input = { .dword = 0 }; + union ixgbe_atr_hash_dword common = { .dword = 0 }; + union { + unsigned char *network; + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + } hdr; struct tcphdr *th; - u16 vlan_id; + __be16 vlan_id; - /* Right now, we support IPv4 w/ TCP only */ - if (protocol != htons(ETH_P_IP) || - iph->protocol != IPPROTO_TCP) + /* if ring doesn't have a interrupt vector, cannot perform ATR */ + if (!q_vector) return; - memset(&atr_input, 0, sizeof(struct ixgbe_atr_input)); + /* do nothing if sampling is disabled */ + if (!ring->atr_sample_rate) + return; - vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >> - IXGBE_TX_FLAGS_VLAN_SHIFT; + ring->atr_count++; + + /* snag network header to get L4 type and address */ + hdr.network = skb_network_header(skb); + + /* Currently only IPv4/IPv6 with TCP is supported */ + if ((protocol != __constant_htons(ETH_P_IPV6) || + hdr.ipv6->nexthdr != IPPROTO_TCP) && + (protocol != __constant_htons(ETH_P_IP) || + hdr.ipv4->protocol != IPPROTO_TCP)) + return; th = tcp_hdr(skb); - ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id); - ixgbe_atr_set_src_port_82599(&atr_input, th->dest); - ixgbe_atr_set_dst_port_82599(&atr_input, th->source); - ixgbe_atr_set_flex_byte_82599(&atr_input, eth->h_proto); - ixgbe_atr_set_l4type_82599(&atr_input, IXGBE_ATR_L4TYPE_TCP); - /* src and dst are inverted, think how the receiver sees them */ - ixgbe_atr_set_src_ipv4_82599(&atr_input, iph->daddr); - ixgbe_atr_set_dst_ipv4_82599(&atr_input, iph->saddr); + /* skip this packet since the socket is closing */ + if (th->fin) + return; + + /* sample on all syn packets or once every atr sample count */ + if (!th->syn && (ring->atr_count < ring->atr_sample_rate)) + return; + + /* reset sample count */ + ring->atr_count = 0; + + vlan_id = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT); + + /* + * src and dst are inverted, think how the receiver sees them + * + * The input is broken into two sections, a 
non-compressed section + * containing vm_pool, vlan_id, and flow_type. The rest of the data + * is XORed together and stored in the compressed dword. + */ + input.formatted.vlan_id = vlan_id; + + /* + * since src port and flex bytes occupy the same word XOR them together + * and write the value to source port portion of compressed dword + */ + if (vlan_id) + common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q); + else + common.port.src ^= th->dest ^ protocol; + common.port.dst ^= th->source; + + if (protocol == __constant_htons(ETH_P_IP)) { + input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; + common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; + } else { + input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6; + common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^ + hdr.ipv6->saddr.s6_addr32[1] ^ + hdr.ipv6->saddr.s6_addr32[2] ^ + hdr.ipv6->saddr.s6_addr32[3] ^ + hdr.ipv6->daddr.s6_addr32[0] ^ + hdr.ipv6->daddr.s6_addr32[1] ^ + hdr.ipv6->daddr.s6_addr32[2] ^ + hdr.ipv6->daddr.s6_addr32[3]; + } /* This assumes the Rx queue and Tx queue are bound to the same CPU */ - ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue); + ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw, + input, common, ring->queue_index); } static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size) @@ -6676,16 +6763,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len); if (count) { /* add the ATR filter if ATR is on */ - if (tx_ring->atr_sample_rate) { - ++tx_ring->atr_count; - if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) && - test_bit(__IXGBE_TX_FDIR_INIT_DONE, - &tx_ring->state)) { - ixgbe_atr(adapter, skb, tx_ring->queue_index, - tx_flags, protocol); - tx_ring->atr_count = 0; - } - } + if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) + ixgbe_atr(tx_ring, skb, tx_flags, protocol); txq = netdev_get_tx_queue(netdev, tx_ring->queue_index); txq->tx_bytes += skb->len; txq->tx_packets++; diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index 446f3467..fd3358f 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -1947,10 +1947,9 @@ enum ixgbe_fdir_pballoc_type { #define IXGBE_FDIRM_VLANID 0x00000001 #define IXGBE_FDIRM_VLANP 0x00000002 #define IXGBE_FDIRM_POOL 0x00000004 -#define IXGBE_FDIRM_L3P 0x00000008 -#define IXGBE_FDIRM_L4P 0x00000010 -#define IXGBE_FDIRM_FLEX 0x00000020 -#define IXGBE_FDIRM_DIPv6 0x00000040 +#define IXGBE_FDIRM_L4P 0x00000008 +#define IXGBE_FDIRM_FLEX 0x00000010 +#define IXGBE_FDIRM_DIPv6 0x00000020 #define IXGBE_FDIRFREE_FREE_MASK 0xFFFF #define IXGBE_FDIRFREE_FREE_SHIFT 0 @@ -1990,6 +1989,7 @@ enum ixgbe_fdir_pballoc_type { #define IXGBE_FDIRCMD_LAST 0x00000800 #define IXGBE_FDIRCMD_COLLISION 0x00001000 #define IXGBE_FDIRCMD_QUEUE_EN 0x00008000 +#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5 #define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16 #define IXGBE_FDIRCMD_VT_POOL_SHIFT 24 #define IXGBE_FDIR_INIT_DONE_POLL 10 @@ -2147,51 +2147,80 @@ typedef u32 ixgbe_physical_layer; #define FC_LOW_WATER(MTU) (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT)) /* Software ATR hash keys */ -#define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D -#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x14364D17 - -/* Software ATR input stream offsets and masks */ -#define IXGBE_ATR_VLAN_OFFSET 0 -#define IXGBE_ATR_SRC_IPV6_OFFSET 2 -#define IXGBE_ATR_SRC_IPV4_OFFSET 14 -#define IXGBE_ATR_DST_IPV6_OFFSET 18 -#define IXGBE_ATR_DST_IPV4_OFFSET 30 -#define IXGBE_ATR_SRC_PORT_OFFSET 34 
-#define IXGBE_ATR_DST_PORT_OFFSET 36 -#define IXGBE_ATR_FLEX_BYTE_OFFSET 38 -#define IXGBE_ATR_VM_POOL_OFFSET 40 -#define IXGBE_ATR_L4TYPE_OFFSET 41 +#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2 +#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614 +/* Software ATR input stream values and masks */ +#define IXGBE_ATR_HASH_MASK 0x7fff #define IXGBE_ATR_L4TYPE_MASK 0x3 -#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4 #define IXGBE_ATR_L4TYPE_UDP 0x1 #define IXGBE_ATR_L4TYPE_TCP 0x2 #define IXGBE_ATR_L4TYPE_SCTP 0x3 -#define IXGBE_ATR_HASH_MASK 0x7fff +#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4 +enum ixgbe_atr_flow_type { + IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0, + IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1, + IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2, + IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3, + IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4, + IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5, + IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6, + IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7, +}; /* Flow Director ATR input struct. */ -struct ixgbe_atr_input { - /* Byte layout in order, all values with MSB first: +union ixgbe_atr_input { + /* + * Byte layout in order, all values with MSB first: * + * vm_pool - 1 byte + * flow_type - 1 byte * vlan_id - 2 bytes * src_ip - 16 bytes * dst_ip - 16 bytes * src_port - 2 bytes * dst_port - 2 bytes * flex_bytes - 2 bytes - * vm_pool - 1 byte - * l4type - 1 byte + * rsvd0 - 2 bytes - space reserved must be 0. */ - u8 byte_stream[42]; + struct { + u8 vm_pool; + u8 flow_type; + __be16 vlan_id; + __be32 dst_ip[4]; + __be32 src_ip[4]; + __be16 src_port; + __be16 dst_port; + __be16 flex_bytes; + __be16 rsvd0; + } formatted; + __be32 dword_stream[11]; +}; + +/* Flow Director compressed ATR hash input struct */ +union ixgbe_atr_hash_dword { + struct { + u8 vm_pool; + u8 flow_type; + __be16 vlan_id; + } formatted; + __be32 ip; + struct { + __be16 src; + __be16 dst; + } port; + __be16 flex_bytes; + __be32 dword; }; struct ixgbe_atr_input_masks { - u32 src_ip_mask; - u32 dst_ip_mask; - u16 src_port_mask; - u16 dst_port_mask; - u16 vlan_id_mask; - u16 data_mask; + __be16 rsvd0; + __be16 vlan_id_mask; + __be32 dst_ip_mask[4]; + __be32 src_ip_mask[4]; + __be16 src_port_mask; + __be16 dst_port_mask; + __be16 flex_mask; }; enum ixgbe_eeprom_type { diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c index 6d6806b..897f576 100644 --- a/drivers/net/mlx4/en_netdev.c +++ b/drivers/net/mlx4/en_netdev.c @@ -972,7 +972,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, int i; int err; - dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num); + dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv), + prof->tx_ring_num, prof->rx_ring_num); if (dev == NULL) { mlx4_err(mdev, "Net device allocation failed\n"); return -ENOMEM; diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index 2c15891..e953793 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c @@ -1536,6 +1536,7 @@ static struct pcmcia_device_id pcnet_ids[] = { PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722), PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2), PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a), + PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether CF-TD LAN Card", 0x5261440f, 0x8797663b), PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd), PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d), PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 
0xc49bd73d), diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c index 78d70a6..a1b82c9 100644 --- a/drivers/net/ppp_async.c +++ b/drivers/net/ppp_async.c @@ -32,6 +32,7 @@ #include <linux/init.h> #include <linux/jiffies.h> #include <linux/slab.h> +#include <asm/unaligned.h> #include <asm/uaccess.h> #include <asm/string.h> @@ -542,7 +543,7 @@ ppp_async_encode(struct asyncppp *ap) data = ap->tpkt->data; count = ap->tpkt->len; fcs = ap->tfcs; - proto = (data[0] << 8) + data[1]; + proto = get_unaligned_be16(data); /* * LCP packets with code values between 1 (configure-reqest) @@ -963,7 +964,7 @@ static void async_lcp_peek(struct asyncppp *ap, unsigned char *data, code = data[0]; if (code != CONFACK && code != CONFREQ) return; - dlen = (data[2] << 8) + data[3]; + dlen = get_unaligned_be16(data + 2); if (len < dlen) return; /* packet got truncated or length is bogus */ @@ -997,15 +998,14 @@ static void async_lcp_peek(struct asyncppp *ap, unsigned char *data, while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) { switch (data[0]) { case LCP_MRU: - val = (data[2] << 8) + data[3]; + val = get_unaligned_be16(data + 2); if (inbound) ap->mru = val; else ap->chan.mtu = val; break; case LCP_ASYNCMAP: - val = (data[2] << 24) + (data[3] << 16) - + (data[4] << 8) + data[5]; + val = get_unaligned_be32(data + 2); if (inbound) ap->raccm = val; else diff --git a/drivers/net/ppp_deflate.c b/drivers/net/ppp_deflate.c index 695bc83..4358330 100644 --- a/drivers/net/ppp_deflate.c +++ b/drivers/net/ppp_deflate.c @@ -41,6 +41,7 @@ #include <linux/ppp-comp.h> #include <linux/zlib.h> +#include <asm/unaligned.h> /* * State for a Deflate (de)compressor. @@ -232,11 +233,9 @@ static int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf, */ wptr[0] = PPP_ADDRESS(rptr); wptr[1] = PPP_CONTROL(rptr); - wptr[2] = PPP_COMP >> 8; - wptr[3] = PPP_COMP; + put_unaligned_be16(PPP_COMP, wptr + 2); wptr += PPP_HDRLEN; - wptr[0] = state->seqno >> 8; - wptr[1] = state->seqno; + put_unaligned_be16(state->seqno, wptr); wptr += DEFLATE_OVHD; olen = PPP_HDRLEN + DEFLATE_OVHD; state->strm.next_out = wptr; @@ -451,7 +450,7 @@ static int z_decompress(void *arg, unsigned char *ibuf, int isize, } /* Check the sequence number. 
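[Editor's note] The ppp_async and ppp_deflate hunks above (and the ppp_generic, ppp_mppe and ppp_synctty ones that follow) all make the same substitution: open-coded big-endian byte assembly becomes get_unaligned_be16/32() and put_unaligned_be16(), which keep the accesses alignment-safe while letting architectures with cheap unaligned loads use a single load or store. A minimal sketch of the equivalence; peek_proto/poke_proto are hypothetical helpers, not from this patch:

#include <linux/types.h>
#include <asm/unaligned.h>

/* old style: (data[0] << 8) + data[1] */
static u16 peek_proto(const unsigned char *data)
{
        return get_unaligned_be16(data);
}

/* old style: data[0] = proto >> 8; data[1] = proto; */
static void poke_proto(unsigned char *data, u16 proto)
{
        put_unaligned_be16(proto, data);
}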
*/ - seq = (ibuf[PPP_HDRLEN] << 8) + ibuf[PPP_HDRLEN+1]; + seq = get_unaligned_be16(ibuf + PPP_HDRLEN); if (seq != (state->seqno & 0xffff)) { if (state->debug) printk(KERN_DEBUG "z_decompress%d: bad seq # %d, expected %d\n", diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index 6456484..c7a6c44 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c @@ -46,6 +46,7 @@ #include <linux/device.h> #include <linux/mutex.h> #include <linux/slab.h> +#include <asm/unaligned.h> #include <net/slhc_vj.h> #include <asm/atomic.h> @@ -210,7 +211,7 @@ struct ppp_net { }; /* Get the PPP protocol number from a skb */ -#define PPP_PROTO(skb) (((skb)->data[0] << 8) + (skb)->data[1]) +#define PPP_PROTO(skb) get_unaligned_be16((skb)->data) /* We limit the length of ppp->file.rq to this (arbitrary) value */ #define PPP_MAX_RQLEN 32 @@ -964,8 +965,7 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev) pp = skb_push(skb, 2); proto = npindex_to_proto[npi]; - pp[0] = proto >> 8; - pp[1] = proto; + put_unaligned_be16(proto, pp); netif_stop_queue(dev); skb_queue_tail(&ppp->file.xq, skb); @@ -1473,8 +1473,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) q = skb_put(frag, flen + hdrlen); /* make the MP header */ - q[0] = PPP_MP >> 8; - q[1] = PPP_MP; + put_unaligned_be16(PPP_MP, q); if (ppp->flags & SC_MP_XSHORTSEQ) { q[2] = bits + ((ppp->nxseq >> 8) & 0xf); q[3] = ppp->nxseq; diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c index 6d1a1b8..9a1849a 100644 --- a/drivers/net/ppp_mppe.c +++ b/drivers/net/ppp_mppe.c @@ -55,6 +55,7 @@ #include <linux/ppp_defs.h> #include <linux/ppp-comp.h> #include <linux/scatterlist.h> +#include <asm/unaligned.h> #include "ppp_mppe.h" @@ -395,16 +396,14 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf, */ obuf[0] = PPP_ADDRESS(ibuf); obuf[1] = PPP_CONTROL(ibuf); - obuf[2] = PPP_COMP >> 8; /* isize + MPPE_OVHD + 1 */ - obuf[3] = PPP_COMP; /* isize + MPPE_OVHD + 2 */ + put_unaligned_be16(PPP_COMP, obuf + 2); obuf += PPP_HDRLEN; state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE; if (state->debug >= 7) printk(KERN_DEBUG "mppe_compress[%d]: ccount %d\n", state->unit, state->ccount); - obuf[0] = state->ccount >> 8; - obuf[1] = state->ccount & 0xff; + put_unaligned_be16(state->ccount, obuf); if (!state->stateful || /* stateless mode */ ((state->ccount & 0xff) == 0xff) || /* "flag" packet */ diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c index 4c95ec3..4e6b72f 100644 --- a/drivers/net/ppp_synctty.c +++ b/drivers/net/ppp_synctty.c @@ -45,6 +45,7 @@ #include <linux/completion.h> #include <linux/init.h> #include <linux/slab.h> +#include <asm/unaligned.h> #include <asm/uaccess.h> #define PPP_VERSION "2.4.2" @@ -563,7 +564,7 @@ ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb) int islcp; data = skb->data; - proto = (data[0] << 8) + data[1]; + proto = get_unaligned_be16(data); /* LCP packets with codes between 1 (configure-request) * and 7 (code-reject) must be sent as though no options diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h index 9c2a02d..44e316f 100644 --- a/drivers/net/qlcnic/qlcnic.h +++ b/drivers/net/qlcnic/qlcnic.h @@ -34,8 +34,8 @@ #define _QLCNIC_LINUX_MAJOR 5 #define _QLCNIC_LINUX_MINOR 0 -#define _QLCNIC_LINUX_SUBVERSION 14 -#define QLCNIC_LINUX_VERSIONID "5.0.14" +#define _QLCNIC_LINUX_SUBVERSION 15 +#define QLCNIC_LINUX_VERSIONID "5.0.15" #define QLCNIC_DRV_IDC_VER 0x01 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) @@ -289,6 +289,26 @@ struct uni_data_desc{ u32 reserved[5]; }; +/* Flash Defines and Structures */ +#define QLCNIC_FLT_LOCATION 0x3F1000 +#define QLCNIC_FW_IMAGE_REGION 0x74 +struct qlcnic_flt_header { + u16 version; + u16 len; + u16 checksum; + u16 reserved; +}; + +struct qlcnic_flt_entry { + u8 region; + u8 reserved0; + u8 attrib; + u8 reserved1; + u32 size; + u32 start_addr; + u32 end_add; +}; + /* Magic number to let user know flash is programmed */ #define QLCNIC_BDINFO_MAGIC 0x12345678 diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c index 1e7af70..4c14510 100644 --- a/drivers/net/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/qlcnic/qlcnic_ethtool.c @@ -672,7 +672,7 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test, if (data[1]) eth_test->flags |= ETH_TEST_FL_FAILED; - if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + if (eth_test->flags & ETH_TEST_FL_OFFLINE) { data[2] = qlcnic_irq_test(dev); if (data[2]) eth_test->flags |= ETH_TEST_FL_FAILED; diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c index 9b9c7c3..a7f1d5b 100644 --- a/drivers/net/qlcnic/qlcnic_init.c +++ b/drivers/net/qlcnic/qlcnic_init.c @@ -627,12 +627,73 @@ qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) { return 0; } +static int qlcnic_get_flt_entry(struct qlcnic_adapter *adapter, u8 region, + struct qlcnic_flt_entry *region_entry) +{ + struct qlcnic_flt_header flt_hdr; + struct qlcnic_flt_entry *flt_entry; + int i = 0, ret; + u32 entry_size; + + memset(region_entry, 0, sizeof(struct qlcnic_flt_entry)); + ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION, + (u8 *)&flt_hdr, + sizeof(struct qlcnic_flt_header)); + if (ret) { + dev_warn(&adapter->pdev->dev, + "error reading flash layout header\n"); + return -EIO; + } + + entry_size = flt_hdr.len - sizeof(struct qlcnic_flt_header); + flt_entry = (struct qlcnic_flt_entry *)vzalloc(entry_size); + if (flt_entry == NULL) { + dev_warn(&adapter->pdev->dev, "error allocating memory\n"); + return -EIO; + } + + ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION + + sizeof(struct qlcnic_flt_header), + (u8 *)flt_entry, entry_size); + if (ret) { + dev_warn(&adapter->pdev->dev, + "error reading flash layout entries\n"); + goto err_out; + } + + while (i < (entry_size/sizeof(struct qlcnic_flt_entry))) { + if (flt_entry[i].region == region) + break; + i++; + } + if (i >= (entry_size/sizeof(struct qlcnic_flt_entry))) { + dev_warn(&adapter->pdev->dev, + "region=%x not found in %d regions\n", region, i); + ret = -EIO; + goto err_out; + } + memcpy(region_entry, &flt_entry[i], sizeof(struct qlcnic_flt_entry)); + +err_out: + vfree(flt_entry); + return ret; +} + int qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter) { + struct qlcnic_flt_entry fw_entry; u32 ver = -1, min_ver; + int ret; - qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, (int *)&ver); + ret = qlcnic_get_flt_entry(adapter, QLCNIC_FW_IMAGE_REGION, &fw_entry); + if (!ret) + /* 0-4:-signature, 4-8:-fw version */ + qlcnic_rom_fast_read(adapter, fw_entry.start_addr + 4, + (int *)&ver); + else + qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, + (int *)&ver); ver = QLCNIC_DECODE_VERSION(ver); min_ver = QLCNIC_MIN_FW_VERSION; diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c index 11e3a46..37c04b4 100644 --- a/drivers/net/qlcnic/qlcnic_main.c +++ b/drivers/net/qlcnic/qlcnic_main.c @@ -31,15 +31,15 @@ static const 
char qlcnic_driver_string[] = "QLogic 1/10 GbE " static struct workqueue_struct *qlcnic_wq; static int qlcnic_mac_learn; -module_param(qlcnic_mac_learn, int, 0644); +module_param(qlcnic_mac_learn, int, 0444); MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)"); static int use_msi = 1; -module_param(use_msi, int, 0644); +module_param(use_msi, int, 0444); MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled"); static int use_msi_x = 1; -module_param(use_msi_x, int, 0644); +module_param(use_msi_x, int, 0444); MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled"); static int auto_fw_reset = AUTO_FW_RESET_ENABLED; @@ -47,11 +47,11 @@ module_param(auto_fw_reset, int, 0644); MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled"); static int load_fw_file; -module_param(load_fw_file, int, 0644); +module_param(load_fw_file, int, 0444); MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file"); static int qlcnic_config_npars; -module_param(qlcnic_config_npars, int, 0644); +module_param(qlcnic_config_npars, int, 0444); MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled"); static int __devinit qlcnic_probe(struct pci_dev *pdev, diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 27a7c20..bb8645a 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -1632,36 +1632,134 @@ rtl_phy_write_fw(struct rtl8169_private *tp, const struct firmware *fw) { __le32 *phytable = (__le32 *)fw->data; struct net_device *dev = tp->dev; - size_t i; + size_t index, fw_size = fw->size / sizeof(*phytable); + u32 predata, count; if (fw->size % sizeof(*phytable)) { netif_err(tp, probe, dev, "odd sized firmware %zd\n", fw->size); return; } - for (i = 0; i < fw->size / sizeof(*phytable); i++) { - u32 action = le32_to_cpu(phytable[i]); + for (index = 0; index < fw_size; index++) { + u32 action = le32_to_cpu(phytable[index]); + u32 regno = (action & 0x0fff0000) >> 16; - if (!action) + switch(action & 0xf0000000) { + case PHY_READ: + case PHY_DATA_OR: + case PHY_DATA_AND: + case PHY_READ_EFUSE: + case PHY_CLEAR_READCOUNT: + case PHY_WRITE: + case PHY_WRITE_PREVIOUS: + case PHY_DELAY_MS: + break; + + case PHY_BJMPN: + if (regno > index) { + netif_err(tp, probe, tp->dev, + "Out of range of firmware\n"); + return; + } + break; + case PHY_READCOUNT_EQ_SKIP: + if (index + 2 >= fw_size) { + netif_err(tp, probe, tp->dev, + "Out of range of firmware\n"); + return; + } + break; + case PHY_COMP_EQ_SKIPN: + case PHY_COMP_NEQ_SKIPN: + case PHY_SKIPN: + if (index + 1 + regno >= fw_size) { + netif_err(tp, probe, tp->dev, + "Out of range of firmware\n"); + return; + } break; - if ((action & 0xf0000000) != PHY_WRITE) { - netif_err(tp, probe, dev, - "unknown action 0x%08x\n", action); + case PHY_READ_MAC_BYTE: + case PHY_WRITE_MAC_BYTE: + case PHY_WRITE_ERI_WORD: + default: + netif_err(tp, probe, tp->dev, + "Invalid action 0x%08x\n", action); return; } } - while (i-- != 0) { - u32 action = le32_to_cpu(*phytable); + predata = 0; + count = 0; + + for (index = 0; index < fw_size; ) { + u32 action = le32_to_cpu(phytable[index]); u32 data = action & 0x0000ffff; - u32 reg = (action & 0x0fff0000) >> 16; + u32 regno = (action & 0x0fff0000) >> 16; + + if (!action) + break; switch(action & 0xf0000000) { + case PHY_READ: + predata = rtl_readphy(tp, regno); + count++; + index++; + break; + case PHY_DATA_OR: + predata |= data; + index++; + break; + case PHY_DATA_AND: + predata &= data; + index++; + break; + case PHY_BJMPN: + index -= 
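The qlcnic module parameters above drop from 0644 to 0444. A minimal sketch of what the mode argument controls, in an illustrative standalone module; the parameter mirrors use_msi from the patch but the surrounding module is not the driver's:

#include <linux/module.h>
#include <linux/moduleparam.h>

/* 0444 exposes the value read-only under /sys/module/<name>/parameters/,
 * so it can still be inspected but no longer flipped at runtime after the
 * driver has already consumed it; 0644 would allow a root write-through. */
static int use_msi = 1;
module_param(use_msi, int, 0444);
MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
MODULE_LICENSE("GPL");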
regno; + break; + case PHY_READ_EFUSE: + predata = rtl8168d_efuse_read(tp->mmio_addr, regno); + index++; + break; + case PHY_CLEAR_READCOUNT: + count = 0; + index++; + break; case PHY_WRITE: - rtl_writephy(tp, reg, data); - phytable++; + rtl_writephy(tp, regno, data); + index++; + break; + case PHY_READCOUNT_EQ_SKIP: + if (count == data) + index += 2; + else + index += 1; + break; + case PHY_COMP_EQ_SKIPN: + if (predata == data) + index += regno; + index++; break; + case PHY_COMP_NEQ_SKIPN: + if (predata != data) + index += regno; + index++; + break; + case PHY_WRITE_PREVIOUS: + rtl_writephy(tp, regno, predata); + index++; + break; + case PHY_SKIPN: + index += regno + 1; + break; + case PHY_DELAY_MS: + mdelay(data); + index++; + break; + + case PHY_READ_MAC_BYTE: + case PHY_WRITE_MAC_BYTE: + case PHY_WRITE_ERI_WORD: default: BUG(); } @@ -3069,15 +3167,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rtl8168_driver_start(tp); } - rtl8169_init_phy(dev, tp); - - /* - * Pretend we are using VLANs; This bypasses a nasty bug where - * Interrupts stop flowing on high load on 8110SCd controllers. - */ - if (tp->mac_version == RTL_GIGA_MAC_VER_05) - RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan); - device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL); if (pci_dev_run_wake(pdev)) @@ -3127,6 +3216,7 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev) static int rtl8169_open(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; struct pci_dev *pdev = tp->pci_dev; int retval = -ENOMEM; @@ -3162,6 +3252,15 @@ static int rtl8169_open(struct net_device *dev) napi_enable(&tp->napi); + rtl8169_init_phy(dev, tp); + + /* + * Pretend we are using VLANs; This bypasses a nasty bug where + * Interrupts stop flowing on high load on 8110SCd controllers. 
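The r8169 firmware loader above grows from a write-only loop into a small interpreter: one pass validates every action and jump target, a second pass executes them. A compact sketch of that two-pass shape, with made-up opcodes and a caller-supplied write callback rather than the driver's PHY_* encoding:

#include <linux/types.h>

#define OP_WRITE 0x1			/* illustrative opcodes, not PHY_* */
#define OP_SKIP  0x2

struct fw_insn { u8 op; u8 arg; u16 data; };

/* Pass 1: reject unknown actions and skips that land past the image, so the
 * execution pass never needs bounds checks. */
static bool fw_validate(const struct fw_insn *p, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (p[i].op == OP_SKIP && i + 1 + p[i].arg >= n)
			return false;
		if (p[i].op != OP_WRITE && p[i].op != OP_SKIP)
			return false;
	}
	return true;
}

/* Pass 2: execute; the instruction pointer only moves by validated amounts. */
static void fw_run(const struct fw_insn *p, size_t n,
		   void (*reg_write)(u16 reg, u16 val))
{
	size_t i = 0;

	while (i < n) {
		if (p[i].op == OP_WRITE)
			reg_write(p[i].arg, p[i].data);
		i += (p[i].op == OP_SKIP) ? p[i].arg + 1 : 1;
	}
}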
+ */ + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan); + rtl_pll_power_up(tp); rtl_hw_start(dev); @@ -3171,7 +3270,7 @@ static int rtl8169_open(struct net_device *dev) tp->saved_wolopts = 0; pm_runtime_put_noidle(&pdev->dev); - rtl8169_check_link_status(dev, tp, tp->mmio_addr); + rtl8169_check_link_status(dev, tp, ioaddr); out: return retval; diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 39996bf..7d85a38 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -46,10 +46,6 @@ #include <asm/irq.h> -#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) -#define SKY2_VLAN_TAG_USED 1 -#endif - #include "sky2.h" #define DRV_NAME "sky2" @@ -1326,40 +1322,35 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return err; } -#ifdef SKY2_VLAN_TAG_USED -static void sky2_set_vlan_mode(struct sky2_hw *hw, u16 port, bool onoff) +#define NETIF_F_ALL_VLAN (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX) + +static void sky2_vlan_mode(struct net_device *dev) { - if (onoff) { + struct sky2_port *sky2 = netdev_priv(dev); + struct sky2_hw *hw = sky2->hw; + u16 port = sky2->port; + + if (dev->features & NETIF_F_HW_VLAN_RX) sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_ON); - sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), - TX_VLAN_TAG_ON); - } else { + else sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF); + + dev->vlan_features = dev->features &~ NETIF_F_ALL_VLAN; + if (dev->features & NETIF_F_HW_VLAN_TX) + sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), + TX_VLAN_TAG_ON); + else { sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF); + + /* Can't do transmit offload of vlan without hw vlan */ + dev->vlan_features &= ~(NETIF_F_TSO | NETIF_F_SG + | NETIF_F_ALL_CSUM); } } -static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) -{ - struct sky2_port *sky2 = netdev_priv(dev); - struct sky2_hw *hw = sky2->hw; - u16 port = sky2->port; - - netif_tx_lock_bh(dev); - napi_disable(&hw->napi); - - sky2->vlgrp = grp; - sky2_set_vlan_mode(hw, port, grp != NULL); - - sky2_read32(hw, B0_Y2_SP_LISR); - napi_enable(&hw->napi); - netif_tx_unlock_bh(dev); -} -#endif - /* Amount of required worst case padding in rx buffer */ static inline unsigned sky2_rx_pad(const struct sky2_hw *hw) { @@ -1635,9 +1626,7 @@ static void sky2_hw_up(struct sky2_port *sky2) sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, sky2->tx_ring_size - 1); -#ifdef SKY2_VLAN_TAG_USED - sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL); -#endif + sky2_vlan_mode(sky2->netdev); sky2_rx_start(sky2); } @@ -1780,7 +1769,7 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb, } ctrl = 0; -#ifdef SKY2_VLAN_TAG_USED + /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */ if (vlan_tx_tag_present(skb)) { if (!le) { @@ -1792,7 +1781,6 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb, le->length = cpu_to_be16(vlan_tx_tag_get(skb)); ctrl |= INS_VLAN; } -#endif /* Handle TCP checksum offload */ if (skb->ip_summed == CHECKSUM_PARTIAL) { @@ -2432,11 +2420,8 @@ static struct sk_buff *sky2_receive(struct net_device *dev, struct sk_buff *skb = NULL; u16 count = (status & GMR_FS_LEN) >> 16; -#ifdef SKY2_VLAN_TAG_USED - /* Account for vlan tag */ - if (sky2->vlgrp && (status & GMR_FS_VLAN)) - count -= VLAN_HLEN; -#endif + if (status & GMR_FS_VLAN) + count -= VLAN_HLEN; /* Account for vlan tag */ netif_printk(sky2, rx_status, KERN_DEBUG, dev, "rx slot %u status 0x%x len %d\n", @@ -2504,17 +2489,9 @@ static 
inline void sky2_tx_done(struct net_device *dev, u16 last) static inline void sky2_skb_rx(const struct sky2_port *sky2, u32 status, struct sk_buff *skb) { -#ifdef SKY2_VLAN_TAG_USED - u16 vlan_tag = be16_to_cpu(sky2->rx_tag); - if (sky2->vlgrp && (status & GMR_FS_VLAN)) { - if (skb->ip_summed == CHECKSUM_NONE) - vlan_hwaccel_receive_skb(skb, sky2->vlgrp, vlan_tag); - else - vlan_gro_receive(&sky2->hw->napi, sky2->vlgrp, - vlan_tag, skb); - return; - } -#endif + if (status & GMR_FS_VLAN) + __vlan_hwaccel_put_tag(skb, be16_to_cpu(sky2->rx_tag)); + if (skb->ip_summed == CHECKSUM_NONE) netif_receive_skb(skb); else @@ -2631,7 +2608,6 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx) goto exit_loop; break; -#ifdef SKY2_VLAN_TAG_USED case OP_RXVLAN: sky2->rx_tag = length; break; @@ -2639,7 +2615,6 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx) case OP_RXCHKSVLAN: sky2->rx_tag = length; /* fall through */ -#endif case OP_RXCHKS: if (likely(sky2->flags & SKY2_FLAG_RX_CHECKSUM)) sky2_rx_checksum(sky2, status); @@ -3042,6 +3017,10 @@ static int __devinit sky2_init(struct sky2_hw *hw) | SKY2_HW_NEW_LE | SKY2_HW_AUTO_TX_SUM | SKY2_HW_ADV_POWER_CTL; + + /* The workaround for status conflicts VLAN tag detection. */ + if (hw->chip_rev == CHIP_REV_YU_FE2_A0) + hw->flags |= SKY2_HW_VLAN_BROKEN; break; case CHIP_ID_YUKON_SUPR: @@ -3411,18 +3390,15 @@ static u32 sky2_supported_modes(const struct sky2_hw *hw) u32 modes = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half - | SUPPORTED_100baseT_Full - | SUPPORTED_Autoneg | SUPPORTED_TP; + | SUPPORTED_100baseT_Full; if (hw->flags & SKY2_HW_GIGABIT) modes |= SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full; return modes; } else - return SUPPORTED_1000baseT_Half - | SUPPORTED_1000baseT_Full - | SUPPORTED_Autoneg - | SUPPORTED_FIBRE; + return SUPPORTED_1000baseT_Half + | SUPPORTED_1000baseT_Full; } static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) @@ -3436,9 +3412,11 @@ static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) if (sky2_is_copper(hw)) { ecmd->port = PORT_TP; ecmd->speed = sky2->speed; + ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_TP; } else { ecmd->speed = SPEED_1000; ecmd->port = PORT_FIBRE; + ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE; } ecmd->advertising = sky2->advertising; @@ -3455,8 +3433,19 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) u32 supported = sky2_supported_modes(hw); if (ecmd->autoneg == AUTONEG_ENABLE) { + if (ecmd->advertising & ~supported) + return -EINVAL; + + if (sky2_is_copper(hw)) + sky2->advertising = ecmd->advertising | + ADVERTISED_TP | + ADVERTISED_Autoneg; + else + sky2->advertising = ecmd->advertising | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg; + sky2->flags |= SKY2_FLAG_AUTO_SPEED; - ecmd->advertising = supported; sky2->duplex = -1; sky2->speed = -1; } else { @@ -3500,8 +3489,6 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) sky2->flags &= ~SKY2_FLAG_AUTO_SPEED; } - sky2->advertising = ecmd->advertising; - if (netif_running(dev)) { sky2_phy_reinit(sky2); sky2_set_multicast(dev); @@ -4229,15 +4216,28 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom static int sky2_set_flags(struct net_device *dev, u32 data) { struct sky2_port *sky2 = netdev_priv(dev); - u32 supported = - (sky2->hw->flags & SKY2_HW_RSS_BROKEN) ? 
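sky2_skb_rx() above now records the hardware-stripped tag with __vlan_hwaccel_put_tag() and delivers the frame normally instead of routing it through a vlan_group. A minimal sketch of that receive convention; has_vlan and rx_tag stand in for the GMR_FS_VLAN status bit and the tag saved from the OP_RXVLAN status entry:

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

/* Sketch only: the core VLAN code picks the tag out of the skb, so the
 * driver no longer needs a vlan_group or a separate receive path. */
static void deliver_rx(struct sk_buff *skb, bool has_vlan, __be16 rx_tag)
{
	if (has_vlan)
		__vlan_hwaccel_put_tag(skb, be16_to_cpu(rx_tag));

	netif_receive_skb(skb);
}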
0 : ETH_FLAG_RXHASH; + unsigned long old_feat = dev->features; + u32 supported = 0; int rc; + if (!(sky2->hw->flags & SKY2_HW_RSS_BROKEN)) + supported |= ETH_FLAG_RXHASH; + + if (!(sky2->hw->flags & SKY2_HW_VLAN_BROKEN)) + supported |= ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN; + + printk(KERN_DEBUG "sky2 set_flags: supported %x data %x\n", + supported, data); + rc = ethtool_op_set_flags(dev, data, supported); if (rc) return rc; - rx_set_rss(dev); + if ((old_feat ^ dev->features) & NETIF_F_RXHASH) + rx_set_rss(dev); + + if ((old_feat ^ dev->features) & NETIF_F_ALL_VLAN) + sky2_vlan_mode(dev); return 0; } @@ -4273,6 +4273,7 @@ static const struct ethtool_ops sky2_ethtool_ops = { .get_sset_count = sky2_get_sset_count, .get_ethtool_stats = sky2_get_ethtool_stats, .set_flags = sky2_set_flags, + .get_flags = ethtool_op_get_flags, }; #ifdef CONFIG_SKY2_DEBUG @@ -4554,9 +4555,6 @@ static const struct net_device_ops sky2_netdev_ops[2] = { .ndo_change_mtu = sky2_change_mtu, .ndo_tx_timeout = sky2_tx_timeout, .ndo_get_stats64 = sky2_get_stats, -#ifdef SKY2_VLAN_TAG_USED - .ndo_vlan_rx_register = sky2_vlan_rx_register, -#endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = sky2_netpoll, #endif @@ -4572,9 +4570,6 @@ static const struct net_device_ops sky2_netdev_ops[2] = { .ndo_change_mtu = sky2_change_mtu, .ndo_tx_timeout = sky2_tx_timeout, .ndo_get_stats64 = sky2_get_stats, -#ifdef SKY2_VLAN_TAG_USED - .ndo_vlan_rx_register = sky2_vlan_rx_register, -#endif }, }; @@ -4625,7 +4620,8 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw, sky2->port = port; dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG - | NETIF_F_TSO | NETIF_F_GRO; + | NETIF_F_TSO | NETIF_F_GRO; + if (highmem) dev->features |= NETIF_F_HIGHDMA; @@ -4633,13 +4629,8 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw, if (!(hw->flags & SKY2_HW_RSS_BROKEN)) dev->features |= NETIF_F_RXHASH; -#ifdef SKY2_VLAN_TAG_USED - /* The workaround for FE+ status conflicts with VLAN tag detection. 
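sky2_set_flags() above adopts the usual ethtool .set_flags shape: let ethtool_op_set_flags() apply the request against a supported mask, then act only on feature bits that actually flipped. A sketch of that pattern with placeholder callbacks standing in for rx_set_rss() and sky2_vlan_mode():

#include <linux/netdevice.h>
#include <linux/ethtool.h>

static int example_set_flags(struct net_device *dev, u32 data, u32 supported,
			     void (*rss_changed)(struct net_device *),
			     void (*vlan_changed)(struct net_device *))
{
	unsigned long old_feat = dev->features;
	int rc = ethtool_op_set_flags(dev, data, supported);

	if (rc)
		return rc;

	/* Reconfigure only what the feature diff says has changed. */
	if ((old_feat ^ dev->features) & NETIF_F_RXHASH)
		rss_changed(dev);
	if ((old_feat ^ dev->features) &
	    (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX))
		vlan_changed(dev);

	return 0;
}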
*/ - if (!(sky2->hw->chip_id == CHIP_ID_YUKON_FE_P && - sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0)) { + if (!(hw->flags & SKY2_HW_VLAN_BROKEN)) dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; - } -#endif /* read the mac address */ memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN); diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h index 80bdc40..6861b0e 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h @@ -2236,11 +2236,8 @@ struct sky2_port { u16 rx_pending; u16 rx_data_size; u16 rx_nfrags; - -#ifdef SKY2_VLAN_TAG_USED u16 rx_tag; - struct vlan_group *vlgrp; -#endif + struct { unsigned long last; u32 mac_rp; @@ -2284,6 +2281,7 @@ struct sky2_hw { #define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */ #define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */ #define SKY2_HW_RSS_BROKEN 0x00000100 +#define SKY2_HW_VLAN_BROKEN 0x00000200 u8 chip_id; u8 chip_rev; diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index cdbeec9..546de57 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -488,7 +488,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(!netif_carrier_ok(dev) || (frags > 1 && !xennet_can_sg(dev)) || - netif_needs_gso(dev, skb))) { + netif_needs_gso(skb, netif_skb_features(skb)))) { spin_unlock_irq(&np->tx_lock); goto drop; } diff --git a/include/linux/bfin_mac.h b/include/linux/bfin_mac.h index 904dec7..a69554e 100644 --- a/include/linux/bfin_mac.h +++ b/include/linux/bfin_mac.h @@ -24,6 +24,7 @@ struct bfin_mii_bus_platform_data { const unsigned short *mac_peripherals; int phy_mode; unsigned int phy_mask; + unsigned short vlan1_mask, vlan2_mask; }; #endif diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index f16a010..bec8b82 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -48,8 +48,10 @@ extern int eth_validate_addr(struct net_device *dev); -extern struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count); +extern struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs, + unsigned int rxqs); #define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1) +#define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count) /** * is_zero_ether_addr - Determine if give Ethernet address is all zeros. diff --git a/include/linux/fec.h b/include/linux/fec.h index 5d3523d..bcff455 100644 --- a/include/linux/fec.h +++ b/include/linux/fec.h @@ -3,6 +3,8 @@ * Copyright (c) 2009 Orex Computed Radiography * Baruch Siach <baruch@tkos.co.il> * + * Copyright (C) 2010 Freescale Semiconductor, Inc. 
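The etherdevice.h hunk above introduces alloc_etherdev_mqs() and turns the old entry points into wrappers. A brief usage sketch, assuming an imaginary driver private struct, for a device with more transmit than receive queues:

#include <linux/etherdevice.h>

struct my_priv { int dummy; };	/* illustrative private data */

/* Before this change a multiqueue driver had to allocate the same number of
 * RX and TX queues; now each side can be sized independently. */
static struct net_device *example_alloc(void)
{
	return alloc_etherdev_mqs(sizeof(struct my_priv), 8, 1);
}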
+ * * Header file for the FEC platform data * * This program is free software; you can redistribute it and/or modify @@ -16,6 +18,7 @@ struct fec_platform_data { phy_interface_t phy; + unsigned char mac[ETH_ALEN]; }; #endif diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index f7e73c3..dd3f201 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -103,7 +103,7 @@ struct __fdb_entry { extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); -typedef int (*br_should_route_hook_t)(struct sk_buff *skb); +typedef int br_should_route_hook_t(struct sk_buff *skb); extern br_should_route_hook_t __rcu *br_should_route_hook; #endif diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 0f6b1c9..be4957c 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2191,11 +2191,15 @@ static inline void netif_addr_unlock_bh(struct net_device *dev) extern void ether_setup(struct net_device *dev); /* Support for loadable net-drivers */ -extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, +extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, void (*setup)(struct net_device *), - unsigned int queue_count); + unsigned int txqs, unsigned int rxqs); #define alloc_netdev(sizeof_priv, name, setup) \ - alloc_netdev_mq(sizeof_priv, name, setup, 1) + alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1) + +#define alloc_netdev_mq(sizeof_priv, name, setup, count) \ + alloc_netdev_mqs(sizeof_priv, name, setup, count, count) + extern int register_netdev(struct net_device *dev); extern void unregister_netdev(struct net_device *dev); @@ -2303,7 +2307,7 @@ unsigned long netdev_fix_features(unsigned long features, const char *name); void netif_stacked_transfer_operstate(const struct net_device *rootdev, struct net_device *dev); -int netif_get_vlan_features(struct sk_buff *skb, struct net_device *dev); +int netif_skb_features(struct sk_buff *skb); static inline int net_gso_ok(int features, int gso_type) { @@ -2317,16 +2321,10 @@ static inline int skb_gso_ok(struct sk_buff *skb, int features) (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); } -static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) +static inline int netif_needs_gso(struct sk_buff *skb, int features) { - if (skb_is_gso(skb)) { - int features = netif_get_vlan_features(skb, dev); - - return (!skb_gso_ok(skb, features) || - unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); - } - - return 0; + return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || + unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); } static inline void netif_set_gso_max_size(struct net_device *dev, diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 742bec0..6712e71 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -472,7 +472,7 @@ extern void xt_free_table_info(struct xt_table_info *info); * necessary for reading the counters. 
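The if_bridge.h change above turns the hook typedef from a pointer type into a plain function type, so the __rcu annotation can sit on the pointer declaration itself where sparse can check it. A tiny sketch with an illustrative hook name:

#include <linux/compiler.h>
#include <linux/skbuff.h>

typedef int example_hook_t(struct sk_buff *skb);	/* function type */
extern example_hook_t __rcu *example_hook;		/* RCU-annotated pointer */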
*/ struct xt_info_lock { - spinlock_t lock; + seqlock_t lock; unsigned char readers; }; DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks); @@ -497,7 +497,7 @@ static inline void xt_info_rdlock_bh(void) local_bh_disable(); lock = &__get_cpu_var(xt_info_locks); if (likely(!lock->readers++)) - spin_lock(&lock->lock); + write_seqlock(&lock->lock); } static inline void xt_info_rdunlock_bh(void) @@ -505,7 +505,7 @@ static inline void xt_info_rdunlock_bh(void) struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks); if (likely(!--lock->readers)) - spin_unlock(&lock->lock); + write_sequnlock(&lock->lock); local_bh_enable(); } @@ -516,12 +516,12 @@ static inline void xt_info_rdunlock_bh(void) */ static inline void xt_info_wrlock(unsigned int cpu) { - spin_lock(&per_cpu(xt_info_locks, cpu).lock); + write_seqlock(&per_cpu(xt_info_locks, cpu).lock); } static inline void xt_info_wrunlock(unsigned int cpu) { - spin_unlock(&per_cpu(xt_info_locks, cpu).lock); + write_sequnlock(&per_cpu(xt_info_locks, cpu).lock); } /* diff --git a/include/net/ah.h b/include/net/ah.h index f0129f7..be7798d 100644 --- a/include/net/ah.h +++ b/include/net/ah.h @@ -4,7 +4,7 @@ #include <linux/skbuff.h> /* This is the maximum truncated ICV length that we know of. */ -#define MAX_AH_AUTH_LEN 12 +#define MAX_AH_AUTH_LEN 16 struct crypto_ahash; diff --git a/include/net/arp.h b/include/net/arp.h index f4cf6ce..91f0568 100644 --- a/include/net/arp.h +++ b/include/net/arp.h @@ -25,5 +25,6 @@ extern struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip, const unsigned char *src_hw, const unsigned char *target_hw); extern void arp_xmit(struct sk_buff *skb); +int arp_invalidate(struct net_device *dev, __be32 ip); #endif /* _ARP_H */ diff --git a/include/net/phonet/phonet.h b/include/net/phonet/phonet.h index d5df797..5395e09 100644 --- a/include/net/phonet/phonet.h +++ b/include/net/phonet/phonet.h @@ -107,8 +107,8 @@ struct phonet_protocol { int sock_type; }; -int phonet_proto_register(int protocol, struct phonet_protocol *pp); -void phonet_proto_unregister(int protocol, struct phonet_protocol *pp); +int phonet_proto_register(unsigned int protocol, struct phonet_protocol *pp); +void phonet_proto_unregister(unsigned int protocol, struct phonet_protocol *pp); int phonet_sysctl_init(void); void phonet_sysctl_exit(void); diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 0af57eb..e9eee99 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -207,7 +207,7 @@ static inline int qdisc_qlen(struct Qdisc *q) return q->q.qlen; } -static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb) +static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb) { return (struct qdisc_skb_cb *)skb->cb; } @@ -394,7 +394,7 @@ static inline bool qdisc_tx_is_noop(const struct net_device *dev) return true; } -static inline unsigned int qdisc_pkt_len(struct sk_buff *skb) +static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb) { return qdisc_skb_cb(skb)->pkt_len; } @@ -426,10 +426,18 @@ static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch) return qdisc_enqueue(skb, sch) & NET_XMIT_MASK; } -static inline void __qdisc_update_bstats(struct Qdisc *sch, unsigned int len) + +static inline void bstats_update(struct gnet_stats_basic_packed *bstats, + const struct sk_buff *skb) +{ + bstats->bytes += qdisc_pkt_len(skb); + bstats->packets += skb_is_gso(skb) ? 
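The x_tables locking above moves from a per-CPU spinlock to a per-CPU seqlock, so counter readers no longer have to block the packet path. A self-contained sketch of the write/read halves of that scheme; struct pkt_counter is illustrative, not the xt_counters layout:

#include <linux/seqlock.h>
#include <linux/types.h>

struct pkt_counter {
	seqlock_t lock;
	u64 bytes;
	u64 packets;
};

static void counter_init(struct pkt_counter *c)
{
	seqlock_init(&c->lock);
	c->bytes = 0;
	c->packets = 0;
}

/* Hot path: the CPU owning the counter takes its own seqlock as a writer. */
static void counter_add(struct pkt_counter *c, unsigned int len)
{
	write_seqlock(&c->lock);
	c->bytes += len;
	c->packets++;
	write_sequnlock(&c->lock);
}

/* Reader: retry until a consistent 64-bit pair is observed, without ever
 * stalling the writer. */
static void counter_read(struct pkt_counter *c, u64 *bytes, u64 *packets)
{
	unsigned int start;

	do {
		start = read_seqbegin(&c->lock);
		*bytes = c->bytes;
		*packets = c->packets;
	} while (read_seqretry(&c->lock, start));
}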
skb_shinfo(skb)->gso_segs : 1; +} + +static inline void qdisc_bstats_update(struct Qdisc *sch, + const struct sk_buff *skb) { - sch->bstats.bytes += len; - sch->bstats.packets++; + bstats_update(&sch->bstats, skb); } static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch, @@ -437,7 +445,7 @@ static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch, { __skb_queue_tail(list, skb); sch->qstats.backlog += qdisc_pkt_len(skb); - __qdisc_update_bstats(sch, qdisc_pkt_len(skb)); + qdisc_bstats_update(sch, skb); return NET_XMIT_SUCCESS; } diff --git a/include/net/sock.h b/include/net/sock.h index 21a02f7..d884d26 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -152,14 +152,18 @@ struct sock_common { * fields between dontcopy_begin/dontcopy_end * are not copied in sock_copy() */ + /* private: */ int skc_dontcopy_begin[0]; + /* public: */ union { struct hlist_node skc_node; struct hlist_nulls_node skc_nulls_node; }; int skc_tx_queue_mapping; atomic_t skc_refcnt; + /* private: */ int skc_dontcopy_end[0]; + /* public: */ }; /** diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 1bf0cf5..8184c03 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -740,12 +740,12 @@ static int setsockopt(struct socket *sock, if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL) return -ENOPROTOOPT; lock_sock(&(cf_sk->sk)); - cf_sk->conn_req.param.size = ol; if (ol > sizeof(cf_sk->conn_req.param.data) || copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) { release_sock(&cf_sk->sk); return -EINVAL; } + cf_sk->conn_req.param.size = ol; release_sock(&cf_sk->sk); return 0; diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c index 84a422c..fa9dab3 100644 --- a/net/caif/chnl_net.c +++ b/net/caif/chnl_net.c @@ -76,6 +76,8 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt) struct chnl_net *priv = container_of(layr, struct chnl_net, chnl); int pktlen; int err = 0; + const u8 *ip_version; + u8 buf; priv = container_of(layr, struct chnl_net, chnl); @@ -90,7 +92,21 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt) * send the packet to the net stack. */ skb->dev = priv->netdev; - skb->protocol = htons(ETH_P_IP); + + /* check the version of IP */ + ip_version = skb_header_pointer(skb, 0, 1, &buf); + if (!ip_version) + return -EINVAL; + switch (*ip_version >> 4) { + case 4: + skb->protocol = htons(ETH_P_IP); + break; + case 6: + skb->protocol = htons(ETH_P_IPV6); + break; + default: + return -EINVAL; + } /* If we change the header in loop mode, the checksum is corrupted. 
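The sch_generic.h hunk above factors the byte/packet accounting into bstats_update() and qdisc_bstats_update(), which the rest of this series then uses everywhere. A short usage sketch; account_enqueue() is illustrative, and it assumes qdisc_skb_cb(skb)->pkt_len has already been set by the enqueue path:

#include <net/sch_generic.h>

static void account_enqueue(struct Qdisc *sch,
			    struct gnet_stats_basic_packed *cl_bstats,
			    struct sk_buff *skb)
{
	/* One helper replaces the open-coded bytes/packets updates and the
	 * GSO segment special-casing that htb used to do by hand. */
	bstats_update(cl_bstats, skb);		/* per-class stats */
	qdisc_bstats_update(sch, skb);		/* qdisc-level stats */
}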
*/ if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP) diff --git a/net/core/dev.c b/net/core/dev.c index a215269..a3ef808 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1732,33 +1732,6 @@ void netif_device_attach(struct net_device *dev) } EXPORT_SYMBOL(netif_device_attach); -static bool can_checksum_protocol(unsigned long features, __be16 protocol) -{ - return ((features & NETIF_F_NO_CSUM) || - ((features & NETIF_F_V4_CSUM) && - protocol == htons(ETH_P_IP)) || - ((features & NETIF_F_V6_CSUM) && - protocol == htons(ETH_P_IPV6)) || - ((features & NETIF_F_FCOE_CRC) && - protocol == htons(ETH_P_FCOE))); -} - -static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb) -{ - __be16 protocol = skb->protocol; - int features = dev->features; - - if (vlan_tx_tag_present(skb)) { - features &= dev->vlan_features; - } else if (protocol == htons(ETH_P_8021Q)) { - struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; - protocol = veh->h_vlan_encapsulated_proto; - features &= dev->vlan_features; - } - - return can_checksum_protocol(features, protocol); -} - /** * skb_dev_set -- assign a new device to a buffer * @skb: buffer for the new device @@ -1971,16 +1944,14 @@ static void dev_gso_skb_destructor(struct sk_buff *skb) /** * dev_gso_segment - Perform emulated hardware segmentation on skb. * @skb: buffer to segment + * @features: device features as applicable to this skb * * This function segments the given skb and stores the list of segments * in skb->next. */ -static int dev_gso_segment(struct sk_buff *skb) +static int dev_gso_segment(struct sk_buff *skb, int features) { - struct net_device *dev = skb->dev; struct sk_buff *segs; - int features = dev->features & ~(illegal_highdma(dev, skb) ? - NETIF_F_SG : 0); segs = skb_gso_segment(skb, features); @@ -2017,22 +1988,52 @@ static inline void skb_orphan_try(struct sk_buff *skb) } } -int netif_get_vlan_features(struct sk_buff *skb, struct net_device *dev) +static bool can_checksum_protocol(unsigned long features, __be16 protocol) +{ + return ((features & NETIF_F_GEN_CSUM) || + ((features & NETIF_F_V4_CSUM) && + protocol == htons(ETH_P_IP)) || + ((features & NETIF_F_V6_CSUM) && + protocol == htons(ETH_P_IPV6)) || + ((features & NETIF_F_FCOE_CRC) && + protocol == htons(ETH_P_FCOE))); +} + +static int harmonize_features(struct sk_buff *skb, __be16 protocol, int features) +{ + if (!can_checksum_protocol(protocol, features)) { + features &= ~NETIF_F_ALL_CSUM; + features &= ~NETIF_F_SG; + } else if (illegal_highdma(skb->dev, skb)) { + features &= ~NETIF_F_SG; + } + + return features; +} + +int netif_skb_features(struct sk_buff *skb) { __be16 protocol = skb->protocol; + int features = skb->dev->features; if (protocol == htons(ETH_P_8021Q)) { struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; protocol = veh->h_vlan_encapsulated_proto; - } else if (!skb->vlan_tci) - return dev->features; + } else if (!vlan_tx_tag_present(skb)) { + return harmonize_features(skb, protocol, features); + } - if (protocol != htons(ETH_P_8021Q)) - return dev->features & dev->vlan_features; - else - return 0; + features &= skb->dev->vlan_features; + + if (protocol != htons(ETH_P_8021Q)) { + return harmonize_features(skb, protocol, features); + } else { + features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | + NETIF_F_GEN_CSUM; + return harmonize_features(skb, protocol, features); + } } -EXPORT_SYMBOL(netif_get_vlan_features); +EXPORT_SYMBOL(netif_skb_features); /* * Returns true if either: @@ -2042,22 +2043,13 @@ 
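The CAIF receive-path change above (chnl_recv_cb) peeks at the first payload byte to decide between ETH_P_IP and ETH_P_IPV6 instead of assuming IPv4. A sketch of that check, assuming only that the packet starts with an IP header; the function name is illustrative:

#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/errno.h>

static int set_ip_protocol(struct sk_buff *skb)
{
	const u8 *first;
	u8 buf;

	/* skb_header_pointer() copes with non-linear skbs by copying the
	 * byte into 'buf' when it is not directly accessible. */
	first = skb_header_pointer(skb, 0, 1, &buf);
	if (!first)
		return -EINVAL;

	switch (*first >> 4) {		/* IP version nibble */
	case 4:
		skb->protocol = htons(ETH_P_IP);
		return 0;
	case 6:
		skb->protocol = htons(ETH_P_IPV6);
		return 0;
	default:
		return -EINVAL;
	}
}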
EXPORT_SYMBOL(netif_get_vlan_features); * support DMA from it. */ static inline int skb_needs_linearize(struct sk_buff *skb, - struct net_device *dev) + int features) { - if (skb_is_nonlinear(skb)) { - int features = dev->features; - - if (vlan_tx_tag_present(skb)) - features &= dev->vlan_features; - - return (skb_has_frag_list(skb) && - !(features & NETIF_F_FRAGLIST)) || + return skb_is_nonlinear(skb) && + ((skb_has_frag_list(skb) && + !(features & NETIF_F_FRAGLIST)) || (skb_shinfo(skb)->nr_frags && - (!(features & NETIF_F_SG) || - illegal_highdma(dev, skb))); - } - - return 0; + !(features & NETIF_F_SG))); } int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, @@ -2067,6 +2059,8 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, int rc = NETDEV_TX_OK; if (likely(!skb->next)) { + int features; + /* * If device doesnt need skb->dst, release it right now while * its hot in this cpu cache @@ -2079,8 +2073,10 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, skb_orphan_try(skb); + features = netif_skb_features(skb); + if (vlan_tx_tag_present(skb) && - !(dev->features & NETIF_F_HW_VLAN_TX)) { + !(features & NETIF_F_HW_VLAN_TX)) { skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb)); if (unlikely(!skb)) goto out; @@ -2088,13 +2084,13 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, skb->vlan_tci = 0; } - if (netif_needs_gso(dev, skb)) { - if (unlikely(dev_gso_segment(skb))) + if (netif_needs_gso(skb, features)) { + if (unlikely(dev_gso_segment(skb, features))) goto out_kfree_skb; if (skb->next) goto gso; } else { - if (skb_needs_linearize(skb, dev) && + if (skb_needs_linearize(skb, features) && __skb_linearize(skb)) goto out_kfree_skb; @@ -2105,7 +2101,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, if (skb->ip_summed == CHECKSUM_PARTIAL) { skb_set_transport_header(skb, skb_checksum_start_offset(skb)); - if (!dev_can_checksum(dev, skb) && + if (!(features & NETIF_F_ALL_CSUM) && skb_checksum_help(skb)) goto out_kfree_skb; } @@ -2301,7 +2297,10 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, */ if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE)) skb_dst_force(skb); - __qdisc_update_bstats(q, skb->len); + + qdisc_skb_cb(skb)->pkt_len = skb->len; + qdisc_bstats_update(q, skb); + if (sch_direct_xmit(skb, q, dev, txq, root_lock)) { if (unlikely(contended)) { spin_unlock(&q->busylock); @@ -5621,18 +5620,20 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev) } /** - * alloc_netdev_mq - allocate network device + * alloc_netdev_mqs - allocate network device * @sizeof_priv: size of private data to allocate space for * @name: device name format string * @setup: callback to initialize device - * @queue_count: the number of subqueues to allocate + * @txqs: the number of TX subqueues to allocate + * @rxqs: the number of RX subqueues to allocate * * Allocates a struct net_device with private data area for driver use * and performs basic initialization. Also allocates subquue structs - * for each queue on the device at the end of the netdevice. + * for each queue on the device. 
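dev_hard_start_xmit() above now computes the effective feature set once with netif_skb_features() and feeds it to the GSO, linearize and checksum decisions. A condensed sketch of that flow; xmit_prepare() is illustrative and returns 1 when the caller should segment:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int xmit_prepare(struct sk_buff *skb)
{
	/* Folds in vlan_features and the checksum/highmem-DMA limits for
	 * this particular skb, per the new netif_skb_features(). */
	int features = netif_skb_features(skb);

	if (netif_needs_gso(skb, features))
		return 1;	/* segment with exactly these features */

	if (skb_is_nonlinear(skb) && !(features & NETIF_F_SG))
		return skb_linearize(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    !(features & NETIF_F_ALL_CSUM))
		return skb_checksum_help(skb);

	return 0;
}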
*/ -struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, - void (*setup)(struct net_device *), unsigned int queue_count) +struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, + void (*setup)(struct net_device *), + unsigned int txqs, unsigned int rxqs) { struct net_device *dev; size_t alloc_size; @@ -5640,12 +5641,20 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, BUG_ON(strlen(name) >= sizeof(dev->name)); - if (queue_count < 1) { + if (txqs < 1) { pr_err("alloc_netdev: Unable to allocate device " "with zero queues.\n"); return NULL; } +#ifdef CONFIG_RPS + if (rxqs < 1) { + pr_err("alloc_netdev: Unable to allocate device " + "with zero RX queues.\n"); + return NULL; + } +#endif + alloc_size = sizeof(struct net_device); if (sizeof_priv) { /* ensure 32-byte alignment of private area */ @@ -5676,14 +5685,14 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, dev_net_set(dev, &init_net); - dev->num_tx_queues = queue_count; - dev->real_num_tx_queues = queue_count; + dev->num_tx_queues = txqs; + dev->real_num_tx_queues = txqs; if (netif_alloc_netdev_queues(dev)) goto free_pcpu; #ifdef CONFIG_RPS - dev->num_rx_queues = queue_count; - dev->real_num_rx_queues = queue_count; + dev->num_rx_queues = rxqs; + dev->real_num_rx_queues = rxqs; if (netif_alloc_rx_queues(dev)) goto free_pcpu; #endif @@ -5711,7 +5720,7 @@ free_p: kfree(p); return NULL; } -EXPORT_SYMBOL(alloc_netdev_mq); +EXPORT_SYMBOL(alloc_netdev_mqs); /** * free_netdev - free network device diff --git a/net/core/filter.c b/net/core/filter.c index 2b27d4e..afc5837 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -158,7 +158,7 @@ EXPORT_SYMBOL(sk_filter); /** * sk_run_filter - run a filter on a socket * @skb: buffer to run the filter on - * @filter: filter to apply + * @fentry: filter to apply * * Decode and apply filter instructions to the skb->data. * Return length to keep, 0 for none. 
@skb is the data we are diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 750db57..a5f7535 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1820,7 +1820,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) if (kind != 2 && security_netlink_recv(skb, CAP_NET_ADMIN)) return -EPERM; - if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) { + if (kind == 2 && (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) { struct sock *rtnl; rtnl_dumpit_func dumpit; diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index 4508705..5fdb072 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h @@ -426,7 +426,8 @@ static inline void dccp_update_gsr(struct sock *sk, u64 seq) { struct dccp_sock *dp = dccp_sk(sk); - dp->dccps_gsr = seq; + if (after48(seq, dp->dccps_gsr)) + dp->dccps_gsr = seq; /* Sequence validity window depends on remote Sequence Window (7.5.1) */ dp->dccps_swl = SUB48(ADD48(dp->dccps_gsr, 1), dp->dccps_r_seq_win / 4); /* diff --git a/net/dccp/input.c b/net/dccp/input.c index 15af247..8cde009 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c @@ -260,7 +260,7 @@ static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb) */ if (time_before(now, (dp->dccps_rate_last + sysctl_dccp_sync_ratelimit))) - return 0; + return -1; DCCP_WARN("Step 6 failed for %s packet, " "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and " diff --git a/net/dccp/sysctl.c b/net/dccp/sysctl.c index 5639438..4234882 100644 --- a/net/dccp/sysctl.c +++ b/net/dccp/sysctl.c @@ -21,7 +21,8 @@ /* Boundary values */ static int zero = 0, u8_max = 0xFF; -static unsigned long seqw_min = 32; +static unsigned long seqw_min = DCCPF_SEQ_WMIN, + seqw_max = 0xFFFFFFFF; /* maximum on 32 bit */ static struct ctl_table dccp_default_table[] = { { @@ -31,6 +32,7 @@ static struct ctl_table dccp_default_table[] = { .mode = 0644, .proc_handler = proc_doulongvec_minmax, .extra1 = &seqw_min, /* RFC 4340, 7.5.2 */ + .extra2 = &seqw_max, }, { .procname = "rx_ccid", diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index f00ef2f..f9d7ac9 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -347,10 +347,11 @@ void ether_setup(struct net_device *dev) EXPORT_SYMBOL(ether_setup); /** - * alloc_etherdev_mq - Allocates and sets up an Ethernet device + * alloc_etherdev_mqs - Allocates and sets up an Ethernet device * @sizeof_priv: Size of additional driver-private structure to be allocated * for this Ethernet device - * @queue_count: The number of queues this device has. + * @txqs: The number of TX queues this device has. + * @txqs: The number of RX queues this device has. * * Fill in the fields of the device structure with Ethernet-generic * values. Basically does everything except registering the device. @@ -360,11 +361,12 @@ EXPORT_SYMBOL(ether_setup); * this private data area. 
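The rtnetlink change above (repeated later for inet_diag, ctnetlink and genetlink) tightens how a dump request is detected. A one-line sketch of the reasoning:

#include <linux/types.h>
#include <linux/netlink.h>

/* NLM_F_DUMP is NLM_F_ROOT | NLM_F_MATCH, so a plain '&' test also matches
 * requests carrying only one of those bits; a dump needs both. */
static bool is_dump_request(const struct nlmsghdr *nlh)
{
	return (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP;
}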
*/ -struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count) +struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs, + unsigned int rxqs) { - return alloc_netdev_mq(sizeof_priv, "eth%d", ether_setup, queue_count); + return alloc_netdev_mqs(sizeof_priv, "eth%d", ether_setup, txqs, rxqs); } -EXPORT_SYMBOL(alloc_etherdev_mq); +EXPORT_SYMBOL(alloc_etherdev_mqs); static size_t _format_mac_addr(char *buf, int buflen, const unsigned char *addr, int len) diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c index 880a5ec..86961be 100644 --- a/net/ipv4/ah4.c +++ b/net/ipv4/ah4.c @@ -314,14 +314,15 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb) skb->ip_summed = CHECKSUM_NONE; - ah = (struct ip_auth_hdr *)skb->data; - iph = ip_hdr(skb); - ihl = ip_hdrlen(skb); if ((err = skb_cow_data(skb, 0, &trailer)) < 0) goto out; nfrags = err; + ah = (struct ip_auth_hdr *)skb->data; + iph = ip_hdr(skb); + ihl = ip_hdrlen(skb); + work_iph = ah_alloc_tmp(ahash, nfrags, ihl + ahp->icv_trunc_len); if (!work_iph) goto out; diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index a2fc7b9..04c8b69 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -1143,6 +1143,23 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev) return err; } +int arp_invalidate(struct net_device *dev, __be32 ip) +{ + struct neighbour *neigh = neigh_lookup(&arp_tbl, &ip, dev); + int err = -ENXIO; + + if (neigh) { + if (neigh->nud_state & ~NUD_NOARP) + err = neigh_update(neigh, NULL, NUD_FAILED, + NEIGH_UPDATE_F_OVERRIDE| + NEIGH_UPDATE_F_ADMIN); + neigh_release(neigh); + } + + return err; +} +EXPORT_SYMBOL(arp_invalidate); + static int arp_req_delete_public(struct net *net, struct arpreq *r, struct net_device *dev) { @@ -1163,7 +1180,6 @@ static int arp_req_delete(struct net *net, struct arpreq *r, { int err; __be32 ip; - struct neighbour *neigh; if (r->arp_flags & ATF_PUBL) return arp_req_delete_public(net, r, dev); @@ -1181,16 +1197,7 @@ static int arp_req_delete(struct net *net, struct arpreq *r, if (!dev) return -EINVAL; } - err = -ENXIO; - neigh = neigh_lookup(&arp_tbl, &ip, dev); - if (neigh) { - if (neigh->nud_state & ~NUD_NOARP) - err = neigh_update(neigh, NULL, NUD_FAILED, - NEIGH_UPDATE_F_OVERRIDE| - NEIGH_UPDATE_F_ADMIN); - neigh_release(neigh); - } - return err; + return arp_invalidate(dev, ip); } /* diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 25e3181..97e5fb7 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -73,7 +73,7 @@ int inet_csk_bind_conflict(const struct sock *sk, !sk2->sk_bound_dev_if || sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { if (!reuse || !sk2->sk_reuse || - sk2->sk_state == TCP_LISTEN) { + ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) { const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2); if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) || sk2_rcv_saddr == sk_rcv_saddr(sk)) @@ -122,7 +122,8 @@ again: (tb->num_owners < smallest_size || smallest_size == -1)) { smallest_size = tb->num_owners; smallest_rover = rover; - if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) { + if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 && + !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) { spin_unlock(&head->lock); snum = smallest_rover; goto have_snum; diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 2ada171..2746c1f 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -858,7 +858,7 @@ static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) 
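The ah_input() reorder above exists because skb_cow_data() may reallocate the skb's data area, invalidating any header pointers taken earlier. A sketch of the safe ordering; process_header() is a stand-in for the real ICV computation:

#include <linux/skbuff.h>
#include <linux/ip.h>

static int cow_then_use(struct sk_buff *skb,
			int (*process_header)(struct iphdr *iph))
{
	struct sk_buff *trailer;
	struct iphdr *iph;
	int err;

	err = skb_cow_data(skb, 0, &trailer);
	if (err < 0)
		return err;

	/* Derive header pointers only after skb_cow_data(); anything grabbed
	 * before the call may now point into freed memory. */
	iph = ip_hdr(skb);
	return process_header(iph);
}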
nlmsg_len(nlh) < hdrlen) return -EINVAL; - if (nlh->nlmsg_flags & NLM_F_DUMP) { + if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) { if (nlmsg_attrlen(nlh, hdrlen)) { struct nlattr *attr; diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 3fac340..e855fff 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -710,42 +710,25 @@ static void get_counters(const struct xt_table_info *t, struct arpt_entry *iter; unsigned int cpu; unsigned int i; - unsigned int curcpu = get_cpu(); - - /* Instead of clearing (by a previous call to memset()) - * the counters and using adds, we set the counters - * with data used by 'current' CPU - * - * Bottom half has to be disabled to prevent deadlock - * if new softirq were to run and call ipt_do_table - */ - local_bh_disable(); - i = 0; - xt_entry_foreach(iter, t->entries[curcpu], t->size) { - SET_COUNTER(counters[i], iter->counters.bcnt, - iter->counters.pcnt); - ++i; - } - local_bh_enable(); - /* Processing counters from other cpus, we can let bottom half enabled, - * (preemption is disabled) - */ for_each_possible_cpu(cpu) { - if (cpu == curcpu) - continue; + seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock; + i = 0; - local_bh_disable(); - xt_info_wrlock(cpu); xt_entry_foreach(iter, t->entries[cpu], t->size) { - ADD_COUNTER(counters[i], iter->counters.bcnt, - iter->counters.pcnt); + u64 bcnt, pcnt; + unsigned int start; + + do { + start = read_seqbegin(lock); + bcnt = iter->counters.bcnt; + pcnt = iter->counters.pcnt; + } while (read_seqretry(lock, start)); + + ADD_COUNTER(counters[i], bcnt, pcnt); ++i; } - xt_info_wrunlock(cpu); - local_bh_enable(); } - put_cpu(); } static struct xt_counters *alloc_counters(const struct xt_table *table) @@ -759,7 +742,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table) * about). */ countersize = sizeof(struct xt_counters) * private->number; - counters = vmalloc(countersize); + counters = vzalloc(countersize); if (counters == NULL) return ERR_PTR(-ENOMEM); @@ -1007,7 +990,7 @@ static int __do_replace(struct net *net, const char *name, struct arpt_entry *iter; ret = 0; - counters = vmalloc(num_counters * sizeof(struct xt_counters)); + counters = vzalloc(num_counters * sizeof(struct xt_counters)); if (!counters) { ret = -ENOMEM; goto out; diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index a846d63..652efea 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -884,42 +884,25 @@ get_counters(const struct xt_table_info *t, struct ipt_entry *iter; unsigned int cpu; unsigned int i; - unsigned int curcpu = get_cpu(); - - /* Instead of clearing (by a previous call to memset()) - * the counters and using adds, we set the counters - * with data used by 'current' CPU. 
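The counter summation above also switches from vmalloc() to the newly added vzalloc(), because the per-CPU values are now accumulated with ADD_COUNTER() into a buffer that must start at zero rather than being seeded from the local CPU. A minimal sketch; alloc_counter_array() and the layout are illustrative:

#include <linux/vmalloc.h>
#include <linux/types.h>

struct counter_pair { u64 bcnt; u64 pcnt; };	/* illustrative layout */

static struct counter_pair *alloc_counter_array(unsigned int n)
{
	/* vzalloc() behaves like vmalloc() plus a guaranteed zero fill */
	return vzalloc(n * sizeof(struct counter_pair));
}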
- * - * Bottom half has to be disabled to prevent deadlock - * if new softirq were to run and call ipt_do_table - */ - local_bh_disable(); - i = 0; - xt_entry_foreach(iter, t->entries[curcpu], t->size) { - SET_COUNTER(counters[i], iter->counters.bcnt, - iter->counters.pcnt); - ++i; - } - local_bh_enable(); - /* Processing counters from other cpus, we can let bottom half enabled, - * (preemption is disabled) - */ for_each_possible_cpu(cpu) { - if (cpu == curcpu) - continue; + seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock; + i = 0; - local_bh_disable(); - xt_info_wrlock(cpu); xt_entry_foreach(iter, t->entries[cpu], t->size) { - ADD_COUNTER(counters[i], iter->counters.bcnt, - iter->counters.pcnt); + u64 bcnt, pcnt; + unsigned int start; + + do { + start = read_seqbegin(lock); + bcnt = iter->counters.bcnt; + pcnt = iter->counters.pcnt; + } while (read_seqretry(lock, start)); + + ADD_COUNTER(counters[i], bcnt, pcnt); ++i; /* macro does multi eval of i */ } - xt_info_wrunlock(cpu); - local_bh_enable(); } - put_cpu(); } static struct xt_counters *alloc_counters(const struct xt_table *table) @@ -932,7 +915,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table) (other than comefrom, which userspace doesn't care about). */ countersize = sizeof(struct xt_counters) * private->number; - counters = vmalloc(countersize); + counters = vzalloc(countersize); if (counters == NULL) return ERR_PTR(-ENOMEM); @@ -1203,7 +1186,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, struct ipt_entry *iter; ret = 0; - counters = vmalloc(num_counters * sizeof(struct xt_counters)); + counters = vzalloc(num_counters * sizeof(struct xt_counters)); if (!counters) { ret = -ENOMEM; goto out; diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index ee82d4e..1aba54a 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c @@ -538,14 +538,16 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb) if (!pskb_may_pull(skb, ah_hlen)) goto out; - ip6h = ipv6_hdr(skb); - - skb_push(skb, hdr_len); if ((err = skb_cow_data(skb, 0, &trailer)) < 0) goto out; nfrags = err; + ah = (struct ip_auth_hdr *)skb->data; + ip6h = ipv6_hdr(skb); + + skb_push(skb, hdr_len); + work_iph = ah_alloc_tmp(ahash, nfrags, hdr_len + ahp->icv_trunc_len); if (!work_iph) goto out; diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index e46305d..d144e62 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c @@ -44,7 +44,7 @@ int inet6_csk_bind_conflict(const struct sock *sk, !sk2->sk_bound_dev_if || sk->sk_bound_dev_if == sk2->sk_bound_dev_if) && (!sk->sk_reuse || !sk2->sk_reuse || - sk2->sk_state == TCP_LISTEN) && + ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) && ipv6_rcv_saddr_equal(sk, sk2)) break; } diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 4555823..7d227c6 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -897,42 +897,25 @@ get_counters(const struct xt_table_info *t, struct ip6t_entry *iter; unsigned int cpu; unsigned int i; - unsigned int curcpu = get_cpu(); - - /* Instead of clearing (by a previous call to memset()) - * the counters and using adds, we set the counters - * with data used by 'current' CPU - * - * Bottom half has to be disabled to prevent deadlock - * if new softirq were to run and call ipt_do_table - */ - local_bh_disable(); - i = 0; - xt_entry_foreach(iter, t->entries[curcpu], t->size) { - SET_COUNTER(counters[i], iter->counters.bcnt, - 
iter->counters.pcnt); - ++i; - } - local_bh_enable(); - /* Processing counters from other cpus, we can let bottom half enabled, - * (preemption is disabled) - */ for_each_possible_cpu(cpu) { - if (cpu == curcpu) - continue; + seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock; + i = 0; - local_bh_disable(); - xt_info_wrlock(cpu); xt_entry_foreach(iter, t->entries[cpu], t->size) { - ADD_COUNTER(counters[i], iter->counters.bcnt, - iter->counters.pcnt); + u64 bcnt, pcnt; + unsigned int start; + + do { + start = read_seqbegin(lock); + bcnt = iter->counters.bcnt; + pcnt = iter->counters.pcnt; + } while (read_seqretry(lock, start)); + + ADD_COUNTER(counters[i], bcnt, pcnt); ++i; } - xt_info_wrunlock(cpu); - local_bh_enable(); } - put_cpu(); } static struct xt_counters *alloc_counters(const struct xt_table *table) @@ -945,7 +928,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table) (other than comefrom, which userspace doesn't care about). */ countersize = sizeof(struct xt_counters) * private->number; - counters = vmalloc(countersize); + counters = vzalloc(countersize); if (counters == NULL) return ERR_PTR(-ENOMEM); @@ -1216,7 +1199,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, struct ip6t_entry *iter; ret = 0; - counters = vmalloc(num_counters * sizeof(struct xt_counters)); + counters = vzalloc(num_counters * sizeof(struct xt_counters)); if (!counters) { ret = -ENOMEM; goto out; diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 0cdba50..5cb8d30 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -645,25 +645,23 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); u_int8_t l3proto = nfmsg->nfgen_family; - rcu_read_lock(); + spin_lock_bh(&nf_conntrack_lock); last = (struct nf_conn *)cb->args[1]; for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) { restart: - hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[cb->args[0]], + hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]], hnnode) { if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) continue; ct = nf_ct_tuplehash_to_ctrack(h); - if (!atomic_inc_not_zero(&ct->ct_general.use)) - continue; /* Dump entries of a given L3 protocol number. * If it is not specified, ie. l3proto == 0, * then dump everything. 
*/ if (l3proto && nf_ct_l3num(ct) != l3proto) - goto releasect; + continue; if (cb->args[1]) { if (ct != last) - goto releasect; + continue; cb->args[1] = 0; } if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid, @@ -681,8 +679,6 @@ restart: if (acct) memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX])); } -releasect: - nf_ct_put(ct); } if (cb->args[1]) { cb->args[1] = 0; @@ -690,7 +686,7 @@ releasect: } } out: - rcu_read_unlock(); + spin_unlock_bh(&nf_conntrack_lock); if (last) nf_ct_put(last); @@ -928,7 +924,7 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb, u16 zone; int err; - if (nlh->nlmsg_flags & NLM_F_DUMP) + if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table, ctnetlink_done); @@ -1790,7 +1786,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb, u16 zone; int err; - if (nlh->nlmsg_flags & NLM_F_DUMP) { + if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) { return netlink_dump_start(ctnl, skb, nlh, ctnetlink_exp_dump_table, ctnetlink_exp_done); diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 8046350..c942376 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -1325,7 +1325,8 @@ static int __init xt_init(void) for_each_possible_cpu(i) { struct xt_info_lock *lock = &per_cpu(xt_info_locks, i); - spin_lock_init(&lock->lock); + + seqlock_init(&lock->lock); lock->readers = 0; } diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 1781d99..f83cb37 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -519,7 +519,7 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) security_netlink_recv(skb, CAP_NET_ADMIN)) return -EPERM; - if (nlh->nlmsg_flags & NLM_F_DUMP) { + if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) { if (ops->dumpit == NULL) return -EOPNOTSUPP; diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c index fd95beb..1072b2c 100644 --- a/net/phonet/af_phonet.c +++ b/net/phonet/af_phonet.c @@ -37,7 +37,7 @@ /* Transport protocol registration */ static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly; -static struct phonet_protocol *phonet_proto_get(int protocol) +static struct phonet_protocol *phonet_proto_get(unsigned int protocol) { struct phonet_protocol *pp; @@ -458,7 +458,7 @@ static struct packet_type phonet_packet_type __read_mostly = { static DEFINE_MUTEX(proto_tab_lock); -int __init_or_module phonet_proto_register(int protocol, +int __init_or_module phonet_proto_register(unsigned int protocol, struct phonet_protocol *pp) { int err = 0; @@ -481,7 +481,7 @@ int __init_or_module phonet_proto_register(int protocol, } EXPORT_SYMBOL(phonet_proto_register); -void phonet_proto_unregister(int protocol, struct phonet_protocol *pp) +void phonet_proto_unregister(unsigned int protocol, struct phonet_protocol *pp) { mutex_lock(&proto_tab_lock); BUG_ON(proto_tab[protocol] != pp); diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index 67dc7ce..83ddfc0 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -508,8 +508,7 @@ static int tcf_csum(struct sk_buff *skb, spin_lock(&p->tcf_lock); p->tcf_tm.lastuse = jiffies; - p->tcf_bstats.bytes += qdisc_pkt_len(skb); - p->tcf_bstats.packets++; + bstats_update(&p->tcf_bstats, skb); action = p->tcf_action; update_flags = p->update_flags; spin_unlock(&p->tcf_lock); diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 8daef96..c2a7c20 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -209,8 +209,7 
@@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a, spin_lock(&ipt->tcf_lock); ipt->tcf_tm.lastuse = jiffies; - ipt->tcf_bstats.bytes += qdisc_pkt_len(skb); - ipt->tcf_bstats.packets++; + bstats_update(&ipt->tcf_bstats, skb); /* yes, we have to worry about both in and out dev worry later - danger - this API seems to have changed diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 0c311be..d765067 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -165,8 +165,7 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a, spin_lock(&m->tcf_lock); m->tcf_tm.lastuse = jiffies; - m->tcf_bstats.bytes += qdisc_pkt_len(skb); - m->tcf_bstats.packets++; + bstats_update(&m->tcf_bstats, skb); dev = m->tcfm_dev; if (!dev) { diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 186eb83..178a4bd 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c @@ -125,8 +125,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a, egress = p->flags & TCA_NAT_FLAG_EGRESS; action = p->tcf_action; - p->tcf_bstats.bytes += qdisc_pkt_len(skb); - p->tcf_bstats.packets++; + bstats_update(&p->tcf_bstats, skb); spin_unlock(&p->tcf_lock); diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index a0593c9..445bef7 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -187,8 +187,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a, bad: p->tcf_qstats.overlimits++; done: - p->tcf_bstats.bytes += qdisc_pkt_len(skb); - p->tcf_bstats.packets++; + bstats_update(&p->tcf_bstats, skb); spin_unlock(&p->tcf_lock); return p->tcf_action; } diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 7ebf743..e2f08b1 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -298,8 +298,7 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a, spin_lock(&police->tcf_lock); - police->tcf_bstats.bytes += qdisc_pkt_len(skb); - police->tcf_bstats.packets++; + bstats_update(&police->tcf_bstats, skb); if (police->tcfp_ewma_rate && police->tcf_rate_est.bps >= police->tcfp_ewma_rate) { diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 97e84f3..7287cff 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -42,8 +42,7 @@ static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result spin_lock(&d->tcf_lock); d->tcf_tm.lastuse = jiffies; - d->tcf_bstats.bytes += qdisc_pkt_len(skb); - d->tcf_bstats.packets++; + bstats_update(&d->tcf_bstats, skb); /* print policy string followed by _ then packet count * Example if this was the 3rd packet and the string was "hello" diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index 66cbf4e..836f5fe 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -46,8 +46,7 @@ static int tcf_skbedit(struct sk_buff *skb, struct tc_action *a, spin_lock(&d->tcf_lock); d->tcf_tm.lastuse = jiffies; - d->tcf_bstats.bytes += qdisc_pkt_len(skb); - d->tcf_bstats.packets++; + bstats_update(&d->tcf_bstats, skb); if (d->flags & SKBEDIT_F_PRIORITY) skb->priority = d->priority; diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index 2825407..943d733 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -422,10 +422,8 @@ drop: __maybe_unused } return ret; } - sch->bstats.bytes += qdisc_pkt_len(skb); - sch->bstats.packets++; - flow->bstats.bytes += qdisc_pkt_len(skb); - flow->bstats.packets++; + qdisc_bstats_update(sch, skb); + bstats_update(&flow->bstats, skb); /* * Okay, this may seem weird. 
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 2825407..943d733 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -422,10 +422,8 @@ drop: __maybe_unused
 		}
 		return ret;
 	}
-	sch->bstats.bytes += qdisc_pkt_len(skb);
-	sch->bstats.packets++;
-	flow->bstats.bytes += qdisc_pkt_len(skb);
-	flow->bstats.packets++;
+	qdisc_bstats_update(sch, skb);
+	bstats_update(&flow->bstats, skb);
 	/*
	 * Okay, this may seem weird. We pretend we've dropped the packet if
	 * it goes via ATM. The reason for this is that the outer qdisc
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index eb76315..c80d1c2 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -390,8 +390,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	ret = qdisc_enqueue(skb, cl->q);
 	if (ret == NET_XMIT_SUCCESS) {
 		sch->q.qlen++;
-		sch->bstats.packets++;
-		sch->bstats.bytes += qdisc_pkt_len(skb);
+		qdisc_bstats_update(sch, skb);
 		cbq_mark_toplevel(q, cl);
 		if (!cl->next_alive)
 			cbq_activate_class(cl);
@@ -650,8 +649,7 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 		ret = qdisc_enqueue(skb, cl->q);
 		if (ret == NET_XMIT_SUCCESS) {
 			sch->q.qlen++;
-			sch->bstats.packets++;
-			sch->bstats.bytes += qdisc_pkt_len(skb);
+			qdisc_bstats_update(sch, skb);
 			if (!cl->next_alive)
 				cbq_activate_class(cl);
 			return 0;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index aa8b531..de55e64 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -351,7 +351,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct drr_sched *q = qdisc_priv(sch);
 	struct drr_class *cl;
-	unsigned int len;
 	int err;
 
 	cl = drr_classify(skb, sch, &err);
@@ -362,7 +361,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return err;
 	}
 
-	len = qdisc_pkt_len(skb);
 	err = qdisc_enqueue(skb, cl->qdisc);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		if (net_xmit_drop_count(err)) {
@@ -377,10 +375,8 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		cl->deficit = cl->quantum;
 	}
 
-	cl->bstats.packets++;
-	cl->bstats.bytes += len;
-	sch->bstats.packets++;
-	sch->bstats.bytes += len;
+	bstats_update(&cl->bstats, skb);
+	qdisc_bstats_update(sch, skb);
 	sch->q.qlen++;
 	return err;
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 1d295d6..60f4bdd 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -260,8 +260,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return err;
 	}
 
-	sch->bstats.bytes += qdisc_pkt_len(skb);
-	sch->bstats.packets++;
+	qdisc_bstats_update(sch, skb);
 	sch->q.qlen++;
 
 	return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 069c62b..2e45791 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1599,10 +1599,8 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (cl->qdisc->q.qlen == 1)
 		set_active(cl, qdisc_pkt_len(skb));
 
-	cl->bstats.packets++;
-	cl->bstats.bytes += qdisc_pkt_len(skb);
-	sch->bstats.packets++;
-	sch->bstats.bytes += qdisc_pkt_len(skb);
+	bstats_update(&cl->bstats, skb);
+	qdisc_bstats_update(sch, skb);
 	sch->q.qlen++;
 
 	return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 01b519d..984c1b0 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -569,15 +569,12 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		}
 		return ret;
 	} else {
-		cl->bstats.packets +=
-			skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
-		cl->bstats.bytes += qdisc_pkt_len(skb);
+		bstats_update(&cl->bstats, skb);
 		htb_activate(q, cl);
 	}
 
 	sch->q.qlen++;
-	sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
-	sch->bstats.bytes += qdisc_pkt_len(skb);
+	qdisc_bstats_update(sch, skb);
 	return NET_XMIT_SUCCESS;
 }
 
@@ -648,12 +645,10 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
 			htb_add_to_wait_tree(q, cl, diff);
 		}
 
-		/* update byte stats except for leaves which are already updated */
-		if (cl->level) {
-			cl->bstats.bytes += bytes;
-			cl->bstats.packets += skb_is_gso(skb)?
-					skb_shinfo(skb)->gso_segs:1;
-		}
+		/* update basic stats except for leaves which are already updated */
+		if (cl->level)
+			bstats_update(&cl->bstats, skb);
+
 		cl = cl->parent;
 	}
 }
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index f10e34a..bce1665 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -63,8 +63,7 @@ static int ingress_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	result = tc_classify(skb, p->filter_list, &res);
 
-	sch->bstats.packets++;
-	sch->bstats.bytes += qdisc_pkt_len(skb);
+	qdisc_bstats_update(sch, skb);
 	switch (result) {
 	case TC_ACT_SHOT:
 		result = TC_ACT_SHOT;
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 32690de..21f13da 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -83,8 +83,7 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	ret = qdisc_enqueue(skb, qdisc);
 	if (ret == NET_XMIT_SUCCESS) {
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
 	}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index e5593c0..1c4bce8 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -240,8 +240,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	if (likely(ret == NET_XMIT_SUCCESS)) {
 		sch->q.qlen++;
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 	} else if (net_xmit_drop_count(ret)) {
 		sch->qstats.drops++;
 	}
@@ -477,8 +476,7 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 		__skb_queue_after(list, skb, nskb);
 
 	sch->qstats.backlog += qdisc_pkt_len(nskb);
-	sch->bstats.bytes += qdisc_pkt_len(nskb);
-	sch->bstats.packets++;
+	qdisc_bstats_update(sch, nskb);
 
 	return NET_XMIT_SUCCESS;
 }
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index b1c95bc..966158d 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -84,8 +84,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	ret = qdisc_enqueue(skb, qdisc);
 	if (ret == NET_XMIT_SUCCESS) {
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
 	}
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index a67ba3c..a6009c5 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -94,8 +94,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
 	ret = qdisc_enqueue(skb, child);
 	if (likely(ret == NET_XMIT_SUCCESS)) {
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 		sch->q.qlen++;
 	} else if (net_xmit_drop_count(ret)) {
 		q->stats.pdrop++;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index d54ac94..239ec53 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -403,8 +403,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		slot->allot = q->scaled_quantum;
 	}
 	if (++sch->q.qlen <= q->limit) {
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 		return NET_XMIT_SUCCESS;
 	}
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 641a30d..77565e7 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -134,8 +134,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 	}
 
 	sch->q.qlen++;
-	sch->bstats.bytes += qdisc_pkt_len(skb);
-	sch->bstats.packets++;
+	qdisc_bstats_update(sch, skb);
 	return NET_XMIT_SUCCESS;
 }
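All of the queueing disciplines above follow the same mechanical conversion: on a successful enqueue, the two open-coded counter updates collapse into a single qdisc_bstats_update() call. As a usage illustration only (example_enqueue, struct example_sched and its child field are made-up names, not from this patch), a post-patch enqueue path typically reads:

```c
/* Illustrative qdisc enqueue path after this patch; all "example_*"
 * names are hypothetical, the called helpers are the real kernel ones. */
struct example_sched {
	struct Qdisc *child;		/* inner qdisc this one feeds */
};

static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct example_sched *q = qdisc_priv(sch);
	int ret = qdisc_enqueue(skb, q->child);

	if (likely(ret == NET_XMIT_SUCCESS)) {
		qdisc_bstats_update(sch, skb);	/* bytes + packets in one call */
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	if (net_xmit_drop_count(ret))
		sch->qstats.drops++;
	return ret;
}
```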
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 106479a..af9360d 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -83,8 +83,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
 	if (q->q.qlen < dev->tx_queue_len) {
 		__skb_queue_tail(&q->q, skb);
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 		return NET_XMIT_SUCCESS;
 	}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 8eb8895..d5e1e0b 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -26,6 +26,7 @@
 #include <net/sock.h>
 #include <net/xfrm.h>
 #include <net/netlink.h>
+#include <net/ah.h>
 #include <asm/uaccess.h>
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 #include <linux/in6.h>
@@ -302,7 +303,8 @@ static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props,
 	algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
 	if (!algo)
 		return -ENOSYS;
-	if (ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
+	if ((ualg->alg_trunc_len / 8) > MAX_AH_AUTH_LEN ||
+	    ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
 		return -EINVAL;
 
 	*props = algo->desc.sadb_alg_id;
@@ -2187,7 +2189,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 
 	if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
 	     type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
-	    (nlh->nlmsg_flags & NLM_F_DUMP)) {
+	    (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
 		if (link->dump == NULL)
 			return -EINVAL;
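The attach_auth_trunc() hunk above adds a second bound on the user-supplied truncated ICV length: besides not exceeding the algorithm's full ICV width, the length (given in bits) must also fit the fixed AH authenticator buffer, whose size MAX_AH_AUTH_LEN comes from the newly included <net/ah.h> and is counted in bytes, hence the division by 8. A standalone sketch of the same check, with an illustrative buffer size rather than the kernel's actual constant:

```c
/* Standalone illustration of the bounds check added above; the constant
 * below is made up for the example (the real limit is MAX_AH_AUTH_LEN
 * from <net/ah.h>, and icv_fullbits comes from the algorithm description). */
#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_MAX_AH_AUTH_LEN	12	/* bytes of ICV the AH code can hold */

static bool trunc_len_ok(unsigned int alg_trunc_len,	/* bits, from userspace */
			 unsigned int icv_fullbits)	/* bits, from the algo */
{
	if ((alg_trunc_len / 8) > EXAMPLE_MAX_AH_AUTH_LEN)
		return false;	/* would overflow the AH authenticator buffer */
	if (alg_trunc_len > icv_fullbits)
		return false;	/* cannot keep more bits than the ICV provides */
	return true;
}

int main(void)
{
	printf("%d\n", trunc_len_ok(96, 256));	/* 12 bytes: accepted  */
	printf("%d\n", trunc_len_ok(160, 256));	/* 20 bytes: rejected  */
	return 0;
}
```

The final xfrm_user_rcv_msg() hunk applies the same exact-match NLM_F_DUMP test already discussed for the ctnetlink and genetlink paths.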