Diffstat (limited to 'arch')
-rw-r--r-- arch/arm/Kconfig | 12
-rw-r--r-- arch/arm/configs/ezx_defconfig | 2
-rw-r--r-- arch/arm/configs/imote2_defconfig | 2
-rw-r--r-- arch/arm/configs/magician_defconfig | 2
-rw-r--r-- arch/arm/configs/zeus_defconfig | 2
-rw-r--r-- arch/arm/kernel/head.S | 2
-rw-r--r-- arch/arm/kernel/process.c | 3
-rw-r--r-- arch/arm/mach-at91/at91sam9260.c | 6
-rw-r--r-- arch/arm/mach-davinci/board-da850-evm.c | 2
-rw-r--r-- arch/arm/mach-davinci/board-dm646x-evm.c | 6
-rw-r--r-- arch/arm/mach-mxs/clock-mx28.c | 2
-rw-r--r-- arch/arm/mach-mxs/include/mach/mxs.h | 1
-rw-r--r-- arch/arm/mach-omap2/Kconfig | 1
-rw-r--r-- arch/arm/mach-omap2/board-rx51-peripherals.c | 2
-rw-r--r-- arch/arm/mach-omap2/smartreflex.c | 2
-rw-r--r-- arch/arm/mach-pxa/balloon3.c | 2
-rw-r--r-- arch/arm/mach-pxa/colibri-pxa320.c | 2
-rw-r--r-- arch/arm/mach-pxa/gumstix.c | 2
-rw-r--r-- arch/arm/mach-pxa/include/mach/palm27x.h | 4
-rw-r--r-- arch/arm/mach-pxa/palm27x.c | 4
-rw-r--r-- arch/arm/mach-pxa/palmtc.c | 2
-rw-r--r-- arch/arm/mach-pxa/vpac270.c | 2
-rwxr-xr-x arch/arm/mach-s5pv210/cpufreq.c | 82
-rwxr-xr-x arch/arm/mach-s5pv210/herring-panel.c | 6
-rw-r--r-- arch/arm/mach-s5pv210/herring-touchkey-led.c | 31
-rw-r--r-- arch/arm/mach-s5pv210/include/mach/cpu-freq-v210.h | 6
-rwxr-xr-x arch/arm/mach-s5pv210/mach-herring.c | 22
-rw-r--r-- arch/arm/mach-ux500/Kconfig | 1
-rw-r--r-- arch/arm/mach-ux500/cpu.c | 25
-rw-r--r-- arch/arm/mm/proc-v7.S | 6
-rw-r--r-- arch/arm/oprofile/common.c | 2
-rw-r--r-- arch/arm/plat-mxc/include/mach/iomux-v3.h | 10
-rw-r--r-- arch/arm/plat-mxc/pwm.c | 16
-rw-r--r-- arch/ia64/kernel/acpi.c | 10
-rw-r--r-- arch/powerpc/include/asm/sections.h | 2
-rw-r--r-- arch/powerpc/include/asm/sparsemem.h | 2
-rw-r--r-- arch/powerpc/include/asm/synch.h | 1
-rw-r--r-- arch/powerpc/include/asm/time.h | 2
-rw-r--r-- arch/powerpc/kernel/irq.c | 15
-rw-r--r-- arch/powerpc/kernel/kvm.c | 1
-rw-r--r-- arch/powerpc/kernel/setup_32.c | 2
-rw-r--r-- arch/powerpc/kernel/setup_64.c | 1
-rw-r--r-- arch/powerpc/kernel/time.c | 9
-rw-r--r-- arch/powerpc/lib/feature-fixups.c | 23
-rw-r--r-- arch/powerpc/mm/gup.c | 12
-rw-r--r-- arch/powerpc/mm/hash_utils_64.c | 6
-rw-r--r-- arch/powerpc/mm/hugetlbpage.c | 21
-rw-r--r-- arch/powerpc/mm/mem.c | 3
-rw-r--r-- arch/powerpc/mm/mmu_context_hash64.c | 12
-rw-r--r-- arch/powerpc/mm/numa.c | 3
-rw-r--r-- arch/powerpc/platforms/ps3/interrupt.c | 23
-rw-r--r-- arch/powerpc/platforms/ps3/platform.h | 1
-rw-r--r-- arch/powerpc/platforms/ps3/smp.c | 2
-rw-r--r-- arch/powerpc/platforms/pseries/dlpar.c | 4
-rw-r--r-- arch/powerpc/platforms/pseries/eeh.c | 2
-rw-r--r-- arch/powerpc/platforms/pseries/hvCall_inst.c | 4
-rw-r--r-- arch/powerpc/platforms/pseries/lpar.c | 2
-rw-r--r-- arch/s390/kernel/ptrace.c | 48
-rw-r--r-- arch/s390/kvm/kvm-s390.c | 14
-rw-r--r-- arch/s390/mm/gup.c | 14
-rw-r--r-- arch/s390/mm/pgtable.c | 5
-rw-r--r-- arch/s390/oprofile/init.c | 2
-rw-r--r-- arch/score/kernel/entry.S | 2
-rw-r--r-- arch/sh/include/asm/page.h | 5
-rw-r--r-- arch/sh/oprofile/common.c | 4
-rw-r--r-- arch/sparc/include/asm/pgtable_32.h | 20
-rw-r--r-- arch/sparc/include/asm/pgtable_64.h | 20
-rw-r--r-- arch/sparc/kernel/entry.h | 7
-rw-r--r-- arch/sparc/kernel/module.c | 27
-rw-r--r-- arch/sparc/kernel/pci_sun4v.c | 4
-rw-r--r-- arch/sparc/kernel/setup_64.c | 48
-rw-r--r-- arch/sparc/kernel/signal32.c | 18
-rw-r--r-- arch/sparc/kernel/signal_32.c | 30
-rw-r--r-- arch/sparc/kernel/signal_64.c | 42
-rw-r--r-- arch/sparc/kernel/visemul.c | 32
-rw-r--r-- arch/sparc/lib/memcpy.S | 804
-rw-r--r-- arch/sparc/mm/Makefile | 1
-rw-r--r-- arch/sparc/mm/btfixup.c | 3
-rw-r--r-- arch/sparc/mm/generic_32.c | 98
-rw-r--r-- arch/sparc/mm/generic_64.c | 164
-rw-r--r-- arch/um/drivers/ubd_kern.c | 31
-rw-r--r-- arch/x86/include/asm/amd_nb.h | 2
-rw-r--r-- arch/x86/include/asm/apic.h | 2
-rw-r--r-- arch/x86/include/asm/timer.h | 23
-rw-r--r-- arch/x86/include/asm/uv/uv_bau.h | 1
-rw-r--r-- arch/x86/include/asm/uv/uv_hub.h | 39
-rw-r--r-- arch/x86/kernel/amd_iommu.c | 2
-rw-r--r-- arch/x86/kernel/amd_nb.c | 31
-rw-r--r-- arch/x86/kernel/apic/bigsmp_32.c | 20
-rw-r--r-- arch/x86/kernel/apic/probe_32.c | 10
-rw-r--r-- arch/x86/kernel/apic/x2apic_uv_x.c | 14
-rw-r--r-- arch/x86/kernel/cpu/perf_event_intel_ds.c | 6
-rw-r--r-- arch/x86/kernel/hpet.c | 21
-rw-r--r-- arch/x86/kernel/kprobes.c | 4
-rw-r--r-- arch/x86/kernel/microcode_amd.c | 24
-rw-r--r-- arch/x86/kernel/mpparse.c | 2
-rw-r--r-- arch/x86/kernel/reboot.c | 13
-rw-r--r-- arch/x86/mm/gup.c | 12
-rw-r--r-- arch/x86/mm/highmem_32.c | 2
-rw-r--r-- arch/x86/mm/mmap.c | 4
-rw-r--r-- arch/x86/mm/srat.c | 4
-rw-r--r-- arch/x86/net/bpf_jit_comp.c | 40
-rw-r--r-- arch/x86/oprofile/init.c | 7
-rw-r--r-- arch/x86/pci/Makefile | 3
-rw-r--r-- arch/x86/pci/acpi.c | 18
-rw-r--r-- arch/x86/pci/amd_bus.c | 42
-rw-r--r-- arch/x86/platform/mrst/mrst.c | 24
-rw-r--r-- arch/x86/platform/uv/tlb_uv.c | 30
-rw-r--r-- arch/x86/xen/enlighten.c | 3
-rw-r--r-- arch/x86/xen/setup.c | 18
110 files changed, 976 insertions(+), 1323 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 7646084..66a7045 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1313,6 +1313,18 @@ config ARM_ERRATA_764369
relevant cache maintenance functions and sets a specific bit
in the diagnostic control register of the SCU.
+config PL310_ERRATA_769419
+ bool "PL310 errata: no automatic Store Buffer drain"
+ depends on CACHE_L2X0
+ help
+ On revisions of the PL310 prior to r3p2, the Store Buffer does
+ not automatically drain. This can cause normal, non-cacheable
+ writes to be retained when the memory system is idle, leading
+ to suboptimal I/O performance for drivers using coherent DMA.
+ This option adds a write barrier to the cpu_idle loop so that,
+ on systems with an outer cache, the store buffer is drained
+ explicitly.
+
endmenu
menu "Kernel Features"
diff --git a/arch/arm/configs/ezx_defconfig b/arch/arm/configs/ezx_defconfig
index 227a477..d95763d 100644
--- a/arch/arm/configs/ezx_defconfig
+++ b/arch/arm/configs/ezx_defconfig
@@ -287,7 +287,7 @@ CONFIG_USB=y
# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_PXA27X=y
+CONFIG_USB_PXA27X=y
CONFIG_USB_ETH=m
# CONFIG_USB_ETH_RNDIS is not set
CONFIG_MMC=y
diff --git a/arch/arm/configs/imote2_defconfig b/arch/arm/configs/imote2_defconfig
index 176ec22..fd996bb 100644
--- a/arch/arm/configs/imote2_defconfig
+++ b/arch/arm/configs/imote2_defconfig
@@ -263,7 +263,7 @@ CONFIG_USB=y
# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_PXA27X=y
+CONFIG_USB_PXA27X=y
CONFIG_USB_ETH=m
# CONFIG_USB_ETH_RNDIS is not set
CONFIG_MMC=y
diff --git a/arch/arm/configs/magician_defconfig b/arch/arm/configs/magician_defconfig
index a88e64d..443675d 100644
--- a/arch/arm/configs/magician_defconfig
+++ b/arch/arm/configs/magician_defconfig
@@ -132,7 +132,7 @@ CONFIG_USB_MON=m
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_GADGET=y
CONFIG_USB_GADGET_VBUS_DRAW=500
-CONFIG_USB_GADGET_PXA27X=y
+CONFIG_USB_PXA27X=y
CONFIG_USB_ETH=m
# CONFIG_USB_ETH_RNDIS is not set
CONFIG_USB_GADGETFS=m
diff --git a/arch/arm/configs/zeus_defconfig b/arch/arm/configs/zeus_defconfig
index 59577ad..547a3c1 100644
--- a/arch/arm/configs/zeus_defconfig
+++ b/arch/arm/configs/zeus_defconfig
@@ -140,7 +140,7 @@ CONFIG_USB_SERIAL=m
CONFIG_USB_SERIAL_GENERIC=y
CONFIG_USB_SERIAL_MCT_U232=m
CONFIG_USB_GADGET=m
-CONFIG_USB_GADGET_PXA27X=y
+CONFIG_USB_PXA27X=y
CONFIG_USB_ETH=m
CONFIG_USB_GADGETFS=m
CONFIG_USB_FILE_STORAGE=m
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 278c1b0..673151c 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -348,7 +348,7 @@ __secondary_data:
* r13 = *virtual* address to jump to upon completion
*/
__enable_mmu:
-#ifdef CONFIG_ALIGNMENT_TRAP
+#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
orr r0, r0, #CR_A
#else
bic r0, r0, #CR_A
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 54c97ee..81c137d 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -232,6 +232,9 @@ void cpu_idle(void)
#endif
local_irq_disable();
+#ifdef CONFIG_PL310_ERRATA_769419
+ wmb();
+#endif
if (hlt_counter) {
local_irq_enable();
cpu_relax();
diff --git a/arch/arm/mach-at91/at91sam9260.c b/arch/arm/mach-at91/at91sam9260.c
index 7d606b0..eeb9478 100644
--- a/arch/arm/mach-at91/at91sam9260.c
+++ b/arch/arm/mach-at91/at91sam9260.c
@@ -237,9 +237,9 @@ static struct clk_lookup periph_clocks_lookups[] = {
CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
- CLKDEV_CON_DEV_ID("t3_clk", "atmel_tcb.1", &tc3_clk),
- CLKDEV_CON_DEV_ID("t4_clk", "atmel_tcb.1", &tc4_clk),
- CLKDEV_CON_DEV_ID("t5_clk", "atmel_tcb.1", &tc5_clk),
+ CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tc3_clk),
+ CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.1", &tc4_clk),
+ CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.1", &tc5_clk),
CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc_clk),
};
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index e83cc86..f893617 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -748,7 +748,7 @@ static struct snd_platform_data da850_evm_snd_data = {
.num_serializer = ARRAY_SIZE(da850_iis_serializer_direction),
.tdm_slots = 2,
.serial_dir = da850_iis_serializer_direction,
- .asp_chan_q = EVENTQ_1,
+ .asp_chan_q = EVENTQ_0,
.version = MCASP_VERSION_2,
.txnumevt = 1,
.rxnumevt = 1,
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index f6ac9ba..3cdd237 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -563,7 +563,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
int val;
u32 value;
- if (!vpif_vsclkdis_reg || !cpld_client)
+ if (!vpif_vidclkctl_reg || !cpld_client)
return -ENXIO;
val = i2c_smbus_read_byte(cpld_client);
@@ -571,7 +571,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
return val;
spin_lock_irqsave(&vpif_reg_lock, flags);
- value = __raw_readl(vpif_vsclkdis_reg);
+ value = __raw_readl(vpif_vidclkctl_reg);
if (mux_mode) {
val &= VPIF_INPUT_TWO_CHANNEL;
value |= VIDCH1CLK;
@@ -579,7 +579,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
val |= VPIF_INPUT_ONE_CHANNEL;
value &= ~VIDCH1CLK;
}
- __raw_writel(value, vpif_vsclkdis_reg);
+ __raw_writel(value, vpif_vidclkctl_reg);
spin_unlock_irqrestore(&vpif_reg_lock, flags);
err = i2c_smbus_write_byte(cpld_client, val);
diff --git a/arch/arm/mach-mxs/clock-mx28.c b/arch/arm/mach-mxs/clock-mx28.c
index 5dcc59d..b3a7124 100644
--- a/arch/arm/mach-mxs/clock-mx28.c
+++ b/arch/arm/mach-mxs/clock-mx28.c
@@ -404,7 +404,7 @@ static int name##_set_rate(struct clk *clk, unsigned long rate) \
reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \
reg &= ~BM_CLKCTRL_##dr##_DIV; \
reg |= div << BP_CLKCTRL_##dr##_DIV; \
- if (reg | (1 << clk->enable_shift)) { \
+ if (reg & (1 << clk->enable_shift)) { \
pr_err("%s: clock is gated\n", __func__); \
return -EINVAL; \
} \
diff --git a/arch/arm/mach-mxs/include/mach/mxs.h b/arch/arm/mach-mxs/include/mach/mxs.h
index 35a89dd..1332f73 100644
--- a/arch/arm/mach-mxs/include/mach/mxs.h
+++ b/arch/arm/mach-mxs/include/mach/mxs.h
@@ -30,6 +30,7 @@
*/
#define cpu_is_mx23() ( \
machine_is_mx23evk() || \
+ machine_is_stmp378x() || \
0)
#define cpu_is_mx28() ( \
machine_is_mx28evk() || \
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 19d5891..841ae21 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -326,6 +326,7 @@ config MACH_OMAP4_PANDA
config OMAP3_EMU
bool "OMAP3 debugging peripherals"
depends on ARCH_OMAP3
+ select ARM_AMBA
select OC_ETM
help
Say Y here to enable debugging hardware of omap3
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index 88bd6f7..c565971 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -133,7 +133,7 @@ static struct platform_device rx51_charger_device = {
static void __init rx51_charger_init(void)
{
WARN_ON(gpio_request_one(RX51_USB_TRANSCEIVER_RST_GPIO,
- GPIOF_OUT_INIT_LOW, "isp1704_reset"));
+ GPIOF_OUT_INIT_HIGH, "isp1704_reset"));
platform_device_register(&rx51_charger_device);
}
diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c
index fb7dc52..f5a6bc1 100644
--- a/arch/arm/mach-omap2/smartreflex.c
+++ b/arch/arm/mach-omap2/smartreflex.c
@@ -137,7 +137,7 @@ static irqreturn_t sr_interrupt(int irq, void *data)
sr_write_reg(sr_info, ERRCONFIG_V1, status);
} else if (sr_info->ip_type == SR_TYPE_V2) {
/* Read the status bits */
- sr_read_reg(sr_info, IRQSTATUS);
+ status = sr_read_reg(sr_info, IRQSTATUS);
/* Clear them by writing back */
sr_write_reg(sr_info, IRQSTATUS, status);
diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c
index 810a982..6ca327d 100644
--- a/arch/arm/mach-pxa/balloon3.c
+++ b/arch/arm/mach-pxa/balloon3.c
@@ -307,7 +307,7 @@ static inline void balloon3_mmc_init(void) {}
/******************************************************************************
* USB Gadget
******************************************************************************/
-#if defined(CONFIG_USB_GADGET_PXA27X)||defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE)
static void balloon3_udc_command(int cmd)
{
if (cmd == PXA2XX_UDC_CMD_CONNECT)
diff --git a/arch/arm/mach-pxa/colibri-pxa320.c b/arch/arm/mach-pxa/colibri-pxa320.c
index ff9ff5f..fdf611c 100644
--- a/arch/arm/mach-pxa/colibri-pxa320.c
+++ b/arch/arm/mach-pxa/colibri-pxa320.c
@@ -147,7 +147,7 @@ static void __init colibri_pxa320_init_eth(void)
static inline void __init colibri_pxa320_init_eth(void) {}
#endif /* CONFIG_AX88796 */
-#if defined(CONFIG_USB_GADGET_PXA27X)||defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE)
static struct gpio_vbus_mach_info colibri_pxa320_gpio_vbus_info = {
.gpio_vbus = mfp_to_gpio(MFP_PIN_GPIO96),
.gpio_pullup = -1,
diff --git a/arch/arm/mach-pxa/gumstix.c b/arch/arm/mach-pxa/gumstix.c
index d65e4bd..b9e8233 100644
--- a/arch/arm/mach-pxa/gumstix.c
+++ b/arch/arm/mach-pxa/gumstix.c
@@ -106,7 +106,7 @@ static void __init gumstix_mmc_init(void)
}
#endif
-#ifdef CONFIG_USB_GADGET_PXA25X
+#ifdef CONFIG_USB_PXA25X
static struct gpio_vbus_mach_info gumstix_udc_info = {
.gpio_vbus = GPIO_GUMSTIX_USB_GPIOn,
.gpio_pullup = GPIO_GUMSTIX_USB_GPIOx,
diff --git a/arch/arm/mach-pxa/include/mach/palm27x.h b/arch/arm/mach-pxa/include/mach/palm27x.h
index 0a5e5ea..8d56043 100644
--- a/arch/arm/mach-pxa/include/mach/palm27x.h
+++ b/arch/arm/mach-pxa/include/mach/palm27x.h
@@ -37,8 +37,8 @@ extern void __init palm27x_lcd_init(int power,
static inline void palm27x_lcd_init(int power, struct pxafb_mode_info *mode) {}
#endif
-#if defined(CONFIG_USB_GADGET_PXA27X) || \
- defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if defined(CONFIG_USB_PXA27X) || \
+ defined(CONFIG_USB_PXA27X_MODULE)
extern void __init palm27x_udc_init(int vbus, int pullup,
int vbus_inverted);
#else
diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c
index 325c245..fbc10d7 100644
--- a/arch/arm/mach-pxa/palm27x.c
+++ b/arch/arm/mach-pxa/palm27x.c
@@ -164,8 +164,8 @@ void __init palm27x_lcd_init(int power, struct pxafb_mode_info *mode)
/******************************************************************************
* USB Gadget
******************************************************************************/
-#if defined(CONFIG_USB_GADGET_PXA27X) || \
- defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if defined(CONFIG_USB_PXA27X) || \
+ defined(CONFIG_USB_PXA27X_MODULE)
static struct gpio_vbus_mach_info palm27x_udc_info = {
.gpio_vbus_inverted = 1,
};
diff --git a/arch/arm/mach-pxa/palmtc.c b/arch/arm/mach-pxa/palmtc.c
index fb06bd0..5193ce2 100644
--- a/arch/arm/mach-pxa/palmtc.c
+++ b/arch/arm/mach-pxa/palmtc.c
@@ -339,7 +339,7 @@ static inline void palmtc_mkp_init(void) {}
/******************************************************************************
* UDC
******************************************************************************/
-#if defined(CONFIG_USB_GADGET_PXA25X)||defined(CONFIG_USB_GADGET_PXA25X_MODULE)
+#if defined(CONFIG_USB_PXA25X)||defined(CONFIG_USB_PXA25X_MODULE)
static struct gpio_vbus_mach_info palmtc_udc_info = {
.gpio_vbus = GPIO_NR_PALMTC_USB_DETECT_N,
.gpio_vbus_inverted = 1,
diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c
index 67bd414..10b80d4 100644
--- a/arch/arm/mach-pxa/vpac270.c
+++ b/arch/arm/mach-pxa/vpac270.c
@@ -343,7 +343,7 @@ static inline void vpac270_uhc_init(void) {}
/******************************************************************************
* USB Gadget
******************************************************************************/
-#if defined(CONFIG_USB_GADGET_PXA27X)||defined(CONFIG_USB_GADGET_PXA27X_MODULE)
+#if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE)
static struct gpio_vbus_mach_info vpac270_gpio_vbus_info = {
.gpio_vbus = GPIO41_VPAC270_UDC_DETECT,
.gpio_pullup = -1,
diff --git a/arch/arm/mach-s5pv210/cpufreq.c b/arch/arm/mach-s5pv210/cpufreq.c
index 4e11f99..2c641b3 100755
--- a/arch/arm/mach-s5pv210/cpufreq.c
+++ b/arch/arm/mach-s5pv210/cpufreq.c
@@ -32,10 +32,11 @@ static struct clk *dmc1_clk;
static struct cpufreq_freqs freqs;
static DEFINE_MUTEX(set_freq_lock);
-/* APLL M,P,S values for 1G/800Mhz */
+/* APLL M,P,S values for 1.2G/800Mhz */
+#define APLL_VAL_1200 ((1 << 31) | (150 << 16) | (3 << 8) | 1)
#define APLL_VAL_1000 ((1 << 31) | (125 << 16) | (3 << 8) | 1)
+#define APLL_VAL_1100 ((1 << 31) | (275 << 16) | (6 << 8) | 1)
#define APLL_VAL_800 ((1 << 31) | (100 << 16) | (3 << 8) | 1)
-
#define SLEEP_FREQ (800 * 1000) /* Use 800MHz when entering sleep */
/*
@@ -62,7 +63,7 @@ struct dram_conf {
static struct dram_conf s5pv210_dram_conf[2];
enum perf_level {
- L0, L1, L2, L3, L4,
+ L0, L1, L2, L3, L4, L5, L6,
};
enum s5pv210_mem_type {
@@ -77,11 +78,13 @@ enum s5pv210_dmc_port {
};
static struct cpufreq_frequency_table s5pv210_freq_table[] = {
- {L0, 1000*1000},
- {L1, 800*1000},
- {L2, 400*1000},
- {L3, 200*1000},
- {L4, 100*1000},
+ {L0, 1200*1000},
+ {L1, 1100*1000},
+ {L2, 1000*1000},
+ {L3, 800*1000},
+ {L4, 400*1000},
+ {L5, 200*1000},
+ {L6, 100*1000},
{0, CPUFREQ_TABLE_END},
};
@@ -93,33 +96,41 @@ struct s5pv210_dvs_conf {
unsigned long int_volt; /* uV */
};
-const unsigned long arm_volt_max = 1350000;
+const unsigned long arm_volt_max = 1550000;
const unsigned long int_volt_max = 1250000;
static struct s5pv210_dvs_conf dvs_conf[] = {
[L0] = {
+ .arm_volt = 1350000,
+ .int_volt = 1175000,
+ },
+ [L1] = {
+ .arm_volt = 1300000,
+ .int_volt = 1175000,
+ },
+ [L2] = {
.arm_volt = 1250000,
.int_volt = 1100000,
},
- [L1] = {
+ [L3] = {
.arm_volt = 1200000,
.int_volt = 1100000,
},
- [L2] = {
+ [L4] = {
.arm_volt = 1050000,
.int_volt = 1100000,
},
- [L3] = {
+ [L5] = {
.arm_volt = 950000,
.int_volt = 1100000,
},
- [L4] = {
+ [L6] = {
.arm_volt = 950000,
.int_volt = 1000000,
},
};
-static u32 clkdiv_val[5][11] = {
+static u32 clkdiv_val[7][11] = {
/*
* Clock divider value for following
* { APLL, A2M, HCLK_MSYS, PCLK_MSYS,
@@ -127,19 +138,25 @@ static u32 clkdiv_val[5][11] = {
* ONEDRAM, MFC, G3D }
*/
- /* L0 : [1000/200/100][166/83][133/66][200/200] */
+ /* L0 : [1200/200/200/100][166/83][133/66][200/200] */
+ {0, 5, 5, 1, 3, 1, 4, 1, 3, 0, 0},
+
+ /* L1 : [1100/200/200/100][166/83][133/66][200/200] */
+ {0, 4, 4, 1, 3, 1, 4, 1, 3, 0, 0},
+
+ /* L2 : [1000/200/100][166/83][133/66][200/200] */
{0, 4, 4, 1, 3, 1, 4, 1, 3, 0, 0},
- /* L1 : [800/200/100][166/83][133/66][200/200] */
+ /* L3 : [800/200/100][166/83][133/66][200/200] */
{0, 3, 3, 1, 3, 1, 4, 1, 3, 0, 0},
- /* L2 : [400/200/100][166/83][133/66][200/200] */
+ /* L4 : [400/200/100][166/83][133/66][200/200] */
{1, 3, 1, 1, 3, 1, 4, 1, 3, 0, 0},
- /* L3 : [200/200/100][166/83][133/66][200/200] */
+ /* L5 : [200/200/100][166/83][133/66][200/200] */
{3, 3, 1, 1, 3, 1, 4, 1, 3, 0, 0},
- /* L4 : [100/100/100][83/83][66/66][100/100] */
+ /* L6 : [100/100/100][83/83][66/66][100/100] */
{7, 7, 0, 0, 7, 0, 9, 0, 7, 0, 0},
};
@@ -260,11 +277,11 @@ static int s5pv210_target(struct cpufreq_policy *policy,
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
/* Check if there need to change PLL */
- if ((index == L0) || (priv_index == L0))
+ if ((index <= L2) || (priv_index <= L2))
pll_changing = 1;
/* Check if there need to change System bus clock */
- if ((index == L4) || (priv_index == L4))
+ if ((index == L6) || (priv_index == L6))
bus_speed_changing = 1;
if (bus_speed_changing) {
@@ -273,11 +290,7 @@ static int s5pv210_target(struct cpufreq_policy *policy,
* temporary clock while changing divider.
* expected clock is 83Mhz : 7.8usec/(1/83Mhz) = 0x287
*/
- if (pll_changing)
- s5pv210_set_refresh(DMC1, 83000);
- else
- s5pv210_set_refresh(DMC1, 100000);
-
+ s5pv210_set_refresh(DMC1, 100000);
s5pv210_set_refresh(DMC0, 83000);
}
@@ -363,7 +376,7 @@ static int s5pv210_target(struct cpufreq_policy *policy,
/* ARM MCS value changed */
reg = __raw_readl(S5P_ARM_MCS_CON);
reg &= ~0x3;
- if (index >= L3)
+ if (index >= L6)
reg |= 0x3;
else
reg |= 0x1;
@@ -380,8 +393,10 @@ static int s5pv210_target(struct cpufreq_policy *policy,
* 6-2. Wait untile the PLL is locked
*/
if (index == L0)
- __raw_writel(APLL_VAL_1000, S5P_APLL_CON);
- else
+ __raw_writel(APLL_VAL_1200, S5P_APLL_CON);
+ else if (index == L1)
+ __raw_writel(APLL_VAL_1100, S5P_APLL_CON);
+ else
__raw_writel(APLL_VAL_800, S5P_APLL_CON);
do {
@@ -452,7 +467,7 @@ static int s5pv210_target(struct cpufreq_policy *policy,
} while (reg & (1 << 15));
/* Reconfigure DRAM refresh counter value */
- if (index != L4) {
+ if (index != L6) {
/*
* DMC0 : 166Mhz
* DMC1 : 200Mhz
@@ -558,7 +573,11 @@ static int __init s5pv210_cpu_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = 40000;
- return cpufreq_frequency_table_cpuinfo(policy, s5pv210_freq_table);
+ cpufreq_frequency_table_cpuinfo(policy, s5pv210_freq_table);
+ /* set default min and max policies to safe speeds */
+ policy->max = 1000000;
+ policy->min = 200000;
+ return 0;
}
static int s5pv210_cpufreq_notifier_event(struct notifier_block *this,
@@ -602,6 +621,7 @@ static struct cpufreq_driver s5pv210_driver = {
.get = s5pv210_getspeed,
.init = s5pv210_cpu_init,
.name = "s5pv210",
+ .attr = s5pv210_cpufreq_attr,
#ifdef CONFIG_PM
.suspend = s5pv210_cpufreq_suspend,
.resume = s5pv210_cpufreq_resume,
diff --git a/arch/arm/mach-s5pv210/herring-panel.c b/arch/arm/mach-s5pv210/herring-panel.c
index 8f69681..e84d6e8 100755
--- a/arch/arm/mach-s5pv210/herring-panel.c
+++ b/arch/arm/mach-s5pv210/herring-panel.c
@@ -163,7 +163,11 @@ static const struct tl2796_gamma_adj_points gamma_adj_points = {
.v255 = BV_255,
};
+#ifdef CONFIG_FB_VOODOO
+struct gamma_entry gamma_table[] = {
+#else
static const struct gamma_entry gamma_table[] = {
+#endif
{ BV_0, { 4200000, 4200000, 4200000, }, },
{ 1, { 3994200, 4107600, 3910200, }, },
{ 0x00000400, { 3669486, 3738030, 3655093, }, },
@@ -353,6 +357,7 @@ struct s5p_panel_data herring_panel_data = {
0x0b8,
0x0fc,
},
+#ifdef CONFIG_TL2796_CONVERT_COLORS_RES
.color_adj = {
/* Convert from 8500K to D65, assuming:
* Rx 0.66950, Ry 0.33100
@@ -366,6 +371,7 @@ struct s5p_panel_data herring_panel_data = {
},
.rshift = 31,
},
+#endif
.gamma_adj_points = &gamma_adj_points,
.gamma_table = gamma_table,
diff --git a/arch/arm/mach-s5pv210/herring-touchkey-led.c b/arch/arm/mach-s5pv210/herring-touchkey-led.c
index b36e0f0..91d8623 100644
--- a/arch/arm/mach-s5pv210/herring-touchkey-led.c
+++ b/arch/arm/mach-s5pv210/herring-touchkey-led.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/earlysuspend.h>
+#include <linux/bln.h>
#include <asm/mach-types.h>
#include "herring.h"
@@ -29,6 +30,23 @@ static void herring_touchkey_led_onoff(int onoff)
gpio_direction_output(S5PV210_GPJ3(led_gpios[i]), !!onoff);
}
+#ifdef CONFIG_GENERIC_BLN
+static void herring_touchkey_bln_enable(void)
+{
+ herring_touchkey_led_onoff(1);
+}
+
+static void herring_touchkey_bln_disable(void)
+{
+ herring_touchkey_led_onoff(0);
+}
+
+static struct bln_implementation herring_touchkey_bln = {
+ .enable = herring_touchkey_bln_enable,
+ .disable = herring_touchkey_bln_disable,
+};
+#endif
+
static void herring_touchkey_led_early_suspend(struct early_suspend *h)
{
herring_touchkey_led_onoff(0);
@@ -49,24 +67,31 @@ static int __init herring_init_touchkey_led(void)
{
int i;
int ret = 0;
+ u32 gpio;
if (!machine_is_herring() || !herring_is_tft_dev())
return 0;
for (i = 0; i < ARRAY_SIZE(led_gpios); i++) {
- ret = gpio_request(S5PV210_GPJ3(led_gpios[i]), "touchkey led");
+ gpio = S5PV210_GPJ3(led_gpios[i]);
+ ret = gpio_request(gpio, "touchkey led");
if (ret) {
pr_err("Failed to request touchkey led gpio %d\n", i);
goto err_req;
}
- s3c_gpio_setpull(S5PV210_GPJ3(led_gpios[i]),
- S3C_GPIO_PULL_NONE);
+ s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
+ s3c_gpio_slp_cfgpin(gpio, S3C_GPIO_SLP_PREV);
+ s3c_gpio_slp_setpull_updown(gpio, S3C_GPIO_PULL_NONE);
}
herring_touchkey_led_onoff(1);
register_early_suspend(&early_suspend);
+#ifdef CONFIG_GENERIC_BLN
+ register_bln_implementation(&herring_touchkey_bln);
+#endif
+
return 0;
err_req:
diff --git a/arch/arm/mach-s5pv210/include/mach/cpu-freq-v210.h b/arch/arm/mach-s5pv210/include/mach/cpu-freq-v210.h
index 8274a01..449c753 100644
--- a/arch/arm/mach-s5pv210/include/mach/cpu-freq-v210.h
+++ b/arch/arm/mach-s5pv210/include/mach/cpu-freq-v210.h
@@ -26,6 +26,12 @@ struct s5pv210_cpufreq_data {
unsigned int size;
};
+/* Make sure we have the scaling_available_freqs sysfs file */
+static struct freq_attr *s5pv210_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
extern void s5pv210_cpufreq_set_platdata(struct s5pv210_cpufreq_data *pdata);
#endif /* __ASM_ARCH_CPU_FREQ_H */
diff --git a/arch/arm/mach-s5pv210/mach-herring.c b/arch/arm/mach-s5pv210/mach-herring.c
index a1f157c..2c3286d 100755
--- a/arch/arm/mach-s5pv210/mach-herring.c
+++ b/arch/arm/mach-s5pv210/mach-herring.c
@@ -710,7 +710,7 @@ static struct regulator_init_data herring_buck1_data = {
.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
REGULATOR_CHANGE_STATUS,
.state_mem = {
- .uV = 1250000,
+ .uV = 1472000,
.mode = REGULATOR_MODE_NORMAL,
.disabled = 1,
},
@@ -728,7 +728,7 @@ static struct regulator_init_data herring_buck2_data = {
.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
REGULATOR_CHANGE_STATUS,
.state_mem = {
- .uV = 1100000,
+ .uV = 1250000,
.mode = REGULATOR_MODE_NORMAL,
.disabled = 1,
},
@@ -2008,6 +2008,23 @@ static void touch_keypad_onoff(int onoff)
msleep(50);
}
+static void touch_keypad_gpio_sleep(int onoff)
+{
+ if (onoff == TOUCHKEY_ON) {
+ /*
+ * reconfigure gpio to activate touchkey controller vdd in sleep mode
+ */
+ s3c_gpio_slp_cfgpin(_3_GPIO_TOUCH_EN, S3C_GPIO_SLP_OUT1);
+ } else {
+ /*
+ * reconfigure gpio to deactivate touchkey vdd in sleep mode,
+ * this is the default
+ */
+ s3c_gpio_slp_cfgpin(_3_GPIO_TOUCH_EN, S3C_GPIO_SLP_OUT0);
+ }
+
+}
+
static const int touch_keypad_code[] = {
KEY_MENU,
KEY_HOME,
@@ -2019,6 +2036,7 @@ static struct touchkey_platform_data touchkey_data = {
.keycode_cnt = ARRAY_SIZE(touch_keypad_code),
.keycode = touch_keypad_code,
.touchkey_onoff = touch_keypad_onoff,
+ .touchkey_sleep_onoff = touch_keypad_gpio_sleep,
.fw_name = "cypress-touchkey.bin",
.scl_pin = _3_TOUCH_SCL_28V,
.sda_pin = _3_TOUCH_SDA_28V,
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index 9a9706c..6ebdb0d 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -7,6 +7,7 @@ config UX500_SOC_COMMON
select HAS_MTU
select ARM_ERRATA_753970
select ARM_ERRATA_754322
+ select ARM_ERRATA_764369
menu "Ux500 SoC"
diff --git a/arch/arm/mach-ux500/cpu.c b/arch/arm/mach-ux500/cpu.c
index 1da23bb..8aa104a 100644
--- a/arch/arm/mach-ux500/cpu.c
+++ b/arch/arm/mach-ux500/cpu.c
@@ -99,7 +99,27 @@ static void ux500_l2x0_inv_all(void)
ux500_cache_sync();
}
-static int ux500_l2x0_init(void)
+static int __init ux500_l2x0_unlock(void)
+{
+ int i;
+
+ /*
+ * Unlock Data and Instruction Lock if locked. Ux500 U-Boot versions
+ * apparently locks both caches before jumping to the kernel. The
+ * l2x0 core will not touch the unlock registers if the l2x0 is
+ * already enabled, so we do it right here instead. The PL310 has
+ * 8 sets of registers, one per possible CPU.
+ */
+ for (i = 0; i < 8; i++) {
+ writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
+ i * L2X0_LOCKDOWN_STRIDE);
+ writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
+ i * L2X0_LOCKDOWN_STRIDE);
+ }
+ return 0;
+}
+
+static int __init ux500_l2x0_init(void)
{
if (cpu_is_u5500())
l2x0_base = __io_address(U5500_L2CC_BASE);
@@ -108,6 +128,9 @@ static int ux500_l2x0_init(void)
else
ux500_unknown_soc();
+ /* Unlock before init */
+ ux500_l2x0_unlock();
+
/* 64KB way size, 8 way associativity, force WA */
l2x0_init(l2x0_base, 0x3e060000, 0xc0000fff);
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 089c0b5..b6ba103 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -270,10 +270,6 @@ cpu_resume_l1_flags:
* Initialise TLB, Caches, and MMU state ready to switch the MMU
* on. Return in r0 the new CP15 C1 control register setting.
*
- * We automatically detect if we have a Harvard cache, and use the
- * Harvard cache control instructions insead of the unified cache
- * control instructions.
- *
* This should be able to cover all ARMv7 cores.
*
* It is assumed that:
@@ -363,9 +359,7 @@ __v7_setup:
#endif
3: mov r10, #0
-#ifdef HARVARD_CACHE
mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate
-#endif
dsb
#ifdef CONFIG_MMU
mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
index c074e66..4e0a371 100644
--- a/arch/arm/oprofile/common.c
+++ b/arch/arm/oprofile/common.c
@@ -116,7 +116,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
return oprofile_perf_init(ops);
}
-void __exit oprofile_arch_exit(void)
+void oprofile_arch_exit(void)
{
oprofile_perf_exit();
}
diff --git a/arch/arm/plat-mxc/include/mach/iomux-v3.h b/arch/arm/plat-mxc/include/mach/iomux-v3.h
index ebbce33..4509956 100644
--- a/arch/arm/plat-mxc/include/mach/iomux-v3.h
+++ b/arch/arm/plat-mxc/include/mach/iomux-v3.h
@@ -89,11 +89,11 @@ typedef u64 iomux_v3_cfg_t;
#define PAD_CTL_HYS (1 << 8)
#define PAD_CTL_PKE (1 << 7)
-#define PAD_CTL_PUE (1 << 6)
-#define PAD_CTL_PUS_100K_DOWN (0 << 4)
-#define PAD_CTL_PUS_47K_UP (1 << 4)
-#define PAD_CTL_PUS_100K_UP (2 << 4)
-#define PAD_CTL_PUS_22K_UP (3 << 4)
+#define PAD_CTL_PUE (1 << 6 | PAD_CTL_PKE)
+#define PAD_CTL_PUS_100K_DOWN (0 << 4 | PAD_CTL_PUE)
+#define PAD_CTL_PUS_47K_UP (1 << 4 | PAD_CTL_PUE)
+#define PAD_CTL_PUS_100K_UP (2 << 4 | PAD_CTL_PUE)
+#define PAD_CTL_PUS_22K_UP (3 << 4 | PAD_CTL_PUE)
#define PAD_CTL_ODE (1 << 3)
diff --git a/arch/arm/plat-mxc/pwm.c b/arch/arm/plat-mxc/pwm.c
index 7a61ef8..f4b68be 100644
--- a/arch/arm/plat-mxc/pwm.c
+++ b/arch/arm/plat-mxc/pwm.c
@@ -32,6 +32,9 @@
#define MX3_PWMSAR 0x0C /* PWM Sample Register */
#define MX3_PWMPR 0x10 /* PWM Period Register */
#define MX3_PWMCR_PRESCALER(x) (((x - 1) & 0xFFF) << 4)
+#define MX3_PWMCR_DOZEEN (1 << 24)
+#define MX3_PWMCR_WAITEN (1 << 23)
+#define MX3_PWMCR_DBGEN (1 << 22)
#define MX3_PWMCR_CLKSRC_IPG_HIGH (2 << 16)
#define MX3_PWMCR_CLKSRC_IPG (1 << 16)
#define MX3_PWMCR_EN (1 << 0)
@@ -74,10 +77,21 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
do_div(c, period_ns);
duty_cycles = c;
+ /*
+ * according to imx pwm RM, the real period value should be
+ * PERIOD value in PWMPR plus 2.
+ */
+ if (period_cycles > 2)
+ period_cycles -= 2;
+ else
+ period_cycles = 0;
+
writel(duty_cycles, pwm->mmio_base + MX3_PWMSAR);
writel(period_cycles, pwm->mmio_base + MX3_PWMPR);
- cr = MX3_PWMCR_PRESCALER(prescale) | MX3_PWMCR_EN;
+ cr = MX3_PWMCR_PRESCALER(prescale) |
+ MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
+ MX3_PWMCR_DBGEN | MX3_PWMCR_EN;
if (cpu_is_mx25())
cr |= MX3_PWMCR_CLKSRC_IPG;
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 3be485a..f19de9f 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -429,22 +429,24 @@ static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
static struct acpi_table_slit __initdata *slit_table;
cpumask_t early_cpu_possible_map = CPU_MASK_NONE;
-static int get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
+static int __init
+get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
{
int pxm;
pxm = pa->proximity_domain_lo;
- if (ia64_platform_is("sn2"))
+ if (ia64_platform_is("sn2") || acpi_srat_revision >= 2)
pxm += pa->proximity_domain_hi[0] << 8;
return pxm;
}
-static int get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
+static int __init
+get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
{
int pxm;
pxm = ma->proximity_domain;
- if (!ia64_platform_is("sn2"))
+ if (!ia64_platform_is("sn2") && acpi_srat_revision <= 1)
pxm &= 0xff;
return pxm;
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index 6fbce72..a0f358d 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -8,7 +8,7 @@
#ifdef __powerpc64__
-extern char _end[];
+extern char __end_interrupts[];
static inline int in_kernel_text(unsigned long addr)
{
diff --git a/arch/powerpc/include/asm/sparsemem.h b/arch/powerpc/include/asm/sparsemem.h
index 54a47ea..0c5fa31 100644
--- a/arch/powerpc/include/asm/sparsemem.h
+++ b/arch/powerpc/include/asm/sparsemem.h
@@ -16,7 +16,7 @@
#endif /* CONFIG_SPARSEMEM */
#ifdef CONFIG_MEMORY_HOTPLUG
-extern void create_section_mapping(unsigned long start, unsigned long end);
+extern int create_section_mapping(unsigned long start, unsigned long end);
extern int remove_section_mapping(unsigned long start, unsigned long end);
#ifdef CONFIG_NUMA
extern int hot_add_scn_to_nid(unsigned long scn_addr);
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
index d7cab44..87878c6 100644
--- a/arch/powerpc/include/asm/synch.h
+++ b/arch/powerpc/include/asm/synch.h
@@ -13,6 +13,7 @@
extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
void *fixup_end);
+extern void do_final_fixups(void);
static inline void eieio(void)
{
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index fe6f7c2..bc3c745 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -219,5 +219,7 @@ DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array);
extern void secondary_cpu_time_init(void);
extern void iSeries_time_init_early(void);
+extern void decrementer_check_overflow(void);
+
#endif /* __KERNEL__ */
#endif /* __POWERPC_TIME_H */
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 5b428e3..ca2987d 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -170,16 +170,13 @@ notrace void arch_local_irq_restore(unsigned long en)
*/
local_paca->hard_enabled = en;
-#ifndef CONFIG_BOOKE
- /* On server, re-trigger the decrementer if it went negative since
- * some processors only trigger on edge transitions of the sign bit.
- *
- * BookE has a level sensitive decrementer (latches in TSR) so we
- * don't need that
+ /*
+ * Trigger the decrementer if we have a pending event. Some processors
+ * only trigger on edge transitions of the sign bit. We might also
+ * have disabled interrupts long enough that the decrementer wrapped
+ * to positive.
*/
- if ((int)mfspr(SPRN_DEC) < 0)
- mtspr(SPRN_DEC, 1);
-#endif /* CONFIG_BOOKE */
+ decrementer_check_overflow();
/*
* Force the delivery of pending soft-disabled interrupts on PS3.
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index b06bdae..ad892f7 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -131,7 +131,6 @@ static void kvm_patch_ins_b(u32 *inst, int addr)
/* On relocatable kernels interrupts handlers and our code
can be in different regions, so we don't patch them */
- extern u32 __end_interrupts;
if ((ulong)inst < (ulong)&__end_interrupts)
return;
#endif
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 620d792..c7e7b8c 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -107,6 +107,8 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
PTRRELOC(&__start___lwsync_fixup),
PTRRELOC(&__stop___lwsync_fixup));
+ do_final_fixups();
+
return KERNELBASE + offset;
}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index a88bf27..7867fd1 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -352,6 +352,7 @@ void __init setup_system(void)
&__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
do_lwsync_fixups(cur_cpu_spec->cpu_features,
&__start___lwsync_fixup, &__stop___lwsync_fixup);
+ do_final_fixups();
/*
* Unflatten the device-tree passed by prom_init or kexec
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 03b29a6..2de304a 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -889,6 +889,15 @@ static void __init clocksource_init(void)
clock->name, clock->mult, clock->shift);
}
+void decrementer_check_overflow(void)
+{
+ u64 now = get_tb_or_rtc();
+ struct decrementer_clock *decrementer = &__get_cpu_var(decrementers);
+
+ if (now >= decrementer->next_tb)
+ set_dec(1);
+}
+
static int decrementer_set_next_event(unsigned long evt,
struct clock_event_device *dev)
{
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 0d08d01..7a8a748 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -18,6 +18,8 @@
#include <linux/init.h>
#include <asm/cputable.h>
#include <asm/code-patching.h>
+#include <asm/page.h>
+#include <asm/sections.h>
struct fixup_entry {
@@ -128,6 +130,27 @@ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
}
}
+void do_final_fixups(void)
+{
+#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
+ int *src, *dest;
+ unsigned long length;
+
+ if (PHYSICAL_START == 0)
+ return;
+
+ src = (int *)(KERNELBASE + PHYSICAL_START);
+ dest = (int *)KERNELBASE;
+ length = (__end_interrupts - _stext) / sizeof(int);
+
+ while (length--) {
+ patch_instruction(dest, *src);
+ src++;
+ dest++;
+ }
+#endif
+}
+
#ifdef CONFIG_FTR_FIXUP_SELFTEST
#define check(x) \
diff --git a/arch/powerpc/mm/gup.c b/arch/powerpc/mm/gup.c
index fec1320..d7efdbf 100644
--- a/arch/powerpc/mm/gup.c
+++ b/arch/powerpc/mm/gup.c
@@ -16,16 +16,6 @@
#ifdef __HAVE_ARCH_PTE_SPECIAL
-static inline void get_huge_page_tail(struct page *page)
-{
- /*
- * __split_huge_page_refcount() cannot run
- * from under us.
- */
- VM_BUG_ON(atomic_read(&page->_count) < 0);
- atomic_inc(&page->_count);
-}
-
/*
* The performance critical leaf functions are made noinline otherwise gcc
* inlines everything into a single function which results in too much
@@ -57,8 +47,6 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
put_page(page);
return 0;
}
- if (PageTail(page))
- get_huge_page_tail(page);
pages[*nr] = page;
(*nr)++;
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 26b2872..07f9e9f 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -534,11 +534,11 @@ static unsigned long __init htab_get_table_size(void)
}
#ifdef CONFIG_MEMORY_HOTPLUG
-void create_section_mapping(unsigned long start, unsigned long end)
+int create_section_mapping(unsigned long start, unsigned long end)
{
- BUG_ON(htab_bolt_mapping(start, end, __pa(start),
+ return htab_bolt_mapping(start, end, __pa(start),
pgprot_val(PAGE_KERNEL), mmu_linear_psize,
- mmu_kernel_ssize));
+ mmu_kernel_ssize);
}
int remove_section_mapping(unsigned long start, unsigned long end)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 0b9a5c1..da5eb38 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -390,7 +390,7 @@ static noinline int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long add
{
unsigned long mask;
unsigned long pte_end;
- struct page *head, *page;
+ struct page *head, *page, *tail;
pte_t pte;
int refs;
@@ -413,6 +413,7 @@ static noinline int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long add
head = pte_page(pte);
page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
+ tail = page;
do {
VM_BUG_ON(compound_head(page) != head);
pages[*nr] = page;
@@ -428,10 +429,20 @@ static noinline int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long add
if (unlikely(pte_val(pte) != pte_val(*ptep))) {
/* Could be optimized better */
- while (*nr) {
- put_page(page);
- (*nr)--;
- }
+ *nr -= refs;
+ while (refs--)
+ put_page(head);
+ return 0;
+ }
+
+ /*
+ * Any tail page need their mapcount reference taken before we
+ * return.
+ */
+ while (refs--) {
+ if (PageTail(tail))
+ get_huge_page_tail(tail);
+ tail++;
}
return 1;
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 29d4dde..278ec8e 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -123,7 +123,8 @@ int arch_add_memory(int nid, u64 start, u64 size)
pgdata = NODE_DATA(nid);
start = (unsigned long)__va(start);
- create_section_mapping(start, start + size);
+ if (create_section_mapping(start, start + size))
+ return -EINVAL;
/* this should work for most non-highmem platforms */
zone = pgdata->node_zones;
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
index 3bafc3d..4ff587e 100644
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -136,8 +136,8 @@ int use_cop(unsigned long acop, struct mm_struct *mm)
if (!mm || !acop)
return -EINVAL;
- /* We need to make sure mm_users doesn't change */
- down_read(&mm->mmap_sem);
+ /* The page_table_lock ensures mm_users won't change under us */
+ spin_lock(&mm->page_table_lock);
spin_lock(mm->context.cop_lockp);
if (mm->context.cop_pid == COP_PID_NONE) {
@@ -164,7 +164,7 @@ int use_cop(unsigned long acop, struct mm_struct *mm)
out:
spin_unlock(mm->context.cop_lockp);
- up_read(&mm->mmap_sem);
+ spin_unlock(&mm->page_table_lock);
return ret;
}
@@ -185,8 +185,8 @@ void drop_cop(unsigned long acop, struct mm_struct *mm)
if (WARN_ON_ONCE(!mm))
return;
- /* We need to make sure mm_users doesn't change */
- down_read(&mm->mmap_sem);
+ /* The page_table_lock ensures mm_users won't change under us */
+ spin_lock(&mm->page_table_lock);
spin_lock(mm->context.cop_lockp);
mm->context.acop &= ~acop;
@@ -213,7 +213,7 @@ void drop_cop(unsigned long acop, struct mm_struct *mm)
}
spin_unlock(mm->context.cop_lockp);
- up_read(&mm->mmap_sem);
+ spin_unlock(&mm->page_table_lock);
}
EXPORT_SYMBOL_GPL(drop_cop);
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 2164006..2c1ae7a 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1214,11 +1214,12 @@ int hot_add_node_scn_to_nid(unsigned long scn_addr)
break;
}
- of_node_put(memory);
if (nid >= 0)
break;
}
+ of_node_put(memory);
+
return nid;
}
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index 600ed2c..1aa478b 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -88,6 +88,7 @@ struct ps3_private {
struct ps3_bmp bmp __attribute__ ((aligned (PS3_BMP_MINALIGN)));
u64 ppe_id;
u64 thread_id;
+ unsigned long ipi_mask;
};
static DEFINE_PER_CPU(struct ps3_private, ps3_private);
@@ -144,7 +145,11 @@ static void ps3_chip_unmask(struct irq_data *d)
static void ps3_chip_eoi(struct irq_data *d)
{
const struct ps3_private *pd = irq_data_get_irq_chip_data(d);
- lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, d->irq);
+
+ /* non-IPIs are EOIed here. */
+
+ if (!test_bit(63 - d->irq, &pd->ipi_mask))
+ lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, d->irq);
}
/**
@@ -691,6 +696,16 @@ void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq)
cpu, virq, pd->bmp.ipi_debug_brk_mask);
}
+void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq)
+{
+ struct ps3_private *pd = &per_cpu(ps3_private, cpu);
+
+ set_bit(63 - virq, &pd->ipi_mask);
+
+ DBG("%s:%d: cpu %u, virq %u, ipi_mask %lxh\n", __func__, __LINE__,
+ cpu, virq, pd->ipi_mask);
+}
+
static unsigned int ps3_get_irq(void)
{
struct ps3_private *pd = &__get_cpu_var(ps3_private);
@@ -720,6 +735,12 @@ static unsigned int ps3_get_irq(void)
BUG();
}
#endif
+
+ /* IPIs are EOIed here. */
+
+ if (test_bit(63 - plug, &pd->ipi_mask))
+ lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, plug);
+
return plug;
}
diff --git a/arch/powerpc/platforms/ps3/platform.h b/arch/powerpc/platforms/ps3/platform.h
index 9a196a8..1a633ed 100644
--- a/arch/powerpc/platforms/ps3/platform.h
+++ b/arch/powerpc/platforms/ps3/platform.h
@@ -43,6 +43,7 @@ void ps3_mm_shutdown(void);
void ps3_init_IRQ(void);
void ps3_shutdown_IRQ(int cpu);
void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq);
+void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq);
/* smp */
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c
index 4c44794..f609345 100644
--- a/arch/powerpc/platforms/ps3/smp.c
+++ b/arch/powerpc/platforms/ps3/smp.c
@@ -94,6 +94,8 @@ static void __init ps3_smp_setup_cpu(int cpu)
if (result)
virqs[i] = NO_IRQ;
+ else
+ ps3_register_ipi_irq(cpu, virqs[i]);
}
ps3_register_ipi_debug_brk(cpu, virqs[PPC_MSG_DEBUGGER_BREAK]);
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 57ceb92..82766e5 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -112,6 +112,7 @@ void dlpar_free_cc_nodes(struct device_node *dn)
dlpar_free_one_cc_node(dn);
}
+#define COMPLETE 0
#define NEXT_SIBLING 1
#define NEXT_CHILD 2
#define NEXT_PROPERTY 3
@@ -158,6 +159,9 @@ struct device_node *dlpar_configure_connector(u32 drc_index)
spin_unlock(&rtas_data_buf_lock);
switch (rc) {
+ case COMPLETE:
+ break;
+
case NEXT_SIBLING:
dn = dlpar_parse_cc_node(ccwa);
if (!dn)
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 46b55cf..3608704 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -1338,7 +1338,7 @@ static const struct file_operations proc_eeh_operations = {
static int __init eeh_init_proc(void)
{
if (machine_is(pseries))
- proc_create("ppc64/eeh", 0, NULL, &proc_eeh_operations);
+ proc_create("powerpc/eeh", 0, NULL, &proc_eeh_operations);
return 0;
}
__initcall(eeh_init_proc);
diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c
index f106662..c9311cf 100644
--- a/arch/powerpc/platforms/pseries/hvCall_inst.c
+++ b/arch/powerpc/platforms/pseries/hvCall_inst.c
@@ -109,7 +109,7 @@ static void probe_hcall_entry(void *ignored, unsigned long opcode, unsigned long
if (opcode > MAX_HCALL_OPCODE)
return;
- h = &get_cpu_var(hcall_stats)[opcode / 4];
+ h = &__get_cpu_var(hcall_stats)[opcode / 4];
h->tb_start = mftb();
h->purr_start = mfspr(SPRN_PURR);
}
@@ -126,8 +126,6 @@ static void probe_hcall_exit(void *ignored, unsigned long opcode, unsigned long
h->num_calls++;
h->tb_total += mftb() - h->tb_start;
h->purr_total += mfspr(SPRN_PURR) - h->purr_start;
-
- put_cpu_var(hcall_stats);
}
static int __init hcall_inst_init(void)
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index ed96b37..81e30d9 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -745,6 +745,7 @@ void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
goto out;
(*depth)++;
+ preempt_disable();
trace_hcall_entry(opcode, args);
(*depth)--;
@@ -767,6 +768,7 @@ void __trace_hcall_exit(long opcode, unsigned long retval,
(*depth)++;
trace_hcall_exit(opcode, retval, retbuf);
+ preempt_enable();
(*depth)--;
out:
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index ef86ad2..5804cfa 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -47,29 +47,31 @@ enum s390_regset {
void update_per_regs(struct task_struct *task)
{
- static const struct per_regs per_single_step = {
- .control = PER_EVENT_IFETCH,
- .start = 0,
- .end = PSW_ADDR_INSN,
- };
struct pt_regs *regs = task_pt_regs(task);
struct thread_struct *thread = &task->thread;
- const struct per_regs *new;
- struct per_regs old;
-
- /* TIF_SINGLE_STEP overrides the user specified PER registers. */
- new = test_tsk_thread_flag(task, TIF_SINGLE_STEP) ?
- &per_single_step : &thread->per_user;
+ struct per_regs old, new;
+
+ /* Copy user specified PER registers */
+ new.control = thread->per_user.control;
+ new.start = thread->per_user.start;
+ new.end = thread->per_user.end;
+
+ /* merge TIF_SINGLE_STEP into user specified PER registers. */
+ if (test_tsk_thread_flag(task, TIF_SINGLE_STEP)) {
+ new.control |= PER_EVENT_IFETCH;
+ new.start = 0;
+ new.end = PSW_ADDR_INSN;
+ }
/* Take care of the PER enablement bit in the PSW. */
- if (!(new->control & PER_EVENT_MASK)) {
+ if (!(new.control & PER_EVENT_MASK)) {
regs->psw.mask &= ~PSW_MASK_PER;
return;
}
regs->psw.mask |= PSW_MASK_PER;
__ctl_store(old, 9, 11);
- if (memcmp(new, &old, sizeof(struct per_regs)) != 0)
- __ctl_load(*new, 9, 11);
+ if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
+ __ctl_load(new, 9, 11);
}
void user_enable_single_step(struct task_struct *task)
@@ -895,6 +897,14 @@ static int s390_last_break_get(struct task_struct *target,
return 0;
}
+static int s390_last_break_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ return 0;
+}
+
#endif
static const struct user_regset s390_regsets[] = {
@@ -921,6 +931,7 @@ static const struct user_regset s390_regsets[] = {
.size = sizeof(long),
.align = sizeof(long),
.get = s390_last_break_get,
+ .set = s390_last_break_set,
},
#endif
};
@@ -1078,6 +1089,14 @@ static int s390_compat_last_break_get(struct task_struct *target,
return 0;
}
+static int s390_compat_last_break_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ return 0;
+}
+
static const struct user_regset s390_compat_regsets[] = {
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
@@ -1101,6 +1120,7 @@ static const struct user_regset s390_compat_regsets[] = {
.size = sizeof(long),
.align = sizeof(long),
.get = s390_compat_last_break_get,
+ .set = s390_compat_last_break_set,
},
[REGSET_GENERAL_EXTENDED] = {
.core_note_type = NT_S390_HIGH_GPRS,
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 67345ae..2ada634 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -301,11 +301,17 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
unsigned int id)
{
- struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
- int rc = -ENOMEM;
+ struct kvm_vcpu *vcpu;
+ int rc = -EINVAL;
+
+ if (id >= KVM_MAX_VCPUS)
+ goto out;
+
+ rc = -ENOMEM;
+ vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
if (!vcpu)
- goto out_nomem;
+ goto out;
vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
get_zeroed_page(GFP_KERNEL);
@@ -341,7 +347,7 @@ out_free_sie_block:
free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
kfree(vcpu);
-out_nomem:
+out:
return ERR_PTR(rc);
}
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 45b405c..65cb06e 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -52,7 +52,7 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr)
{
unsigned long mask, result;
- struct page *head, *page;
+ struct page *head, *page, *tail;
int refs;
result = write ? 0 : _SEGMENT_ENTRY_RO;
@@ -64,6 +64,7 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
refs = 0;
head = pmd_page(pmd);
page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+ tail = page;
do {
VM_BUG_ON(compound_head(page) != head);
pages[*nr] = page;
@@ -81,6 +82,17 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
*nr -= refs;
while (refs--)
put_page(head);
+ return 0;
+ }
+
+ /*
+ * Any tail page need their mapcount reference taken before we
+ * return.
+ */
+ while (refs--) {
+ if (PageTail(tail))
+ get_huge_page_tail(tail);
+ tail++;
}
return 1;
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 37a23c22..458893f 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -291,8 +291,9 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
void __tlb_remove_table(void *_table)
{
- void *table = (void *)((unsigned long) _table & PAGE_MASK);
- unsigned type = (unsigned long) _table & ~PAGE_MASK;
+ const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
+ void *table = (void *)((unsigned long) _table & ~mask);
+ unsigned type = (unsigned long) _table & mask;
if (type)
__page_table_free_rcu(table, type);
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index 0e358c2..422110a 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -90,7 +90,7 @@ static ssize_t hwsampler_write(struct file *file, char const __user *buf,
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
if (oprofile_started)
diff --git a/arch/score/kernel/entry.S b/arch/score/kernel/entry.S
index 577abba..83bb960 100644
--- a/arch/score/kernel/entry.S
+++ b/arch/score/kernel/entry.S
@@ -408,7 +408,7 @@ ENTRY(handle_sys)
sw r9, [r0, PT_EPC]
cmpi.c r27, __NR_syscalls # check syscall number
- bgtu illegal_syscall
+ bgeu illegal_syscall
slli r8, r27, 2 # get syscall routine
la r11, sys_call_table
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index 822d608..abcc4dc 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -141,8 +141,13 @@ typedef struct page *pgtable_t;
#endif /* !__ASSEMBLY__ */
#ifdef CONFIG_UNCACHED_MAPPING
+#if defined(CONFIG_29BIT)
+#define UNCAC_ADDR(addr) P2SEGADDR(addr)
+#define CAC_ADDR(addr) P1SEGADDR(addr)
+#else
#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + uncached_start)
#define CAC_ADDR(addr) ((addr) - uncached_start + PAGE_OFFSET)
+#endif
#else
#define UNCAC_ADDR(addr) ((addr))
#define CAC_ADDR(addr) ((addr))
diff --git a/arch/sh/oprofile/common.c b/arch/sh/oprofile/common.c
index b4c2d2b..e4dd5d5 100644
--- a/arch/sh/oprofile/common.c
+++ b/arch/sh/oprofile/common.c
@@ -49,7 +49,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
return oprofile_perf_init(ops);
}
-void __exit oprofile_arch_exit(void)
+void oprofile_arch_exit(void)
{
oprofile_perf_exit();
kfree(sh_pmu_op_name);
@@ -60,5 +60,5 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
ops->backtrace = sh_backtrace;
return -ENODEV;
}
-void __exit oprofile_arch_exit(void) {}
+void oprofile_arch_exit(void) {}
#endif /* CONFIG_HW_PERF_EVENTS */
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index 5b31a8e..a790cc6 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -431,10 +431,6 @@ extern unsigned long *sparc_valid_addr_bitmap;
#define kern_addr_valid(addr) \
(test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))
-extern int io_remap_pfn_range(struct vm_area_struct *vma,
- unsigned long from, unsigned long pfn,
- unsigned long size, pgprot_t prot);
-
/*
* For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
* its high 4 bits. These macros/functions put it there or get it from there.
@@ -443,6 +439,22 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma,
#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn) (pfn & 0x0fffffffUL)
+extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
+ unsigned long, pgprot_t);
+
+static inline int io_remap_pfn_range(struct vm_area_struct *vma,
+ unsigned long from, unsigned long pfn,
+ unsigned long size, pgprot_t prot)
+{
+ unsigned long long offset, space, phys_base;
+
+ offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
+ space = GET_IOSPACE(pfn);
+ phys_base = offset | (space << 32ULL);
+
+ return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
+}
+
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({ \
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 1e03c5a..9822628 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -750,10 +750,6 @@ static inline bool kern_addr_valid(unsigned long addr)
extern int page_in_phys_avail(unsigned long paddr);
-extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
- unsigned long pfn,
- unsigned long size, pgprot_t prot);
-
/*
* For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
* its high 4 bits. These macros/functions put it there or get it from there.
@@ -762,6 +758,22 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn) (pfn & 0x0fffffffffffffffUL)
+extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
+ unsigned long, pgprot_t);
+
+static inline int io_remap_pfn_range(struct vm_area_struct *vma,
+ unsigned long from, unsigned long pfn,
+ unsigned long size, pgprot_t prot)
+{
+ unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
+ int space = GET_IOSPACE(pfn);
+ unsigned long phys_base;
+
+ phys_base = offset | (((unsigned long) space) << 32UL);
+
+ return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
+}
+
#include <asm-generic/pgtable.h>
/* We provide our own get_unmapped_area to cope with VA holes and
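
Both pgtable headers above drop the out-of-line io_remap_pfn_range() and rebuild it as an inline wrapper over remap_pfn_range(), recovering the physical base from the iospace-tagged pfn. A minimal sketch of how a caller's pfn argument round-trips through the 64-bit encoding; the page shift and sample packing helper are illustrative, only GET_IOSPACE/GET_PFN mirror the header:

#define GET_IOSPACE(pfn)	((pfn) >> (64 - 4))
#define GET_PFN(pfn)		((pfn) & 0x0fffffffffffffffUL)
#define SAMPLE_PAGE_SHIFT	13		/* illustrative page size */

/* Pack a 4-bit iospace and a physical address into the pfn argument
 * handed to io_remap_pfn_range(): iospace in the top 4 bits, frame below. */
static unsigned long make_io_pfn(unsigned long space, unsigned long paddr)
{
	return (space << (64 - 4)) | (paddr >> SAMPLE_PAGE_SHIFT);
}

/* The new inline then rebuilds
 *	phys_base = (GET_PFN(pfn) << PAGE_SHIFT) | (GET_IOSPACE(pfn) << 32)
 * and forwards phys_base >> PAGE_SHIFT to remap_pfn_range(). */
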
diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
index e27f8ea..0c218e4 100644
--- a/arch/sparc/kernel/entry.h
+++ b/arch/sparc/kernel/entry.h
@@ -42,6 +42,9 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
extern void fpload(unsigned long *fpregs, unsigned long *fsr);
#else /* CONFIG_SPARC32 */
+
+#include <asm/trap_block.h>
+
struct popc_3insn_patch_entry {
unsigned int addr;
unsigned int insns[3];
@@ -57,6 +60,10 @@ extern struct popc_6insn_patch_entry __popc_6insn_patch,
__popc_6insn_patch_end;
extern void __init per_cpu_patch(void);
+extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
+ struct sun4v_1insn_patch_entry *);
+extern void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
+ struct sun4v_2insn_patch_entry *);
extern void __init sun4v_patch(void);
extern void __init boot_cpu_id_too_large(int cpu);
extern unsigned int dcache_parity_tl1_occurred;
diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
index 99ba5ba..8172c18 100644
--- a/arch/sparc/kernel/module.c
+++ b/arch/sparc/kernel/module.c
@@ -17,6 +17,8 @@
#include <asm/processor.h>
#include <asm/spitfire.h>
+#include "entry.h"
+
#ifdef CONFIG_SPARC64
#include <linux/jump_label.h>
@@ -220,6 +222,29 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
}
#ifdef CONFIG_SPARC64
+static void do_patch_sections(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs)
+{
+ const Elf_Shdr *s, *sun4v_1insn = NULL, *sun4v_2insn = NULL;
+ char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+ for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
+ if (!strcmp(".sun4v_1insn_patch", secstrings + s->sh_name))
+ sun4v_1insn = s;
+ if (!strcmp(".sun4v_2insn_patch", secstrings + s->sh_name))
+ sun4v_2insn = s;
+ }
+
+ if (sun4v_1insn && tlb_type == hypervisor) {
+ void *p = (void *) sun4v_1insn->sh_addr;
+ sun4v_patch_1insn_range(p, p + sun4v_1insn->sh_size);
+ }
+ if (sun4v_2insn && tlb_type == hypervisor) {
+ void *p = (void *) sun4v_2insn->sh_addr;
+ sun4v_patch_2insn_range(p, p + sun4v_2insn->sh_size);
+ }
+}
+
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
@@ -227,6 +252,8 @@ int module_finalize(const Elf_Ehdr *hdr,
/* make jump label nops */
jump_label_apply_nops(me);
+ do_patch_sections(hdr, sechdrs);
+
/* Cheetah's I-cache is fully coherent. */
if (tlb_type == spitfire) {
unsigned long va;
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index b01a06e..9e73c4a 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -848,10 +848,10 @@ static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
if (!irq)
return -ENOMEM;
- if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
- return -EINVAL;
if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
return -EINVAL;
+ if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
+ return -EINVAL;
return irq;
}
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 3c5bb78..4e7d3ff 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -234,40 +234,50 @@ void __init per_cpu_patch(void)
}
}
-void __init sun4v_patch(void)
+void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *start,
+ struct sun4v_1insn_patch_entry *end)
{
- extern void sun4v_hvapi_init(void);
- struct sun4v_1insn_patch_entry *p1;
- struct sun4v_2insn_patch_entry *p2;
-
- if (tlb_type != hypervisor)
- return;
+ while (start < end) {
+ unsigned long addr = start->addr;
- p1 = &__sun4v_1insn_patch;
- while (p1 < &__sun4v_1insn_patch_end) {
- unsigned long addr = p1->addr;
-
- *(unsigned int *) (addr + 0) = p1->insn;
+ *(unsigned int *) (addr + 0) = start->insn;
wmb();
__asm__ __volatile__("flush %0" : : "r" (addr + 0));
- p1++;
+ start++;
}
+}
- p2 = &__sun4v_2insn_patch;
- while (p2 < &__sun4v_2insn_patch_end) {
- unsigned long addr = p2->addr;
+void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
+ struct sun4v_2insn_patch_entry *end)
+{
+ while (start < end) {
+ unsigned long addr = start->addr;
- *(unsigned int *) (addr + 0) = p2->insns[0];
+ *(unsigned int *) (addr + 0) = start->insns[0];
wmb();
__asm__ __volatile__("flush %0" : : "r" (addr + 0));
- *(unsigned int *) (addr + 4) = p2->insns[1];
+ *(unsigned int *) (addr + 4) = start->insns[1];
wmb();
__asm__ __volatile__("flush %0" : : "r" (addr + 4));
- p2++;
+ start++;
}
+}
+
+void __init sun4v_patch(void)
+{
+ extern void sun4v_hvapi_init(void);
+
+ if (tlb_type != hypervisor)
+ return;
+
+ sun4v_patch_1insn_range(&__sun4v_1insn_patch,
+ &__sun4v_1insn_patch_end);
+
+ sun4v_patch_2insn_range(&__sun4v_2insn_patch,
+ &__sun4v_2insn_patch_end);
sun4v_hvapi_init();
}
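
The two range helpers factored out above walk arrays of small fixup records; judging from the fields the loops dereference, each record pairs a patch-site address with its replacement instruction word(s). An inferred sketch of that layout (the real definitions live elsewhere and may differ in detail):

struct sun4v_1insn_patch_entry {
	unsigned int	addr;		/* address of the instruction to rewrite */
	unsigned int	insn;		/* replacement instruction word */
};

struct sun4v_2insn_patch_entry {
	unsigned int	addr;		/* address of the first of two instructions */
	unsigned int	insns[2];	/* replacement instruction words */
};

Splitting sun4v_patch() this way is what lets module.c (earlier in this diff) run the same rewrite over a module's .sun4v_1insn_patch and .sun4v_2insn_patch sections at load time when tlb_type == hypervisor.
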
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index 5d92488..2e58328 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -829,21 +829,23 @@ static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*/
-void do_signal32(sigset_t *oldset, struct pt_regs * regs,
- int restart_syscall, unsigned long orig_i0)
+void do_signal32(sigset_t *oldset, struct pt_regs * regs)
{
struct k_sigaction ka;
+ unsigned long orig_i0;
+ int restart_syscall;
siginfo_t info;
int signr;
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
- /* If the debugger messes with the program counter, it clears
- * the "in syscall" bit, directing us to not perform a syscall
- * restart.
- */
- if (restart_syscall && !pt_regs_is_syscall(regs))
- restart_syscall = 0;
+ restart_syscall = 0;
+ orig_i0 = 0;
+ if (pt_regs_is_syscall(regs) &&
+ (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
+ restart_syscall = 1;
+ orig_i0 = regs->u_regs[UREG_G6];
+ }
if (signr > 0) {
if (restart_syscall)
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 04ede8f..2302567 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -525,10 +525,26 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
siginfo_t info;
int signr;
+ /* It's a lot of work and synchronization to add a new ptrace
+ * register for GDB to save and restore in order to get
+ * orig_i0 correct for syscall restarts when debugging.
+ *
+ * Although it should be the case that most of the global
+ * registers are volatile across a system call, glibc already
+ * depends upon that fact that we preserve them. So we can't
+ * just use any global register to save away the orig_i0 value.
+ *
+ * In particular %g2, %g3, %g4, and %g5 are all assumed to be
+ * preserved across a system call trap by various pieces of
+ * code in glibc.
+ *
+ * %g7 is used as the "thread register". %g6 is not used in
+ * any fixed manner. %g6 is used as a scratch register and
+ * a compiler temporary, but its value is never used across
+ * a system call. Therefore %g6 is usable for orig_i0 storage.
+ */
if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C))
- restart_syscall = 1;
- else
- restart_syscall = 0;
+ regs->u_regs[UREG_G6] = orig_i0;
if (test_thread_flag(TIF_RESTORE_SIGMASK))
oldset = &current->saved_sigmask;
@@ -541,8 +557,12 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
* the software "in syscall" bit, directing us to not perform
* a syscall restart.
*/
- if (restart_syscall && !pt_regs_is_syscall(regs))
- restart_syscall = 0;
+ restart_syscall = 0;
+ if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C)) {
+ restart_syscall = 1;
+ orig_i0 = regs->u_regs[UREG_G6];
+ }
+
if (signr > 0) {
if (restart_syscall)
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index 47509df..d58260b 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -535,11 +535,27 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
siginfo_t info;
int signr;
+ /* It's a lot of work and synchronization to add a new ptrace
+ * register for GDB to save and restore in order to get
+ * orig_i0 correct for syscall restarts when debugging.
+ *
+ * Although it should be the case that most of the global
+ * registers are volatile across a system call, glibc already
+ * depends upon the fact that we preserve them. So we can't
+ * just use any global register to save away the orig_i0 value.
+ *
+ * In particular %g2, %g3, %g4, and %g5 are all assumed to be
+ * preserved across a system call trap by various pieces of
+ * code in glibc.
+ *
+ * %g7 is used as the "thread register". %g6 is not used in
+ * any fixed manner. %g6 is used as a scratch register and
+ * a compiler temporary, but its value is never used across
+ * a system call. Therefore %g6 is usable for orig_i0 storage.
+ */
if (pt_regs_is_syscall(regs) &&
- (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
- restart_syscall = 1;
- } else
- restart_syscall = 0;
+ (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY)))
+ regs->u_regs[UREG_G6] = orig_i0;
if (current_thread_info()->status & TS_RESTORE_SIGMASK)
oldset = &current->saved_sigmask;
@@ -548,22 +564,20 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
#ifdef CONFIG_COMPAT
if (test_thread_flag(TIF_32BIT)) {
- extern void do_signal32(sigset_t *, struct pt_regs *,
- int restart_syscall,
- unsigned long orig_i0);
- do_signal32(oldset, regs, restart_syscall, orig_i0);
+ extern void do_signal32(sigset_t *, struct pt_regs *);
+ do_signal32(oldset, regs);
return;
}
#endif
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
- /* If the debugger messes with the program counter, it clears
- * the software "in syscall" bit, directing us to not perform
- * a syscall restart.
- */
- if (restart_syscall && !pt_regs_is_syscall(regs))
- restart_syscall = 0;
+ restart_syscall = 0;
+ if (pt_regs_is_syscall(regs) &&
+ (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
+ restart_syscall = 1;
+ orig_i0 = regs->u_regs[UREG_G6];
+ }
if (signr > 0) {
if (restart_syscall)
diff --git a/arch/sparc/kernel/visemul.c b/arch/sparc/kernel/visemul.c
index 3635771..9384a0c 100644
--- a/arch/sparc/kernel/visemul.c
+++ b/arch/sparc/kernel/visemul.c
@@ -713,17 +713,17 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
s16 b = (rs2 >> (i * 16)) & 0xffff;
if (a > b)
- rd_val |= 1 << i;
+ rd_val |= 8 >> i;
}
break;
case FCMPGT32_OPF:
for (i = 0; i < 2; i++) {
- s32 a = (rs1 >> (i * 32)) & 0xffff;
- s32 b = (rs2 >> (i * 32)) & 0xffff;
+ s32 a = (rs1 >> (i * 32)) & 0xffffffff;
+ s32 b = (rs2 >> (i * 32)) & 0xffffffff;
if (a > b)
- rd_val |= 1 << i;
+ rd_val |= 2 >> i;
}
break;
@@ -733,17 +733,17 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
s16 b = (rs2 >> (i * 16)) & 0xffff;
if (a <= b)
- rd_val |= 1 << i;
+ rd_val |= 8 >> i;
}
break;
case FCMPLE32_OPF:
for (i = 0; i < 2; i++) {
- s32 a = (rs1 >> (i * 32)) & 0xffff;
- s32 b = (rs2 >> (i * 32)) & 0xffff;
+ s32 a = (rs1 >> (i * 32)) & 0xffffffff;
+ s32 b = (rs2 >> (i * 32)) & 0xffffffff;
if (a <= b)
- rd_val |= 1 << i;
+ rd_val |= 2 >> i;
}
break;
@@ -753,17 +753,17 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
s16 b = (rs2 >> (i * 16)) & 0xffff;
if (a != b)
- rd_val |= 1 << i;
+ rd_val |= 8 >> i;
}
break;
case FCMPNE32_OPF:
for (i = 0; i < 2; i++) {
- s32 a = (rs1 >> (i * 32)) & 0xffff;
- s32 b = (rs2 >> (i * 32)) & 0xffff;
+ s32 a = (rs1 >> (i * 32)) & 0xffffffff;
+ s32 b = (rs2 >> (i * 32)) & 0xffffffff;
if (a != b)
- rd_val |= 1 << i;
+ rd_val |= 2 >> i;
}
break;
@@ -773,17 +773,17 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
s16 b = (rs2 >> (i * 16)) & 0xffff;
if (a == b)
- rd_val |= 1 << i;
+ rd_val |= 8 >> i;
}
break;
case FCMPEQ32_OPF:
for (i = 0; i < 2; i++) {
- s32 a = (rs1 >> (i * 32)) & 0xffff;
- s32 b = (rs2 >> (i * 32)) & 0xffff;
+ s32 a = (rs1 >> (i * 32)) & 0xffffffff;
+ s32 b = (rs2 >> (i * 32)) & 0xffffffff;
if (a == b)
- rd_val |= 1 << i;
+ rd_val |= 2 >> i;
}
break;
}
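
Every pcmp case above changes in the same two ways: the 32-bit lanes are masked with 0xffffffff instead of being truncated to 16 bits, and the per-lane result bit now fills the mask from its most significant end (8 >> i for four 16-bit lanes, 2 >> i for two 32-bit lanes) rather than from bit 0. A standalone illustration of the 16-bit mapping, assuming only lane i == 0 (bits 15:0 of the operands) compares true:

#include <stdio.h>

int main(void)
{
	int i = 0;	/* lane index as used in the loops above */

	printf("old mapping: %#lx, fixed mapping: %#lx\n",
	       1UL << i,	/* 0x1: lane 0 used to land in bit 0 */
	       8UL >> i);	/* 0x8: lane 0 now lands in bit 3 */
	return 0;
}
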
diff --git a/arch/sparc/lib/memcpy.S b/arch/sparc/lib/memcpy.S
index 34fe657..4d8c497 100644
--- a/arch/sparc/lib/memcpy.S
+++ b/arch/sparc/lib/memcpy.S
@@ -7,40 +7,12 @@
* Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
-#ifdef __KERNEL__
-
-#define FUNC(x) \
+#define FUNC(x) \
.globl x; \
.type x,@function; \
- .align 4; \
+ .align 4; \
x:
-#undef FASTER_REVERSE
-#undef FASTER_NONALIGNED
-#define FASTER_ALIGNED
-
-/* In kernel these functions don't return a value.
- * One should use macros in asm/string.h for that purpose.
- * We return 0, so that bugs are more apparent.
- */
-#define SETUP_RETL
-#define RETL_INSN clr %o0
-
-#else
-
-/* libc */
-
-#include "DEFS.h"
-
-#define FASTER_REVERSE
-#define FASTER_NONALIGNED
-#define FASTER_ALIGNED
-
-#define SETUP_RETL mov %o0, %g6
-#define RETL_INSN mov %g6, %o0
-
-#endif
-
/* Both these macros have to start with exactly the same insn */
#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
ldd [%src + (offset) + 0x00], %t0; \
@@ -164,30 +136,6 @@ x:
.text
.align 4
-#ifdef FASTER_REVERSE
-
-70: /* rdword_align */
-
- andcc %o1, 1, %g0
- be 4f
- andcc %o1, 2, %g0
-
- ldub [%o1 - 1], %g2
- sub %o1, 1, %o1
- stb %g2, [%o0 - 1]
- sub %o2, 1, %o2
- be 3f
- sub %o0, 1, %o0
-4:
- lduh [%o1 - 2], %g2
- sub %o1, 2, %o1
- sth %g2, [%o0 - 2]
- sub %o2, 2, %o2
- b 3f
- sub %o0, 2, %o0
-
-#endif /* FASTER_REVERSE */
-
0:
retl
nop ! Only bcopy returns here and it returns void...
@@ -198,7 +146,7 @@ FUNC(__memmove)
#endif
FUNC(memmove)
cmp %o0, %o1
- SETUP_RETL
+ mov %o0, %g7
bleu 9f
sub %o0, %o1, %o4
@@ -207,8 +155,6 @@ FUNC(memmove)
bleu 0f
andcc %o4, 3, %o5
-#ifndef FASTER_REVERSE
-
add %o1, %o2, %o1
add %o0, %o2, %o0
sub %o1, 1, %o1
@@ -224,295 +170,7 @@ FUNC(memmove)
sub %o0, 1, %o0
retl
- RETL_INSN
-
-#else /* FASTER_REVERSE */
-
- add %o1, %o2, %o1
- add %o0, %o2, %o0
- bne 77f
- cmp %o2, 15
- bleu 91f
- andcc %o1, 3, %g0
- bne 70b
-3:
- andcc %o1, 4, %g0
-
- be 2f
- mov %o2, %g1
-
- ld [%o1 - 4], %o4
- sub %g1, 4, %g1
- st %o4, [%o0 - 4]
- sub %o1, 4, %o1
- sub %o0, 4, %o0
-2:
- andcc %g1, 0xffffff80, %g7
- be 3f
- andcc %o0, 4, %g0
-
- be 74f + 4
-5:
- RMOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
- RMOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
- RMOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
- RMOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
- subcc %g7, 128, %g7
- sub %o1, 128, %o1
- bne 5b
- sub %o0, 128, %o0
-3:
- andcc %g1, 0x70, %g7
- be 72f
- andcc %g1, 8, %g0
-
- sethi %hi(72f), %o5
- srl %g7, 1, %o4
- add %g7, %o4, %o4
- sub %o1, %g7, %o1
- sub %o5, %o4, %o5
- jmpl %o5 + %lo(72f), %g0
- sub %o0, %g7, %o0
-
-71: /* rmemcpy_table */
- RMOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
- RMOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
- RMOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
- RMOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
- RMOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
- RMOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
- RMOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
-
-72: /* rmemcpy_table_end */
-
- be 73f
- andcc %g1, 4, %g0
-
- ldd [%o1 - 0x08], %g2
- sub %o0, 8, %o0
- sub %o1, 8, %o1
- st %g2, [%o0]
- st %g3, [%o0 + 0x04]
-
-73: /* rmemcpy_last7 */
-
- be 1f
- andcc %g1, 2, %g0
-
- ld [%o1 - 4], %g2
- sub %o1, 4, %o1
- st %g2, [%o0 - 4]
- sub %o0, 4, %o0
-1:
- be 1f
- andcc %g1, 1, %g0
-
- lduh [%o1 - 2], %g2
- sub %o1, 2, %o1
- sth %g2, [%o0 - 2]
- sub %o0, 2, %o0
-1:
- be 1f
- nop
-
- ldub [%o1 - 1], %g2
- stb %g2, [%o0 - 1]
-1:
- retl
- RETL_INSN
-
-74: /* rldd_std */
- RMOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
- RMOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
- RMOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
- RMOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
- subcc %g7, 128, %g7
- sub %o1, 128, %o1
- bne 74b
- sub %o0, 128, %o0
-
- andcc %g1, 0x70, %g7
- be 72b
- andcc %g1, 8, %g0
-
- sethi %hi(72b), %o5
- srl %g7, 1, %o4
- add %g7, %o4, %o4
- sub %o1, %g7, %o1
- sub %o5, %o4, %o5
- jmpl %o5 + %lo(72b), %g0
- sub %o0, %g7, %o0
-
-75: /* rshort_end */
-
- and %o2, 0xe, %o3
-2:
- sethi %hi(76f), %o5
- sll %o3, 3, %o4
- sub %o0, %o3, %o0
- sub %o5, %o4, %o5
- sub %o1, %o3, %o1
- jmpl %o5 + %lo(76f), %g0
- andcc %o2, 1, %g0
-
- RMOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
- RMOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
- RMOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
- RMOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
- RMOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
- RMOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
- RMOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)
-
-76: /* rshort_table_end */
-
- be 1f
- nop
- ldub [%o1 - 1], %g2
- stb %g2, [%o0 - 1]
-1:
- retl
- RETL_INSN
-
-91: /* rshort_aligned_end */
-
- bne 75b
- andcc %o2, 8, %g0
-
- be 1f
- andcc %o2, 4, %g0
-
- ld [%o1 - 0x08], %g2
- ld [%o1 - 0x04], %g3
- sub %o1, 8, %o1
- st %g2, [%o0 - 0x08]
- st %g3, [%o0 - 0x04]
- sub %o0, 8, %o0
-1:
- b 73b
- mov %o2, %g1
-
-77: /* rnon_aligned */
- cmp %o2, 15
- bleu 75b
- andcc %o0, 3, %g0
- be 64f
- andcc %o0, 1, %g0
- be 63f
- andcc %o0, 2, %g0
- ldub [%o1 - 1], %g5
- sub %o1, 1, %o1
- stb %g5, [%o0 - 1]
- sub %o0, 1, %o0
- be 64f
- sub %o2, 1, %o2
-63:
- ldub [%o1 - 1], %g5
- sub %o1, 2, %o1
- stb %g5, [%o0 - 1]
- sub %o0, 2, %o0
- ldub [%o1], %g5
- sub %o2, 2, %o2
- stb %g5, [%o0]
-64:
- and %o1, 3, %g2
- and %o1, -4, %o1
- and %o2, 0xc, %g3
- add %o1, 4, %o1
- cmp %g3, 4
- sll %g2, 3, %g4
- mov 32, %g2
- be 4f
- sub %g2, %g4, %g7
-
- blu 3f
- cmp %g3, 8
-
- be 2f
- srl %o2, 2, %g3
-
- ld [%o1 - 4], %o3
- add %o0, -8, %o0
- ld [%o1 - 8], %o4
- add %o1, -16, %o1
- b 7f
- add %g3, 1, %g3
-2:
- ld [%o1 - 4], %o4
- add %o0, -4, %o0
- ld [%o1 - 8], %g1
- add %o1, -12, %o1
- b 8f
- add %g3, 2, %g3
-3:
- ld [%o1 - 4], %o5
- add %o0, -12, %o0
- ld [%o1 - 8], %o3
- add %o1, -20, %o1
- b 6f
- srl %o2, 2, %g3
-4:
- ld [%o1 - 4], %g1
- srl %o2, 2, %g3
- ld [%o1 - 8], %o5
- add %o1, -24, %o1
- add %o0, -16, %o0
- add %g3, -1, %g3
-
- ld [%o1 + 12], %o3
-5:
- sll %o5, %g4, %g2
- srl %g1, %g7, %g5
- or %g2, %g5, %g2
- st %g2, [%o0 + 12]
-6:
- ld [%o1 + 8], %o4
- sll %o3, %g4, %g2
- srl %o5, %g7, %g5
- or %g2, %g5, %g2
- st %g2, [%o0 + 8]
-7:
- ld [%o1 + 4], %g1
- sll %o4, %g4, %g2
- srl %o3, %g7, %g5
- or %g2, %g5, %g2
- st %g2, [%o0 + 4]
-8:
- ld [%o1], %o5
- sll %g1, %g4, %g2
- srl %o4, %g7, %g5
- addcc %g3, -4, %g3
- or %g2, %g5, %g2
- add %o1, -16, %o1
- st %g2, [%o0]
- add %o0, -16, %o0
- bne,a 5b
- ld [%o1 + 12], %o3
- sll %o5, %g4, %g2
- srl %g1, %g7, %g5
- srl %g4, 3, %g3
- or %g2, %g5, %g2
- add %o1, %g3, %o1
- andcc %o2, 2, %g0
- st %g2, [%o0 + 12]
- be 1f
- andcc %o2, 1, %g0
-
- ldub [%o1 + 15], %g5
- add %o1, -2, %o1
- stb %g5, [%o0 + 11]
- add %o0, -2, %o0
- ldub [%o1 + 16], %g5
- stb %g5, [%o0 + 12]
-1:
- be 1f
- nop
- ldub [%o1 + 15], %g5
- stb %g5, [%o0 + 11]
-1:
- retl
- RETL_INSN
-
-#endif /* FASTER_REVERSE */
+ mov %g7, %o0
/* NOTE: This code is executed just for the cases,
where %src (=%o1) & 3 is != 0.
@@ -546,7 +204,7 @@ FUNC(memmove)
FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
sub %o0, %o1, %o4
- SETUP_RETL
+ mov %o0, %g7
9:
andcc %o4, 3, %o5
0:
@@ -569,7 +227,7 @@ FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
add %o1, 4, %o1
add %o0, 4, %o0
2:
- andcc %g1, 0xffffff80, %g7
+ andcc %g1, 0xffffff80, %g0
be 3f
andcc %o0, 4, %g0
@@ -579,22 +237,23 @@ FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
- subcc %g7, 128, %g7
+ sub %g1, 128, %g1
add %o1, 128, %o1
- bne 5b
+ cmp %g1, 128
+ bge 5b
add %o0, 128, %o0
3:
- andcc %g1, 0x70, %g7
+ andcc %g1, 0x70, %g4
be 80f
andcc %g1, 8, %g0
sethi %hi(80f), %o5
- srl %g7, 1, %o4
- add %g7, %o4, %o4
- add %o1, %g7, %o1
+ srl %g4, 1, %o4
+ add %g4, %o4, %o4
+ add %o1, %g4, %o1
sub %o5, %o4, %o5
jmpl %o5 + %lo(80f), %g0
- add %o0, %g7, %o0
+ add %o0, %g4, %o0
79: /* memcpy_table */
@@ -641,43 +300,28 @@ FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
stb %g2, [%o0]
1:
retl
- RETL_INSN
+ mov %g7, %o0
82: /* ldd_std */
MOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
- subcc %g7, 128, %g7
+ subcc %g1, 128, %g1
add %o1, 128, %o1
- bne 82b
+ cmp %g1, 128
+ bge 82b
add %o0, 128, %o0
-#ifndef FASTER_ALIGNED
-
- andcc %g1, 0x70, %g7
- be 80b
- andcc %g1, 8, %g0
-
- sethi %hi(80b), %o5
- srl %g7, 1, %o4
- add %g7, %o4, %o4
- add %o1, %g7, %o1
- sub %o5, %o4, %o5
- jmpl %o5 + %lo(80b), %g0
- add %o0, %g7, %o0
-
-#else /* FASTER_ALIGNED */
-
- andcc %g1, 0x70, %g7
+ andcc %g1, 0x70, %g4
be 84f
andcc %g1, 8, %g0
sethi %hi(84f), %o5
- add %o1, %g7, %o1
- sub %o5, %g7, %o5
+ add %o1, %g4, %o1
+ sub %o5, %g4, %o5
jmpl %o5 + %lo(84f), %g0
- add %o0, %g7, %o0
+ add %o0, %g4, %o0
83: /* amemcpy_table */
@@ -721,382 +365,132 @@ FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
stb %g2, [%o0]
1:
retl
- RETL_INSN
-
-#endif /* FASTER_ALIGNED */
+ mov %g7, %o0
86: /* non_aligned */
cmp %o2, 6
bleu 88f
+ nop
-#ifdef FASTER_NONALIGNED
-
- cmp %o2, 256
- bcc 87f
-
-#endif /* FASTER_NONALIGNED */
-
- andcc %o0, 3, %g0
+ save %sp, -96, %sp
+ andcc %i0, 3, %g0
be 61f
- andcc %o0, 1, %g0
+ andcc %i0, 1, %g0
be 60f
- andcc %o0, 2, %g0
+ andcc %i0, 2, %g0
- ldub [%o1], %g5
- add %o1, 1, %o1
- stb %g5, [%o0]
- sub %o2, 1, %o2
+ ldub [%i1], %g5
+ add %i1, 1, %i1
+ stb %g5, [%i0]
+ sub %i2, 1, %i2
bne 61f
- add %o0, 1, %o0
+ add %i0, 1, %i0
60:
- ldub [%o1], %g3
- add %o1, 2, %o1
- stb %g3, [%o0]
- sub %o2, 2, %o2
- ldub [%o1 - 1], %g3
- add %o0, 2, %o0
- stb %g3, [%o0 - 1]
+ ldub [%i1], %g3
+ add %i1, 2, %i1
+ stb %g3, [%i0]
+ sub %i2, 2, %i2
+ ldub [%i1 - 1], %g3
+ add %i0, 2, %i0
+ stb %g3, [%i0 - 1]
61:
- and %o1, 3, %g2
- and %o2, 0xc, %g3
- and %o1, -4, %o1
+ and %i1, 3, %g2
+ and %i2, 0xc, %g3
+ and %i1, -4, %i1
cmp %g3, 4
sll %g2, 3, %g4
mov 32, %g2
be 4f
- sub %g2, %g4, %g7
+ sub %g2, %g4, %l0
blu 3f
cmp %g3, 0x8
be 2f
- srl %o2, 2, %g3
+ srl %i2, 2, %g3
- ld [%o1], %o3
- add %o0, -8, %o0
- ld [%o1 + 4], %o4
+ ld [%i1], %i3
+ add %i0, -8, %i0
+ ld [%i1 + 4], %i4
b 8f
add %g3, 1, %g3
2:
- ld [%o1], %o4
- add %o0, -12, %o0
- ld [%o1 + 4], %o5
+ ld [%i1], %i4
+ add %i0, -12, %i0
+ ld [%i1 + 4], %i5
add %g3, 2, %g3
b 9f
- add %o1, -4, %o1
+ add %i1, -4, %i1
3:
- ld [%o1], %g1
- add %o0, -4, %o0
- ld [%o1 + 4], %o3
- srl %o2, 2, %g3
+ ld [%i1], %g1
+ add %i0, -4, %i0
+ ld [%i1 + 4], %i3
+ srl %i2, 2, %g3
b 7f
- add %o1, 4, %o1
+ add %i1, 4, %i1
4:
- ld [%o1], %o5
- cmp %o2, 7
- ld [%o1 + 4], %g1
- srl %o2, 2, %g3
+ ld [%i1], %i5
+ cmp %i2, 7
+ ld [%i1 + 4], %g1
+ srl %i2, 2, %g3
bleu 10f
- add %o1, 8, %o1
+ add %i1, 8, %i1
- ld [%o1], %o3
+ ld [%i1], %i3
add %g3, -1, %g3
5:
- sll %o5, %g4, %g2
- srl %g1, %g7, %g5
+ sll %i5, %g4, %g2
+ srl %g1, %l0, %g5
or %g2, %g5, %g2
- st %g2, [%o0]
+ st %g2, [%i0]
7:
- ld [%o1 + 4], %o4
+ ld [%i1 + 4], %i4
sll %g1, %g4, %g2
- srl %o3, %g7, %g5
+ srl %i3, %l0, %g5
or %g2, %g5, %g2
- st %g2, [%o0 + 4]
+ st %g2, [%i0 + 4]
8:
- ld [%o1 + 8], %o5
- sll %o3, %g4, %g2
- srl %o4, %g7, %g5
+ ld [%i1 + 8], %i5
+ sll %i3, %g4, %g2
+ srl %i4, %l0, %g5
or %g2, %g5, %g2
- st %g2, [%o0 + 8]
+ st %g2, [%i0 + 8]
9:
- ld [%o1 + 12], %g1
- sll %o4, %g4, %g2
- srl %o5, %g7, %g5
+ ld [%i1 + 12], %g1
+ sll %i4, %g4, %g2
+ srl %i5, %l0, %g5
addcc %g3, -4, %g3
or %g2, %g5, %g2
- add %o1, 16, %o1
- st %g2, [%o0 + 12]
- add %o0, 16, %o0
+ add %i1, 16, %i1
+ st %g2, [%i0 + 12]
+ add %i0, 16, %i0
bne,a 5b
- ld [%o1], %o3
+ ld [%i1], %i3
10:
- sll %o5, %g4, %g2
- srl %g1, %g7, %g5
- srl %g7, 3, %g3
+ sll %i5, %g4, %g2
+ srl %g1, %l0, %g5
+ srl %l0, 3, %g3
or %g2, %g5, %g2
- sub %o1, %g3, %o1
- andcc %o2, 2, %g0
- st %g2, [%o0]
+ sub %i1, %g3, %i1
+ andcc %i2, 2, %g0
+ st %g2, [%i0]
be 1f
- andcc %o2, 1, %g0
-
- ldub [%o1], %g2
- add %o1, 2, %o1
- stb %g2, [%o0 + 4]
- add %o0, 2, %o0
- ldub [%o1 - 1], %g2
- stb %g2, [%o0 + 3]
+ andcc %i2, 1, %g0
+
+ ldub [%i1], %g2
+ add %i1, 2, %i1
+ stb %g2, [%i0 + 4]
+ add %i0, 2, %i0
+ ldub [%i1 - 1], %g2
+ stb %g2, [%i0 + 3]
1:
be 1f
nop
- ldub [%o1], %g2
- stb %g2, [%o0 + 4]
-1:
- retl
- RETL_INSN
-
-#ifdef FASTER_NONALIGNED
-
-87: /* faster_nonaligned */
-
- andcc %o1, 3, %g0
- be 3f
- andcc %o1, 1, %g0
-
- be 4f
- andcc %o1, 2, %g0
-
- ldub [%o1], %g2
- add %o1, 1, %o1
- stb %g2, [%o0]
- sub %o2, 1, %o2
- bne 3f
- add %o0, 1, %o0
-4:
- lduh [%o1], %g2
- add %o1, 2, %o1
- srl %g2, 8, %g3
- sub %o2, 2, %o2
- stb %g3, [%o0]
- add %o0, 2, %o0
- stb %g2, [%o0 - 1]
-3:
- andcc %o1, 4, %g0
-
- bne 2f
- cmp %o5, 1
-
- ld [%o1], %o4
- srl %o4, 24, %g2
- stb %g2, [%o0]
- srl %o4, 16, %g3
- stb %g3, [%o0 + 1]
- srl %o4, 8, %g2
- stb %g2, [%o0 + 2]
- sub %o2, 4, %o2
- stb %o4, [%o0 + 3]
- add %o1, 4, %o1
- add %o0, 4, %o0
-2:
- be 33f
- cmp %o5, 2
- be 32f
- sub %o2, 4, %o2
-31:
- ld [%o1], %g2
- add %o1, 4, %o1
- srl %g2, 24, %g3
- and %o0, 7, %g5
- stb %g3, [%o0]
- cmp %g5, 7
- sll %g2, 8, %g1
- add %o0, 4, %o0
- be 41f
- and %o2, 0xffffffc0, %o3
- ld [%o0 - 7], %o4
-4:
- SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
- SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
- SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
- SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
- subcc %o3, 64, %o3
- add %o1, 64, %o1
- bne 4b
- add %o0, 64, %o0
-
- andcc %o2, 0x30, %o3
- be,a 1f
- srl %g1, 16, %g2
-4:
- SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
- subcc %o3, 16, %o3
- add %o1, 16, %o1
- bne 4b
- add %o0, 16, %o0
-
- srl %g1, 16, %g2
-1:
- st %o4, [%o0 - 7]
- sth %g2, [%o0 - 3]
- srl %g1, 8, %g4
- b 88f
- stb %g4, [%o0 - 1]
-32:
- ld [%o1], %g2
- add %o1, 4, %o1
- srl %g2, 16, %g3
- and %o0, 7, %g5
- sth %g3, [%o0]
- cmp %g5, 6
- sll %g2, 16, %g1
- add %o0, 4, %o0
- be 42f
- and %o2, 0xffffffc0, %o3
- ld [%o0 - 6], %o4
-4:
- SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
- SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
- SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
- SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
- subcc %o3, 64, %o3
- add %o1, 64, %o1
- bne 4b
- add %o0, 64, %o0
-
- andcc %o2, 0x30, %o3
- be,a 1f
- srl %g1, 16, %g2
-4:
- SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
- subcc %o3, 16, %o3
- add %o1, 16, %o1
- bne 4b
- add %o0, 16, %o0
-
- srl %g1, 16, %g2
-1:
- st %o4, [%o0 - 6]
- b 88f
- sth %g2, [%o0 - 2]
-33:
- ld [%o1], %g2
- sub %o2, 4, %o2
- srl %g2, 24, %g3
- and %o0, 7, %g5
- stb %g3, [%o0]
- cmp %g5, 5
- srl %g2, 8, %g4
- sll %g2, 24, %g1
- sth %g4, [%o0 + 1]
- add %o1, 4, %o1
- be 43f
- and %o2, 0xffffffc0, %o3
-
- ld [%o0 - 1], %o4
- add %o0, 4, %o0
-4:
- SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
- SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
- SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
- SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
- subcc %o3, 64, %o3
- add %o1, 64, %o1
- bne 4b
- add %o0, 64, %o0
-
- andcc %o2, 0x30, %o3
- be,a 1f
- srl %g1, 24, %g2
-4:
- SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
- subcc %o3, 16, %o3
- add %o1, 16, %o1
- bne 4b
- add %o0, 16, %o0
-
- srl %g1, 24, %g2
-1:
- st %o4, [%o0 - 5]
- b 88f
- stb %g2, [%o0 - 1]
-41:
- SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
- SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
- SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
- SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
- subcc %o3, 64, %o3
- add %o1, 64, %o1
- bne 41b
- add %o0, 64, %o0
-
- andcc %o2, 0x30, %o3
- be,a 1f
- srl %g1, 16, %g2
-4:
- SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
- subcc %o3, 16, %o3
- add %o1, 16, %o1
- bne 4b
- add %o0, 16, %o0
-
- srl %g1, 16, %g2
+ ldub [%i1], %g2
+ stb %g2, [%i0 + 4]
1:
- sth %g2, [%o0 - 3]
- srl %g1, 8, %g4
- b 88f
- stb %g4, [%o0 - 1]
-43:
- SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
- SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
- SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
- SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
- subcc %o3, 64, %o3
- add %o1, 64, %o1
- bne 43b
- add %o0, 64, %o0
-
- andcc %o2, 0x30, %o3
- be,a 1f
- srl %g1, 24, %g2
-4:
- SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
- subcc %o3, 16, %o3
- add %o1, 16, %o1
- bne 4b
- add %o0, 16, %o0
-
- srl %g1, 24, %g2
-1:
- stb %g2, [%o0 + 3]
- b 88f
- add %o0, 4, %o0
-42:
- SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
- SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
- SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
- SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
- subcc %o3, 64, %o3
- add %o1, 64, %o1
- bne 42b
- add %o0, 64, %o0
-
- andcc %o2, 0x30, %o3
- be,a 1f
- srl %g1, 16, %g2
-4:
- SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
- subcc %o3, 16, %o3
- add %o1, 16, %o1
- bne 4b
- add %o0, 16, %o0
-
- srl %g1, 16, %g2
-1:
- sth %g2, [%o0 - 2]
-
- /* Fall through */
-
-#endif /* FASTER_NONALIGNED */
+ ret
+ restore %g7, %g0, %o0
88: /* short_end */
@@ -1127,7 +521,7 @@ FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
stb %g2, [%o0]
1:
retl
- RETL_INSN
+ mov %g7, %o0
90: /* short_aligned_end */
bne 88b
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
index 79836a7..3b6e248 100644
--- a/arch/sparc/mm/Makefile
+++ b/arch/sparc/mm/Makefile
@@ -8,7 +8,6 @@ obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
obj-y += fault_$(BITS).o
obj-y += init_$(BITS).o
obj-$(CONFIG_SPARC32) += loadmmu.o
-obj-y += generic_$(BITS).o
obj-$(CONFIG_SPARC32) += extable.o btfixup.o srmmu.o iommu.o io-unit.o
obj-$(CONFIG_SPARC32) += hypersparc.o viking.o tsunami.o swift.o
obj-$(CONFIG_SPARC_LEON)+= leon_mm.o
diff --git a/arch/sparc/mm/btfixup.c b/arch/sparc/mm/btfixup.c
index 5175ac2..8a7f817 100644
--- a/arch/sparc/mm/btfixup.c
+++ b/arch/sparc/mm/btfixup.c
@@ -302,8 +302,7 @@ void __init btfixup(void)
case 'i': /* INT */
if ((insn & 0xc1c00000) == 0x01000000) /* %HI */
set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
- else if ((insn & 0x80002000) == 0x80002000 &&
- (insn & 0x01800000) != 0x01800000) /* %LO */
+ else if ((insn & 0x80002000) == 0x80002000) /* %LO */
set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff));
else {
prom_printf(insn_i, p, addr, insn);
diff --git a/arch/sparc/mm/generic_32.c b/arch/sparc/mm/generic_32.c
deleted file mode 100644
index e6067b7..0000000
--- a/arch/sparc/mm/generic_32.c
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * generic.c: Generic Sparc mm routines that are not dependent upon
- * MMU type but are Sparc specific.
- *
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
- */
-
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/pagemap.h>
-
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
-#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
-
-/* Remap IO memory, the same way as remap_pfn_range(), but use
- * the obio memory space.
- *
- * They use a pgprot that sets PAGE_IO and does not check the
- * mem_map table as this is independent of normal memory.
- */
-static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte, unsigned long address, unsigned long size,
- unsigned long offset, pgprot_t prot, int space)
-{
- unsigned long end;
-
- address &= ~PMD_MASK;
- end = address + size;
- if (end > PMD_SIZE)
- end = PMD_SIZE;
- do {
- set_pte_at(mm, address, pte, mk_pte_io(offset, prot, space));
- address += PAGE_SIZE;
- offset += PAGE_SIZE;
- pte++;
- } while (address < end);
-}
-
-static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
- unsigned long offset, pgprot_t prot, int space)
-{
- unsigned long end;
-
- address &= ~PGDIR_MASK;
- end = address + size;
- if (end > PGDIR_SIZE)
- end = PGDIR_SIZE;
- offset -= address;
- do {
- pte_t *pte = pte_alloc_map(mm, NULL, pmd, address);
- if (!pte)
- return -ENOMEM;
- io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
- address = (address + PMD_SIZE) & PMD_MASK;
- pmd++;
- } while (address < end);
- return 0;
-}
-
-int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
- unsigned long pfn, unsigned long size, pgprot_t prot)
-{
- int error = 0;
- pgd_t * dir;
- unsigned long beg = from;
- unsigned long end = from + size;
- struct mm_struct *mm = vma->vm_mm;
- int space = GET_IOSPACE(pfn);
- unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
-
- /* See comment in mm/memory.c remap_pfn_range */
- vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
- vma->vm_pgoff = (offset >> PAGE_SHIFT) |
- ((unsigned long)space << 28UL);
-
- offset -= from;
- dir = pgd_offset(mm, from);
- flush_cache_range(vma, beg, end);
-
- while (from < end) {
- pmd_t *pmd = pmd_alloc(mm, dir, from);
- error = -ENOMEM;
- if (!pmd)
- break;
- error = io_remap_pmd_range(mm, pmd, from, end - from, offset + from, prot, space);
- if (error)
- break;
- from = (from + PGDIR_SIZE) & PGDIR_MASK;
- dir++;
- }
-
- flush_tlb_range(vma, beg, end);
- return error;
-}
-EXPORT_SYMBOL(io_remap_pfn_range);
diff --git a/arch/sparc/mm/generic_64.c b/arch/sparc/mm/generic_64.c
deleted file mode 100644
index 3cb00df..0000000
--- a/arch/sparc/mm/generic_64.c
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * generic.c: Generic Sparc mm routines that are not dependent upon
- * MMU type but are Sparc specific.
- *
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
- */
-
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/pagemap.h>
-
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
-#include <asm/tlbflush.h>
-
-/* Remap IO memory, the same way as remap_pfn_range(), but use
- * the obio memory space.
- *
- * They use a pgprot that sets PAGE_IO and does not check the
- * mem_map table as this is independent of normal memory.
- */
-static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
- unsigned long address,
- unsigned long size,
- unsigned long offset, pgprot_t prot,
- int space)
-{
- unsigned long end;
-
- /* clear hack bit that was used as a write_combine side-effect flag */
- offset &= ~0x1UL;
- address &= ~PMD_MASK;
- end = address + size;
- if (end > PMD_SIZE)
- end = PMD_SIZE;
- do {
- pte_t entry;
- unsigned long curend = address + PAGE_SIZE;
-
- entry = mk_pte_io(offset, prot, space, PAGE_SIZE);
- if (!(address & 0xffff)) {
- if (PAGE_SIZE < (4 * 1024 * 1024) &&
- !(address & 0x3fffff) &&
- !(offset & 0x3ffffe) &&
- end >= address + 0x400000) {
- entry = mk_pte_io(offset, prot, space,
- 4 * 1024 * 1024);
- curend = address + 0x400000;
- offset += 0x400000;
- } else if (PAGE_SIZE < (512 * 1024) &&
- !(address & 0x7ffff) &&
- !(offset & 0x7fffe) &&
- end >= address + 0x80000) {
- entry = mk_pte_io(offset, prot, space,
- 512 * 1024 * 1024);
- curend = address + 0x80000;
- offset += 0x80000;
- } else if (PAGE_SIZE < (64 * 1024) &&
- !(offset & 0xfffe) &&
- end >= address + 0x10000) {
- entry = mk_pte_io(offset, prot, space,
- 64 * 1024);
- curend = address + 0x10000;
- offset += 0x10000;
- } else
- offset += PAGE_SIZE;
- } else
- offset += PAGE_SIZE;
-
- if (pte_write(entry))
- entry = pte_mkdirty(entry);
- do {
- BUG_ON(!pte_none(*pte));
- set_pte_at(mm, address, pte, entry);
- address += PAGE_SIZE;
- pte_val(entry) += PAGE_SIZE;
- pte++;
- } while (address < curend);
- } while (address < end);
-}
-
-static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
- unsigned long offset, pgprot_t prot, int space)
-{
- unsigned long end;
-
- address &= ~PGDIR_MASK;
- end = address + size;
- if (end > PGDIR_SIZE)
- end = PGDIR_SIZE;
- offset -= address;
- do {
- pte_t *pte = pte_alloc_map(mm, NULL, pmd, address);
- if (!pte)
- return -ENOMEM;
- io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
- pte_unmap(pte);
- address = (address + PMD_SIZE) & PMD_MASK;
- pmd++;
- } while (address < end);
- return 0;
-}
-
-static inline int io_remap_pud_range(struct mm_struct *mm, pud_t * pud, unsigned long address, unsigned long size,
- unsigned long offset, pgprot_t prot, int space)
-{
- unsigned long end;
-
- address &= ~PUD_MASK;
- end = address + size;
- if (end > PUD_SIZE)
- end = PUD_SIZE;
- offset -= address;
- do {
- pmd_t *pmd = pmd_alloc(mm, pud, address);
- if (!pud)
- return -ENOMEM;
- io_remap_pmd_range(mm, pmd, address, end - address, address + offset, prot, space);
- address = (address + PUD_SIZE) & PUD_MASK;
- pud++;
- } while (address < end);
- return 0;
-}
-
-int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
- unsigned long pfn, unsigned long size, pgprot_t prot)
-{
- int error = 0;
- pgd_t * dir;
- unsigned long beg = from;
- unsigned long end = from + size;
- struct mm_struct *mm = vma->vm_mm;
- int space = GET_IOSPACE(pfn);
- unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
- unsigned long phys_base;
-
- phys_base = offset | (((unsigned long) space) << 32UL);
-
- /* See comment in mm/memory.c remap_pfn_range */
- vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
- vma->vm_pgoff = phys_base >> PAGE_SHIFT;
-
- offset -= from;
- dir = pgd_offset(mm, from);
- flush_cache_range(vma, beg, end);
-
- while (from < end) {
- pud_t *pud = pud_alloc(mm, dir, from);
- error = -ENOMEM;
- if (!pud)
- break;
- error = io_remap_pud_range(mm, pud, from, end - from, offset + from, prot, space);
- if (error)
- break;
- from = (from + PGDIR_SIZE) & PGDIR_MASK;
- dir++;
- }
-
- flush_tlb_range(vma, beg, end);
- return error;
-}
-EXPORT_SYMBOL(io_remap_pfn_range);
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 620f5b7..0491e40 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -513,8 +513,37 @@ __uml_exitcall(kill_io_thread);
static inline int ubd_file_size(struct ubd *ubd_dev, __u64 *size_out)
{
char *file;
+ int fd;
+ int err;
+
+ __u32 version;
+ __u32 align;
+ char *backing_file;
+ time_t mtime;
+ unsigned long long size;
+ int sector_size;
+ int bitmap_offset;
+
+ if (ubd_dev->file && ubd_dev->cow.file) {
+ file = ubd_dev->cow.file;
+
+ goto out;
+ }
- file = ubd_dev->cow.file ? ubd_dev->cow.file : ubd_dev->file;
+ fd = os_open_file(ubd_dev->file, global_openflags, 0);
+ if (fd < 0)
+ return fd;
+
+ err = read_cow_header(file_reader, &fd, &version, &backing_file, \
+ &mtime, &size, &sector_size, &align, &bitmap_offset);
+ os_close_file(fd);
+
+ if(err == -EINVAL)
+ file = ubd_dev->file;
+ else
+ file = backing_file;
+
+out:
return os_file_size(file, size_out);
}
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index 67f87f2..78a1eff 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -1,6 +1,7 @@
#ifndef _ASM_X86_AMD_NB_H
#define _ASM_X86_AMD_NB_H
+#include <linux/ioport.h>
#include <linux/pci.h>
struct amd_nb_bus_dev_range {
@@ -13,6 +14,7 @@ extern const struct pci_device_id amd_nb_misc_ids[];
extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[];
extern bool early_is_amd_nb(u32 value);
+extern struct resource *amd_get_mmconfig_range(struct resource *res);
extern int amd_cache_northbridges(void);
extern void amd_flush_garts(void);
extern int amd_numa_init(void);
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 4a0b7c7..244ac77 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -495,7 +495,7 @@ static inline void default_wait_for_init_deassert(atomic_t *deassert)
return;
}
-extern struct apic *generic_bigsmp_probe(void);
+extern void generic_bigsmp_probe(void);
#ifdef CONFIG_X86_LOCAL_APIC
diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h
index fa7b917..431793e 100644
--- a/arch/x86/include/asm/timer.h
+++ b/arch/x86/include/asm/timer.h
@@ -32,6 +32,22 @@ extern int no_timer_check;
* (mathieu.desnoyers@polymtl.ca)
*
* -johnstul@us.ibm.com "math is hard, lets go shopping!"
+ *
+ * In:
+ *
+ * ns = cycles * cyc2ns_scale / SC
+ *
+ * Although we may still have enough bits to store the value of ns,
+ * in some cases, we may not have enough bits to store cycles * cyc2ns_scale,
+ * leading to an incorrect result.
+ *
+ * To avoid this, we can decompose 'cycles' into quotient and remainder
+ * of division by SC. Then,
+ *
+ * ns = (quot * SC + rem) * cyc2ns_scale / SC
+ * = quot * cyc2ns_scale + (rem * cyc2ns_scale) / SC
+ *
+ * - sqazi@google.com
*/
DECLARE_PER_CPU(unsigned long, cyc2ns);
@@ -41,9 +57,14 @@ DECLARE_PER_CPU(unsigned long long, cyc2ns_offset);
static inline unsigned long long __cycles_2_ns(unsigned long long cyc)
{
+ unsigned long long quot;
+ unsigned long long rem;
int cpu = smp_processor_id();
unsigned long long ns = per_cpu(cyc2ns_offset, cpu);
- ns += cyc * per_cpu(cyc2ns, cpu) >> CYC2NS_SCALE_FACTOR;
+ quot = (cyc >> CYC2NS_SCALE_FACTOR);
+ rem = cyc & ((1ULL << CYC2NS_SCALE_FACTOR) - 1);
+ ns += quot * per_cpu(cyc2ns, cpu) +
+ ((rem * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR);
return ns;
}
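
The reworked __cycles_2_ns() above avoids letting the intermediate product cyc * cyc2ns_scale overflow 64 bits by splitting cyc at the scale shift, exactly as the new comment derives. A standalone demonstration with made-up numbers (SC_SHIFT stands in for CYC2NS_SCALE_FACTOR; the cycle count and scale are hypothetical):

#include <stdint.h>
#include <stdio.h>

#define SC_SHIFT 10	/* stand-in for CYC2NS_SCALE_FACTOR */

int main(void)
{
	uint64_t cyc = 1ULL << 58;	/* large cycle count */
	uint64_t scale = 1024;		/* hypothetical cyc2ns value */

	uint64_t naive = (cyc * scale) >> SC_SHIFT;	/* product wraps: prints 0 */
	uint64_t quot = cyc >> SC_SHIFT;
	uint64_t rem = cyc & ((1ULL << SC_SHIFT) - 1);
	uint64_t split = quot * scale + ((rem * scale) >> SC_SHIFT);	/* correct: 2^58 */

	printf("naive=%llu split=%llu\n",
	       (unsigned long long)naive, (unsigned long long)split);
	return 0;
}
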
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index a291c40..5d62d65 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -55,6 +55,7 @@
#define UV_BAU_TUNABLES_DIR "sgi_uv"
#define UV_BAU_TUNABLES_FILE "bau_tunables"
#define WHITESPACE " \t\n"
+#define uv_mmask ((1UL << uv_hub_info->m_val) - 1)
#define uv_physnodeaddr(x) ((__pa((unsigned long)(x)) & uv_mmask))
#define cpubit_isset(cpu, bau_local_cpumask) \
test_bit((cpu), (bau_local_cpumask).bits)
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index f26544a..21f7385 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -46,6 +46,13 @@
* PNODE - the low N bits of the GNODE. The PNODE is the most useful variant
* of the nasid for socket usage.
*
+ * GPA - (global physical address) a socket physical address converted
+ * so that it can be used by the GRU as a global address. Socket
+ * physical addresses 1) need additional NASID (node) bits added
+ * to the high end of the address, and 2) unaliased if the
+ * partition does not have a physical address 0. In addition, on
+ * UV2 rev 1, GPAs need the gnode left shifted to bits 39 or 40.
+ *
*
* NumaLink Global Physical Address Format:
* +--------------------------------+---------------------+
@@ -141,6 +148,8 @@ struct uv_hub_info_s {
unsigned int gnode_extra;
unsigned char hub_revision;
unsigned char apic_pnode_shift;
+ unsigned char m_shift;
+ unsigned char n_lshift;
unsigned long gnode_upper;
unsigned long lowmem_remap_top;
unsigned long lowmem_remap_base;
@@ -177,6 +186,16 @@ static inline int is_uv2_hub(void)
return uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE;
}
+static inline int is_uv2_1_hub(void)
+{
+ return uv_hub_info->hub_revision == UV2_HUB_REVISION_BASE;
+}
+
+static inline int is_uv2_2_hub(void)
+{
+ return uv_hub_info->hub_revision == UV2_HUB_REVISION_BASE + 1;
+}
+
union uvh_apicid {
unsigned long v;
struct uvh_apicid_s {
@@ -276,7 +295,10 @@ static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr)
{
if (paddr < uv_hub_info->lowmem_remap_top)
paddr |= uv_hub_info->lowmem_remap_base;
- return paddr | uv_hub_info->gnode_upper;
+ paddr |= uv_hub_info->gnode_upper;
+ paddr = ((paddr << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |
+ ((paddr >> uv_hub_info->m_val) << uv_hub_info->n_lshift);
+ return paddr;
}
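
In the converted form above, the shift pair (paddr << m_shift) >> m_shift keeps the node-local offset (the low m_val bits, since m_shift == 64 - m_val) while the second term repositions the gnode at bit n_lshift. A sketch with invented geometry; on real hardware m_shift, m_val and n_lshift come from the per-hub info filled in by uv_system_init() later in this diff:

static unsigned long example_soc_to_gpa(void)
{
	unsigned long m_val = 36, m_shift = 64 - 36, n_lshift = 39;	/* invented */
	unsigned long paddr = (2UL << m_val) | 0x1000UL;	/* gnode 2, offset 0x1000 */

	return ((paddr << m_shift) >> m_shift) |	/* node offset: 0x1000 */
	       ((paddr >> m_val) << n_lshift);		/* gnode at bit 39: 2UL << 39 */
}
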
@@ -296,20 +318,23 @@ uv_gpa_in_mmr_space(unsigned long gpa)
/* UV global physical address --> socket phys RAM */
static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa)
{
- unsigned long paddr = gpa & uv_hub_info->gpa_mask;
+ unsigned long paddr;
unsigned long remap_base = uv_hub_info->lowmem_remap_base;
unsigned long remap_top = uv_hub_info->lowmem_remap_top;
+ gpa = ((gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |
+ ((gpa >> uv_hub_info->n_lshift) << uv_hub_info->m_val);
+ paddr = gpa & uv_hub_info->gpa_mask;
if (paddr >= remap_base && paddr < remap_base + remap_top)
paddr -= remap_base;
return paddr;
}
-/* gnode -> pnode */
+/* gpa -> pnode */
static inline unsigned long uv_gpa_to_gnode(unsigned long gpa)
{
- return gpa >> uv_hub_info->m_val;
+ return gpa >> uv_hub_info->n_lshift;
}
/* gpa -> pnode */
@@ -320,6 +345,12 @@ static inline int uv_gpa_to_pnode(unsigned long gpa)
return uv_gpa_to_gnode(gpa) & n_mask;
}
+/* gpa -> node offset*/
+static inline unsigned long uv_gpa_to_offset(unsigned long gpa)
+{
+ return (gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift;
+}
+
/* pnode, offset --> socket virtual */
static inline void *uv_pnode_offset_to_vaddr(int pnode, unsigned long offset)
{
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index d3d9d50..bfd75ff 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -1203,7 +1203,7 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom,
if (!pte || !IOMMU_PTE_PRESENT(*pte))
continue;
- dma_ops_reserve_addresses(dma_dom, i << PAGE_SHIFT, 1);
+ dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT, 1);
}
update_domain(&dma_dom->domain);
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 4c39baa..bae1efe 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -119,6 +119,37 @@ bool __init early_is_amd_nb(u32 device)
return false;
}
+struct resource *amd_get_mmconfig_range(struct resource *res)
+{
+ u32 address;
+ u64 base, msr;
+ unsigned segn_busn_bits;
+
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+ return NULL;
+
+ /* assume all cpus from fam10h have mmconfig */
+ if (boot_cpu_data.x86 < 0x10)
+ return NULL;
+
+ address = MSR_FAM10H_MMIO_CONF_BASE;
+ rdmsrl(address, msr);
+
+ /* mmconfig is not enabled */
+ if (!(msr & FAM10H_MMIO_CONF_ENABLE))
+ return NULL;
+
+ base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
+
+ segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
+ FAM10H_MMIO_CONF_BUSRANGE_MASK;
+
+ res->flags = IORESOURCE_MEM;
+ res->start = base;
+ res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
+ return res;
+}
+
int amd_get_subcaches(int cpu)
{
struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
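
amd_get_mmconfig_range() above reads the whole window out of MSR_FAM10H_MMIO_CONF_BASE: the masked MSR value gives the base, and the bus-range field sizes the window at one megabyte of configuration space per bus. A quick size check for a hypothetical bus-range field of 8 (256 buses):

static unsigned long long mmconfig_window_size(unsigned segn_busn_bits)
{
	/* e.g. segn_busn_bits == 8 -> 1ULL << 28 == 256 MiB, which matches
	 * res->end = base + (1ULL << (segn_busn_bits + 20)) - 1 above */
	return 1ULL << (segn_busn_bits + 20);
}
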
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index efd737e..521bead 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -255,12 +255,24 @@ static struct apic apic_bigsmp = {
.x86_32_early_logical_apicid = bigsmp_early_logical_apicid,
};
-struct apic * __init generic_bigsmp_probe(void)
+void __init generic_bigsmp_probe(void)
{
- if (probe_bigsmp())
- return &apic_bigsmp;
+ unsigned int cpu;
- return NULL;
+ if (!probe_bigsmp())
+ return;
+
+ apic = &apic_bigsmp;
+
+ for_each_possible_cpu(cpu) {
+ if (early_per_cpu(x86_cpu_to_logical_apicid,
+ cpu) == BAD_APICID)
+ continue;
+ early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
+ bigsmp_early_logical_apicid(cpu);
+ }
+
+ pr_info("Overriding APIC driver with %s\n", apic_bigsmp.name);
}
apic_driver(apic_bigsmp);
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index b5254ad..0787bb3 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -200,14 +200,8 @@ void __init default_setup_apic_routing(void)
* - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support
*/
- if (!cmdline_apic && apic == &apic_default) {
- struct apic *bigsmp = generic_bigsmp_probe();
- if (bigsmp) {
- apic = bigsmp;
- printk(KERN_INFO "Overriding APIC driver with %s\n",
- apic->name);
- }
- }
+ if (!cmdline_apic && apic == &apic_default)
+ generic_bigsmp_probe();
#endif
if (apic->setup_apic_routing)
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 34b1859..874c208 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -779,7 +779,12 @@ void __init uv_system_init(void)
for(i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
uv_possible_blades +=
hweight64(uv_read_local_mmr( UVH_NODE_PRESENT_TABLE + i * 8));
- printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());
+
+ /* uv_num_possible_blades() is really the hub count */
+ printk(KERN_INFO "UV: Found %d blades, %d hubs\n",
+ is_uv1_hub() ? uv_num_possible_blades() :
+ (uv_num_possible_blades() + 1) / 2,
+ uv_num_possible_blades());
bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
uv_blade_info = kzalloc(bytes, GFP_KERNEL);
@@ -832,6 +837,10 @@ void __init uv_system_init(void)
uv_cpu_hub_info(cpu)->apic_pnode_shift = uvh_apicid.s.pnode_shift;
uv_cpu_hub_info(cpu)->hub_revision = uv_hub_info->hub_revision;
+ uv_cpu_hub_info(cpu)->m_shift = 64 - m_val;
+ uv_cpu_hub_info(cpu)->n_lshift = is_uv2_1_hub() ?
+ (m_val == 40 ? 40 : 39) : m_val;
+
pnode = uv_apicid_to_pnode(apicid);
blade = boot_pnode_to_blade(pnode);
lcpu = uv_blade_info[blade].nr_possible_cpus;
@@ -862,8 +871,7 @@ void __init uv_system_init(void)
if (uv_node_to_blade[nid] >= 0)
continue;
paddr = node_start_pfn(nid) << PAGE_SHIFT;
- paddr = uv_soc_phys_ram_to_gpa(paddr);
- pnode = (paddr >> m_val) & pnode_mask;
+ pnode = uv_gpa_to_pnode(uv_soc_phys_ram_to_gpa(paddr));
blade = boot_pnode_to_blade(pnode);
uv_node_to_blade[nid] = blade;
}
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index bab491b..d812fe2 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -508,6 +508,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
unsigned long from = cpuc->lbr_entries[0].from;
unsigned long old_to, to = cpuc->lbr_entries[0].to;
unsigned long ip = regs->ip;
+ int is_64bit = 0;
/*
* We don't need to fixup if the PEBS assist is fault like
@@ -559,7 +560,10 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
} else
kaddr = (void *)to;
- kernel_insn_init(&insn, kaddr);
+#ifdef CONFIG_X86_64
+ is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
+#endif
+ insn_init(&insn, kaddr, is_64bit);
insn_get_length(&insn);
to += insn.length;
} while (to < ip);
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 6781765..aa083d3 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -1054,6 +1054,14 @@ int hpet_rtc_timer_init(void)
}
EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);
+static void hpet_disable_rtc_channel(void)
+{
+ unsigned long cfg;
+ cfg = hpet_readl(HPET_T1_CFG);
+ cfg &= ~HPET_TN_ENABLE;
+ hpet_writel(cfg, HPET_T1_CFG);
+}
+
/*
* The functions below are called from rtc driver.
* Return 0 if HPET is not being used.
@@ -1065,6 +1073,9 @@ int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
return 0;
hpet_rtc_flags &= ~bit_mask;
+ if (unlikely(!hpet_rtc_flags))
+ hpet_disable_rtc_channel();
+
return 1;
}
EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit);
@@ -1130,15 +1141,11 @@ EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq);
static void hpet_rtc_timer_reinit(void)
{
- unsigned int cfg, delta;
+ unsigned int delta;
int lost_ints = -1;
- if (unlikely(!hpet_rtc_flags)) {
- cfg = hpet_readl(HPET_T1_CFG);
- cfg &= ~HPET_TN_ENABLE;
- hpet_writel(cfg, HPET_T1_CFG);
- return;
- }
+ if (unlikely(!hpet_rtc_flags))
+ hpet_disable_rtc_channel();
if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
delta = hpet_default_delta;
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index f1a6244..794bc95 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -75,8 +75,10 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
/*
* Undefined/reserved opcodes, conditional jump, Opcode Extension
* Groups, and some special opcodes can not boost.
+ * This is non-const to keep gcc from statically optimizing it out, as
+ * variable_test_bit makes gcc think only *(unsigned long*) is used.
*/
-static const u32 twobyte_is_boostable[256 / 32] = {
+static u32 twobyte_is_boostable[256 / 32] = {
/* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
/* ---------------------------------------------- */
W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index c561038..b727450 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -298,13 +298,33 @@ free_table:
return state;
}
+/*
+ * AMD microcode firmware naming convention, up to family 15h they are in
+ * the legacy file:
+ *
+ * amd-ucode/microcode_amd.bin
+ *
+ * This legacy file is always smaller than 2K in size.
+ *
+ * Starting at family 15h they are in family specific firmware files:
+ *
+ * amd-ucode/microcode_amd_fam15h.bin
+ * amd-ucode/microcode_amd_fam16h.bin
+ * ...
+ *
+ * These might be larger than 2K.
+ */
static enum ucode_state request_microcode_amd(int cpu, struct device *device)
{
- const char *fw_name = "amd-ucode/microcode_amd.bin";
+ char fw_name[36] = "amd-ucode/microcode_amd.bin";
const struct firmware *fw;
enum ucode_state ret = UCODE_NFOUND;
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+ if (c->x86 >= 0x15)
+ snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);
- if (request_firmware(&fw, fw_name, device)) {
+ if (request_firmware(&fw, (const char *)fw_name, device)) {
pr_err("failed to load file %s\n", fw_name);
goto out;
}
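
With the family check above, the firmware path is generated at runtime: a family 0x15 part, for example, ends up requesting amd-ucode/microcode_amd_fam15h.bin while older parts keep the legacy name. A standalone rendering of the same snprintf() (the family value is just an example):

#include <stdio.h>

int main(void)
{
	char fw_name[36] = "amd-ucode/microcode_amd.bin";
	int family = 0x15;	/* stand-in for c->x86 */

	if (family >= 0x15)
		snprintf(fw_name, sizeof(fw_name),
			 "amd-ucode/microcode_amd_fam%.2xh.bin", family);
	printf("%s\n", fw_name);	/* amd-ucode/microcode_amd_fam15h.bin */
	return 0;
}
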
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 9103b89..0741b062 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -95,8 +95,8 @@ static void __init MP_bus_info(struct mpc_bus *m)
}
#endif
+ set_bit(m->busid, mp_bus_not_pci);
if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
- set_bit(m->busid, mp_bus_not_pci);
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
#endif
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 9242436..d4a705f 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -124,7 +124,7 @@ __setup("reboot=", reboot_setup);
*/
/*
- * Some machines require the "reboot=b" commandline option,
+ * Some machines require the "reboot=b" or "reboot=k" commandline options,
* this quirk makes that automatic.
*/
static int __init set_bios_reboot(const struct dmi_system_id *d)
@@ -136,6 +136,15 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
return 0;
}
+static int __init set_kbd_reboot(const struct dmi_system_id *d)
+{
+ if (reboot_type != BOOT_KBD) {
+ reboot_type = BOOT_KBD;
+ printk(KERN_INFO "%s series board detected. Selecting KBD-method for reboot.\n", d->ident);
+ }
+ return 0;
+}
+
static struct dmi_system_id __initdata reboot_dmi_table[] = {
{ /* Handle problems with rebooting on Dell E520's */
.callback = set_bios_reboot,
@@ -295,7 +304,7 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
},
},
{ /* Handle reboot issue on Acer Aspire one */
- .callback = set_bios_reboot,
+ .callback = set_kbd_reboot,
.ident = "Acer Aspire One A110",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index dbe34b9..dd74e46 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -108,16 +108,6 @@ static inline void get_head_page_multiple(struct page *page, int nr)
SetPageReferenced(page);
}
-static inline void get_huge_page_tail(struct page *page)
-{
- /*
- * __split_huge_page_refcount() cannot run
- * from under us.
- */
- VM_BUG_ON(atomic_read(&page->_count) < 0);
- atomic_inc(&page->_count);
-}
-
static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr)
{
@@ -211,6 +201,8 @@ static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
do {
VM_BUG_ON(compound_head(page) != head);
pages[*nr] = page;
+ if (PageTail(page))
+ get_huge_page_tail(page);
(*nr)++;
page++;
refs++;
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index b499626..f4f29b1 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -45,6 +45,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
BUG_ON(!pte_none(*(kmap_pte-idx)));
set_pte(kmap_pte-idx, mk_pte(page, prot));
+ arch_flush_lazy_mmu_mode();
return (void *)vaddr;
}
@@ -88,6 +89,7 @@ void __kunmap_atomic(void *kvaddr)
*/
kpte_clear_flush(kmap_pte-idx, vaddr);
kmap_atomic_idx_pop();
+ arch_flush_lazy_mmu_mode();
}
#ifdef CONFIG_DEBUG_HIGHMEM
else {
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 1dab519..f927429 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -87,9 +87,9 @@ static unsigned long mmap_rnd(void)
*/
if (current->flags & PF_RANDOMIZE) {
if (mmap_is_ia32())
- rnd = (long)get_random_int() % (1<<8);
+ rnd = get_random_int() % (1<<8);
else
- rnd = (long)(get_random_int() % (1<<28));
+ rnd = get_random_int() % (1<<28);
}
return rnd << PAGE_SHIFT;
}
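A worked sketch of the randomization range implied by mmap_rnd(): N random bits shifted left by PAGE_SHIFT, assuming 4 KiB pages, giving roughly 1 MiB of spread for 32-bit tasks and 1 TiB for 64-bit tasks:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assuming 4 KiB pages */

int main(void)
{
	unsigned long long span32 = (1ULL << 8)  << PAGE_SHIFT;	/* ia32 tasks   */
	unsigned long long span64 = (1ULL << 28) << PAGE_SHIFT;	/* 64-bit tasks */

	printf("32-bit mmap offset range: %llu MiB\n", span32 >> 20);	/* 1    */
	printf("64-bit mmap offset range: %llu GiB\n", span64 >> 30);	/* 1024 */
	return 0;
}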
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 81dbfde..7efd0c6 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -104,6 +104,8 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
return;
pxm = pa->proximity_domain_lo;
+ if (acpi_srat_revision >= 2)
+ pxm |= *((unsigned int*)pa->proximity_domain_hi) << 8;
node = setup_node(pxm);
if (node < 0) {
printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
@@ -155,6 +157,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
start = ma->base_address;
end = start + ma->length;
pxm = ma->proximity_domain;
+ if (acpi_srat_revision <= 1)
+ pxm &= 0xff;
node = setup_node(pxm);
if (node < 0) {
printk(KERN_ERR "SRAT: Too many proximity domains.\n");
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index bfab3fa..7c1b765 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -151,17 +151,18 @@ void bpf_jit_compile(struct sk_filter *fp)
cleanup_addr = proglen; /* epilogue address */
for (pass = 0; pass < 10; pass++) {
+ u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
/* no prologue/epilogue for trivial filters (RET something) */
proglen = 0;
prog = temp;
- if (seen) {
+ if (seen_or_pass0) {
EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */
EMIT4(0x48, 0x83, 0xec, 96); /* subq $96,%rsp */
/* note : must save %rbx in case bpf_error is hit */
- if (seen & (SEEN_XREG | SEEN_DATAREF))
+ if (seen_or_pass0 & (SEEN_XREG | SEEN_DATAREF))
EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */
- if (seen & SEEN_XREG)
+ if (seen_or_pass0 & SEEN_XREG)
CLEAR_X(); /* make sure we don't leak kernel memory */
/*
@@ -170,7 +171,7 @@ void bpf_jit_compile(struct sk_filter *fp)
* r9 = skb->len - skb->data_len
* r8 = skb->data
*/
- if (seen & SEEN_DATAREF) {
+ if (seen_or_pass0 & SEEN_DATAREF) {
if (offsetof(struct sk_buff, len) <= 127)
/* mov off8(%rdi),%r9d */
EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len));
@@ -260,9 +261,14 @@ void bpf_jit_compile(struct sk_filter *fp)
case BPF_S_ALU_DIV_X: /* A /= X; */
seen |= SEEN_XREG;
EMIT2(0x85, 0xdb); /* test %ebx,%ebx */
- if (pc_ret0 != -1)
- EMIT_COND_JMP(X86_JE, addrs[pc_ret0] - (addrs[i] - 4));
- else {
+ if (pc_ret0 > 0) {
+ /* addrs[pc_ret0 - 1] is start address of target
+ * (addrs[i] - 4) is the address following this jmp
+ * ("xor %edx,%edx; div %ebx" being 4 bytes long)
+ */
+ EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
+ (addrs[i] - 4));
+ } else {
EMIT_COND_JMP(X86_JNE, 2 + 5);
CLEAR_A();
EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */
@@ -335,12 +341,12 @@ void bpf_jit_compile(struct sk_filter *fp)
}
/* fall through */
case BPF_S_RET_A:
- if (seen) {
+ if (seen_or_pass0) {
if (i != flen - 1) {
EMIT_JMP(cleanup_addr - addrs[i]);
break;
}
- if (seen & SEEN_XREG)
+ if (seen_or_pass0 & SEEN_XREG)
EMIT4(0x48, 0x8b, 0x5d, 0xf8); /* mov -8(%rbp),%rbx */
EMIT1(0xc9); /* leaveq */
}
@@ -483,8 +489,9 @@ common_load: seen |= SEEN_DATAREF;
goto common_load;
case BPF_S_LDX_B_MSH:
if ((int)K < 0) {
- if (pc_ret0 != -1) {
- EMIT_JMP(addrs[pc_ret0] - addrs[i]);
+ if (pc_ret0 > 0) {
+ /* addrs[pc_ret0 - 1] is the start address */
+ EMIT_JMP(addrs[pc_ret0 - 1] - addrs[i]);
break;
}
CLEAR_A();
@@ -568,8 +575,8 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
break;
}
if (filter[i].jt != 0) {
- if (filter[i].jf)
- t_offset += is_near(f_offset) ? 2 : 6;
+ if (filter[i].jf && f_offset)
+ t_offset += is_near(f_offset) ? 2 : 5;
EMIT_COND_JMP(t_op, t_offset);
if (filter[i].jf)
EMIT_JMP(f_offset);
@@ -599,13 +606,14 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
* use it to give the cleanup instruction(s) addr
*/
cleanup_addr = proglen - 1; /* ret */
- if (seen)
+ if (seen_or_pass0)
cleanup_addr -= 1; /* leaveq */
- if (seen & SEEN_XREG)
+ if (seen_or_pass0 & SEEN_XREG)
cleanup_addr -= 4; /* mov -8(%rbp),%rbx */
if (image) {
- WARN_ON(proglen != oldproglen);
+ if (proglen != oldproglen)
+ pr_err("bpb_jit_compile proglen=%u != oldproglen=%u\n", proglen, oldproglen);
break;
}
if (proglen == oldproglen) {
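A toy model of the multi-pass sizing loop behind the seen_or_pass0 change: pass 0 pretends every feature is in use so the full prologue is counted and the length estimate can only shrink, then later passes iterate until the size stops changing:

#include <stdio.h>

#define SEEN_XREG	1
#define SEEN_DATAREF	2
#define SEEN_MEM	4

static unsigned body_len(unsigned seen)
{
	/* pretend the body is 40 bytes plus a few per feature used */
	return 40 + (seen & SEEN_DATAREF ? 12 : 0) + (seen & SEEN_XREG ? 4 : 0);
}

int main(void)
{
	unsigned seen = 0, proglen = 0, oldproglen = ~0u;

	for (int pass = 0; pass < 10; pass++) {
		unsigned seen_or_pass0 = pass == 0 ?
			(SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
		unsigned prologue = seen_or_pass0 ? 8 : 0;

		seen = SEEN_DATAREF;		/* "discovered" while emitting  */
		proglen = prologue + body_len(seen_or_pass0);
		printf("pass %d: %u bytes\n", pass, proglen);
		if (proglen == oldproglen)	/* converged: size is stable    */
			break;
		oldproglen = proglen;
	}
	return 0;
}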
diff --git a/arch/x86/oprofile/init.c b/arch/x86/oprofile/init.c
index cdfe4c5..f148cf6 100644
--- a/arch/x86/oprofile/init.c
+++ b/arch/x86/oprofile/init.c
@@ -21,6 +21,7 @@ extern int op_nmi_timer_init(struct oprofile_operations *ops);
extern void op_nmi_exit(void);
extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth);
+static int nmi_timer;
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
@@ -31,8 +32,9 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
#ifdef CONFIG_X86_LOCAL_APIC
ret = op_nmi_init(ops);
#endif
+ nmi_timer = (ret != 0);
#ifdef CONFIG_X86_IO_APIC
- if (ret < 0)
+ if (nmi_timer)
ret = op_nmi_timer_init(ops);
#endif
ops->backtrace = x86_backtrace;
@@ -44,6 +46,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
void oprofile_arch_exit(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
- op_nmi_exit();
+ if (!nmi_timer)
+ op_nmi_exit();
#endif
}
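A small sketch of the init/exit pairing introduced with nmi_timer: remember whether the timer fallback was chosen at init time so only the matching teardown runs at exit. The functions below are illustrative placeholders, not oprofile interfaces:

#include <stdio.h>

static int nmi_timer;		/* nonzero when the fallback was chosen */

static int primary_init(void)  { return -1; /* pretend NMI PMU init failed */ }
static int fallback_init(void) { printf("using timer fallback\n"); return 0; }
static void primary_exit(void) { printf("primary exit\n"); }

static int profile_init(void)
{
	int ret = primary_init();

	nmi_timer = (ret != 0);
	if (nmi_timer)
		ret = fallback_init();
	return ret;
}

static void profile_exit(void)
{
	if (!nmi_timer)
		primary_exit();	/* nothing to undo for the timer fallback */
}

int main(void)
{
	if (!profile_init())
		profile_exit();
	return 0;
}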
diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile
index 6b8759f..d24d3da 100644
--- a/arch/x86/pci/Makefile
+++ b/arch/x86/pci/Makefile
@@ -18,8 +18,9 @@ obj-$(CONFIG_X86_NUMAQ) += numaq_32.o
obj-$(CONFIG_X86_MRST) += mrst.o
obj-y += common.o early.o
-obj-y += amd_bus.o bus_numa.o
+obj-y += bus_numa.o
+obj-$(CONFIG_AMD_NB) += amd_bus.o
obj-$(CONFIG_PCI_CNB20LE_QUIRK) += broadcom_bus.o
ifeq ($(CONFIG_PCI_DEBUG),y)
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 50b3f14..53f9e68 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -149,7 +149,7 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
struct acpi_resource_address64 addr;
acpi_status status;
unsigned long flags;
- u64 start, end;
+ u64 start, orig_end, end;
status = resource_to_addr(acpi_res, &addr);
if (!ACPI_SUCCESS(status))
@@ -165,7 +165,21 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
return AE_OK;
start = addr.minimum + addr.translation_offset;
- end = addr.maximum + addr.translation_offset;
+ orig_end = end = addr.maximum + addr.translation_offset;
+
+ /* Exclude non-addressable range or non-addressable portion of range */
+ end = min(end, (u64)iomem_resource.end);
+ if (end <= start) {
+ dev_info(&info->bridge->dev,
+ "host bridge window [%#llx-%#llx] "
+ "(ignored, not CPU addressable)\n", start, orig_end);
+ return AE_OK;
+ } else if (orig_end != end) {
+ dev_info(&info->bridge->dev,
+ "host bridge window [%#llx-%#llx] "
+ "([%#llx-%#llx] ignored, not CPU addressable)\n",
+ start, orig_end, end + 1, orig_end);
+ }
res = &info->res[info->res_num];
res->name = info->name;
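A small sketch of the trim-or-drop decision made above: clamp a window [start, end] to the CPU-addressable limit, and skip it entirely when nothing addressable remains. The limit and window values below are made up for the example:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t limit    = 0xfffffffffull;	/* highest CPU-addressable byte */
	uint64_t start    = 0xf00000000ull;
	uint64_t orig_end = 0x1f00000000ull;
	uint64_t end      = orig_end < limit ? orig_end : limit;

	if (end <= start)
		printf("window [%#llx-%#llx] ignored, not CPU addressable\n",
		       (unsigned long long)start, (unsigned long long)orig_end);
	else if (end != orig_end)
		printf("window trimmed to [%#llx-%#llx]\n",
		       (unsigned long long)start, (unsigned long long)end);
	else
		printf("window [%#llx-%#llx] used as-is\n",
		       (unsigned long long)start, (unsigned long long)end);
	return 0;
}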
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index 026e493..385a940 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -30,34 +30,6 @@ static struct pci_hostbridge_probe pci_probes[] __initdata = {
{ 0, 0x18, PCI_VENDOR_ID_AMD, 0x1300 },
};
-static u64 __initdata fam10h_mmconf_start;
-static u64 __initdata fam10h_mmconf_end;
-static void __init get_pci_mmcfg_amd_fam10h_range(void)
-{
- u32 address;
- u64 base, msr;
- unsigned segn_busn_bits;
-
- /* assume all cpus from fam10h have mmconf */
- if (boot_cpu_data.x86 < 0x10)
- return;
-
- address = MSR_FAM10H_MMIO_CONF_BASE;
- rdmsrl(address, msr);
-
- /* mmconfig is not enable */
- if (!(msr & FAM10H_MMIO_CONF_ENABLE))
- return;
-
- base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
-
- segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
- FAM10H_MMIO_CONF_BUSRANGE_MASK;
-
- fam10h_mmconf_start = base;
- fam10h_mmconf_end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
-}
-
#define RANGE_NUM 16
/**
@@ -85,6 +57,9 @@ static int __init early_fill_mp_bus_info(void)
u64 val;
u32 address;
bool found;
+ struct resource fam10h_mmconf_res, *fam10h_mmconf;
+ u64 fam10h_mmconf_start;
+ u64 fam10h_mmconf_end;
if (!early_pci_allowed())
return -1;
@@ -211,12 +186,17 @@ static int __init early_fill_mp_bus_info(void)
subtract_range(range, RANGE_NUM, 0, end);
/* get mmconfig */
- get_pci_mmcfg_amd_fam10h_range();
+ fam10h_mmconf = amd_get_mmconfig_range(&fam10h_mmconf_res);
/* need to take out mmconf range */
- if (fam10h_mmconf_end) {
- printk(KERN_DEBUG "Fam 10h mmconf [%llx, %llx]\n", fam10h_mmconf_start, fam10h_mmconf_end);
+ if (fam10h_mmconf) {
+ printk(KERN_DEBUG "Fam 10h mmconf %pR\n", fam10h_mmconf);
+ fam10h_mmconf_start = fam10h_mmconf->start;
+ fam10h_mmconf_end = fam10h_mmconf->end;
subtract_range(range, RANGE_NUM, fam10h_mmconf_start,
fam10h_mmconf_end + 1);
+ } else {
+ fam10h_mmconf_start = 0;
+ fam10h_mmconf_end = 0;
}
/* mmio resource */
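A sketch of how the removed helper decoded the fam10h MMCONFIG MSR: an enable bit, a base-address field, and a bus-range field whose value b means the region spans 1 << (b + 20) bytes. The constants and the sample MSR value below are illustrative:

#include <stdio.h>
#include <stdint.h>

#define CONF_ENABLE		(1ULL << 0)
#define CONF_BUSRANGE_SHIFT	2
#define CONF_BUSRANGE_MASK	0xfULL
#define CONF_BASE_SHIFT		20
#define CONF_BASE_MASK		0xfffffffULL

int main(void)
{
	uint64_t msr = 0xe0000021;	/* base 0xe0000000, busrange 8, enabled */
	uint64_t base, end;
	unsigned bits;

	if (!(msr & CONF_ENABLE)) {
		printf("MMCONFIG disabled\n");
		return 0;
	}
	base = msr & (CONF_BASE_MASK << CONF_BASE_SHIFT);
	bits = (msr >> CONF_BUSRANGE_SHIFT) & CONF_BUSRANGE_MASK;
	end  = base + (1ULL << (bits + 20)) - 1;	/* 256 buses x 1 MiB */
	printf("MMCONFIG [%#llx-%#llx]\n",
	       (unsigned long long)base, (unsigned long long)end);
	return 0;
}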
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index 7000e74..fe73276 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -678,36 +678,40 @@ static int __init sfi_parse_devs(struct sfi_table_header *table)
pentry = (struct sfi_device_table_entry *)sb->pentry;
for (i = 0; i < num; i++, pentry++) {
- if (pentry->irq != (u8)0xff) { /* native RTE case */
+ int irq = pentry->irq;
+
+ if (irq != (u8)0xff) { /* native RTE case */
/* these SPI2 devices are not exposed to system as PCI
* devices, but they have separate RTE entry in IOAPIC
* so we have to enable them one by one here
*/
- ioapic = mp_find_ioapic(pentry->irq);
+ ioapic = mp_find_ioapic(irq);
irq_attr.ioapic = ioapic;
- irq_attr.ioapic_pin = pentry->irq;
+ irq_attr.ioapic_pin = irq;
irq_attr.trigger = 1;
irq_attr.polarity = 1;
- io_apic_set_pci_routing(NULL, pentry->irq, &irq_attr);
- }
+ io_apic_set_pci_routing(NULL, irq, &irq_attr);
+ } else
+ irq = 0; /* No irq */
+
switch (pentry->type) {
case SFI_DEV_TYPE_IPC:
/* ID as IRQ is a hack that will go away */
- pdev = platform_device_alloc(pentry->name, pentry->irq);
+ pdev = platform_device_alloc(pentry->name, irq);
if (pdev == NULL) {
pr_err("out of memory for SFI platform device '%s'.\n",
pentry->name);
continue;
}
- install_irq_resource(pdev, pentry->irq);
+ install_irq_resource(pdev, irq);
pr_debug("info[%2d]: IPC bus, name = %16.16s, "
- "irq = 0x%2x\n", i, pentry->name, pentry->irq);
+ "irq = 0x%2x\n", i, pentry->name, irq);
sfi_handle_ipc_dev(pdev);
break;
case SFI_DEV_TYPE_SPI:
memset(&spi_info, 0, sizeof(spi_info));
strncpy(spi_info.modalias, pentry->name, SFI_NAME_LEN);
- spi_info.irq = pentry->irq;
+ spi_info.irq = irq;
spi_info.bus_num = pentry->host_num;
spi_info.chip_select = pentry->addr;
spi_info.max_speed_hz = pentry->max_freq;
@@ -724,7 +728,7 @@ static int __init sfi_parse_devs(struct sfi_table_header *table)
memset(&i2c_info, 0, sizeof(i2c_info));
bus = pentry->host_num;
strncpy(i2c_info.type, pentry->name, SFI_NAME_LEN);
- i2c_info.irq = pentry->irq;
+ i2c_info.irq = irq;
i2c_info.addr = pentry->addr;
pr_debug("info[%2d]: I2C bus = %d, name = %16.16s, "
"irq = 0x%2x, addr = 0x%x\n", i, bus,
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 68e467f..edf435b 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -115,9 +115,6 @@ early_param("nobau", setup_nobau);
/* base pnode in this partition */
static int uv_base_pnode __read_mostly;
-/* position of pnode (which is nasid>>1): */
-static int uv_nshift __read_mostly;
-static unsigned long uv_mmask __read_mostly;
static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
static DEFINE_PER_CPU(struct bau_control, bau_control);
@@ -1426,7 +1423,7 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
{
int i;
int cpu;
- unsigned long pa;
+ unsigned long gpa;
unsigned long m;
unsigned long n;
size_t dsize;
@@ -1442,9 +1439,9 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
bau_desc = kmalloc_node(dsize, GFP_KERNEL, node);
BUG_ON(!bau_desc);
- pa = uv_gpa(bau_desc); /* need the real nasid*/
- n = pa >> uv_nshift;
- m = pa & uv_mmask;
+ gpa = uv_gpa(bau_desc);
+ n = uv_gpa_to_gnode(gpa);
+ m = uv_gpa_to_offset(gpa);
/* the 14-bit pnode */
write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
@@ -1516,9 +1513,9 @@ static void pq_init(int node, int pnode)
bcp->queue_last = pqp + (DEST_Q_SIZE - 1);
}
/*
- * need the pnode of where the memory was really allocated
+ * need the gnode of where the memory was really allocated
*/
- pn = uv_gpa(pqp) >> uv_nshift;
+ pn = uv_gpa_to_gnode(uv_gpa(pqp));
first = uv_physnodeaddr(pqp);
pn_first = ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) | first;
last = uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1));
@@ -1578,14 +1575,14 @@ static int calculate_destination_timeout(void)
ts_ns = base * mult1 * mult2;
ret = ts_ns / 1000;
} else {
- /* 4 bits 0/1 for 10/80us, 3 bits of multiplier */
- mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
+ /* 4 bits 0/1 for 10/80us base, 3 bits of multiplier */
+ mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
- mult1 = 80;
+ base = 80;
else
- mult1 = 10;
- base = mmr_image & UV2_ACK_MASK;
+ base = 10;
+ mult1 = mmr_image & UV2_ACK_MASK;
ret = mult1 * base;
}
return ret;
@@ -1812,8 +1809,6 @@ static int __init uv_bau_init(void)
zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu));
}
- uv_nshift = uv_hub_info->m_val;
- uv_mmask = (1UL << uv_hub_info->m_val) - 1;
nuvhubs = uv_num_possible_blades();
spin_lock_init(&disable_lock);
congested_cycles = usec_2_cycles(congested_respns_us);
@@ -1825,6 +1820,8 @@ static int __init uv_bau_init(void)
uv_base_pnode = uv_blade_to_pnode(uvhub);
}
+ enable_timeouts();
+
if (init_per_cpu(nuvhubs, uv_base_pnode)) {
nobau = 1;
return 0;
@@ -1835,7 +1832,6 @@ static int __init uv_bau_init(void)
if (uv_blade_nr_possible_cpus(uvhub))
init_uvhub(uvhub, vector, uv_base_pnode);
- enable_timeouts();
alloc_intr_gate(vector, uv_bau_message_intr1);
for_each_possible_blade(uvhub) {
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 67d69f1..0fb662a 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1337,7 +1337,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
int cpu = (long)hcpu;
switch (action) {
case CPU_UP_PREPARE:
- per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
+ xen_vcpu_setup(cpu);
if (xen_have_vector_callback)
xen_init_lock_cpu(cpu);
break;
@@ -1367,7 +1367,6 @@ static void __init xen_hvm_guest_init(void)
xen_hvm_smp_init();
register_cpu_notifier(&xen_hvm_cpu_notifier);
xen_unplug_emulated_devices();
- have_vcpu_info_placement = 0;
x86_init.irqs.intr_init = xen_init_IRQ;
xen_hvm_init_time_ops();
xen_hvm_init_mmu_ops();
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index acea42e..f8dcda4 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -192,9 +192,21 @@ static unsigned long __init xen_get_max_pages(void)
domid_t domid = DOMID_SELF;
int ret;
- ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
- if (ret > 0)
- max_pages = ret;
+ /*
+ * For the initial domain we use the maximum reservation as
+ * the maximum page.
+ *
+ * For guest domains the current maximum reservation reflects
+ * the current maximum rather than the static maximum. In this
+ * case the e820 map provided to us will cover the static
+ * maximum region.
+ */
+ if (xen_initial_domain()) {
+ ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
+ if (ret > 0)
+ max_pages = ret;
+ }
+
return min(max_pages, MAX_DOMAIN_PAGES);
}