author     Linus Torvalds <torvalds@linux-foundation.org>   2010-03-19 13:42:43 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2010-03-19 13:42:43 -0700
commit     95c46afe6034d15bdf0f95d69f25489cecad9a47 (patch)
tree       a3bff5de25890f6de94cbf431bb0f94073893507
parent     cf3966bf6ecfe25a75d625771e327b84878bc09d (diff)
parent     191aee58b6568cf8143901bfa3f57a9b8faa6f1c (diff)
Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
  powerpc: Remove IOMMU_VMERGE config option
  powerpc: Fix swiotlb to respect the boot option
  powerpc: Do not call prink when CONFIG_PRINTK is not defined
  powerpc: Use correct ccr bit for syscall error status
  powerpc/fsl-booke: Get coherent bit from PTE
  powerpc/85xx: Make sure lwarx hint isn't set on ppc32
-rw-r--r--  arch/powerpc/Kconfig                   | 13
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h  |  6
-rw-r--r--  arch/powerpc/include/asm/syscall.h     |  6
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S   |  7
-rw-r--r--  arch/powerpc/kernel/iommu.c            |  7
-rw-r--r--  arch/powerpc/kernel/setup_32.c         |  6
-rw-r--r--  arch/powerpc/kernel/setup_64.c         |  6
-rw-r--r--  arch/powerpc/mm/mem.c                  |  6
8 files changed, 17 insertions(+), 40 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 8a54eb8..2e19500 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -313,19 +313,6 @@ config 8XX_MINIMAL_FPEMU
It is recommended that you build a soft-float userspace instead.
-config IOMMU_VMERGE
- bool "Enable IOMMU virtual merging"
- depends on PPC64
- default y
- help
- Cause IO segments sent to a device for DMA to be merged virtually
- by the IOMMU when they happen to have been allocated contiguously.
- This doesn't add pressure to the IOMMU allocator. However, some
- drivers don't support getting large merged segments coming back
- from *_map_sg().
-
- Most drivers don't have this problem; it is safe to say Y here.
-
config IOMMU_HELPER
def_bool PPC64
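
The help text removed above is the only place "virtual merging" was described, so here is a rough, self-contained sketch of what it does (this is not the kernel's iommu_map_sg(); the struct and function names are invented for illustration): scatterlist entries that the IOMMU happened to map at back-to-back DMA addresses get reported to the driver as one larger segment.

	struct seg {
		unsigned long dma_addr;
		unsigned long len;
	};

	/* Coalesce entries whose IOMMU mappings ended up contiguous in DMA space. */
	static int merge_segments(const struct seg *in, int nents, struct seg *out)
	{
		int i, n = 0;

		for (i = 0; i < nents; i++) {
			if (n && out[n - 1].dma_addr + out[n - 1].len == in[i].dma_addr)
				out[n - 1].len += in[i].len;	/* extend previous segment */
			else
				out[n++] = in[i];		/* start a new segment */
		}
		return n;
	}

With the config option gone, novmerge simply defaults to 0 (see the arch/powerpc/kernel/iommu.c hunk further down), so merging stays enabled.
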
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index aea7147..d553bbe 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -25,7 +25,7 @@
#define PPC_INST_LDARX 0x7c0000a8
#define PPC_INST_LSWI 0x7c0004aa
#define PPC_INST_LSWX 0x7c00042a
-#define PPC_INST_LWARX 0x7c000029
+#define PPC_INST_LWARX 0x7c000028
#define PPC_INST_LWSYNC 0x7c2004ac
#define PPC_INST_LXVD2X 0x7c000698
#define PPC_INST_MCRXR 0x7c000400
@@ -62,8 +62,8 @@
#define __PPC_T_TLB(t) (((t) & 0x3) << 21)
#define __PPC_WC(w) (((w) & 0x3) << 21)
/*
- * Only use the larx hint bit on 64bit CPUs. Once we verify it doesn't have
- * any side effects on all 32bit processors, we can do this all the time.
+ * Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a
+ * larx with EH set as an illegal instruction.
*/
#ifdef CONFIG_PPC64
#define __PPC_EH(eh) (((eh) & 0x1) << 0)
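
To make the opcode change above concrete, here is a small stand-alone sketch of how the lwarx word is assembled (field macros renamed for illustration; the kernel's own helpers live next to these defines): the base value 0x7c000028 is primary opcode 31 with extended opcode 20 and the EH hint clear, and the hint is only ORed back in on 64-bit builds, since e500v1/v2 take an illegal-instruction exception when EH is set.

	#include <stdint.h>

	#define INST_LWARX	0x7c000028u	/* opcode 31, xo 20, EH = 0 */

	#define FLD_RT(t)	(((uint32_t)(t) & 0x1f) << 21)
	#define FLD_RA(a)	(((uint32_t)(a) & 0x1f) << 16)
	#define FLD_RB(b)	(((uint32_t)(b) & 0x1f) << 11)

	#ifdef CONFIG_PPC64
	#define FLD_EH(eh)	(((uint32_t)(eh) & 0x1) << 0)	/* reservation hint */
	#else
	#define FLD_EH(eh)	0u	/* never set the hint on 32-bit parts */
	#endif

	/* e.g. lwarx r3,0,r3,1 -> 0x7c601829 on ppc64, 0x7c601828 on ppc32 */
	static inline uint32_t lwarx_word(int rt, int ra, int rb, int eh)
	{
		return INST_LWARX | FLD_RT(rt) | FLD_RA(ra) | FLD_RB(rb) | FLD_EH(eh);
	}
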
diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
index efa7f0b..23913e9 100644
--- a/arch/powerpc/include/asm/syscall.h
+++ b/arch/powerpc/include/asm/syscall.h
@@ -30,7 +30,7 @@ static inline void syscall_rollback(struct task_struct *task,
static inline long syscall_get_error(struct task_struct *task,
struct pt_regs *regs)
{
- return (regs->ccr & 0x1000) ? -regs->gpr[3] : 0;
+ return (regs->ccr & 0x10000000) ? -regs->gpr[3] : 0;
}
static inline long syscall_get_return_value(struct task_struct *task,
@@ -44,10 +44,10 @@ static inline void syscall_set_return_value(struct task_struct *task,
int error, long val)
{
if (error) {
- regs->ccr |= 0x1000L;
+ regs->ccr |= 0x10000000L;
regs->gpr[3] = -error;
} else {
- regs->ccr &= ~0x1000L;
+ regs->ccr &= ~0x10000000L;
regs->gpr[3] = val;
}
}
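
For reference, the bit being corrected is the summary-overflow flag of CR0: CR0 is the most significant 4-bit field of the condition register, so in pt_regs->ccr its SO bit is mask 0x10000000, not 0x1000. A minimal sketch of the convention the code relies on (helper name invented for illustration):

	#define PPC_CR0_SO	0x10000000UL	/* CR0[SO]: set on syscall failure */

	/* Error iff CR0[SO] is set; r3 then holds the positive errno. */
	static inline long ccr_syscall_error(unsigned long ccr, unsigned long r3)
	{
		return (ccr & PPC_CR0_SO) ? -(long)r3 : 0;
	}
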
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 25793bb..7255265 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -747,9 +747,6 @@ finish_tlb_load:
#else
rlwimi r12, r11, 26, 27, 31 /* extract WIMGE from pte */
#endif
-#ifdef CONFIG_SMP
- ori r12, r12, MAS2_M
-#endif
mtspr SPRN_MAS2, r12
#ifdef CONFIG_PTE_64BIT
@@ -887,13 +884,17 @@ KernelSPE:
lwz r3,_MSR(r1)
oris r3,r3,MSR_SPE@h
stw r3,_MSR(r1) /* enable use of SPE after return */
+#ifdef CONFIG_PRINTK
lis r3,87f@h
ori r3,r3,87f@l
mr r4,r2 /* current */
lwz r5,_NIP(r1)
bl printk
+#endif
b ret_from_except
+#ifdef CONFIG_PRINTK
87: .string "SPE used in kernel (task=%p, pc=%x) \n"
+#endif
.align 4,0
#endif /* CONFIG_SPE */
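
The first hunk in this file implements "Get coherent bit from PTE": instead of unconditionally OR-ing MAS2_M into the TLB entry on SMP, the rlwimi already copies the PTE's whole WIMGE field into the low bits of MAS2, so the M (coherence) bit now comes from the page table. A C rendering of that insert, as a sketch only (bit layout assumed from the e500 MAS2 definition; names are illustrative):

	#define MAS2_WIMGE_MASK	0x1fUL	/* W, I, M, G, E in the low five bits */

	/* MAS2 = effective page number bits | attributes taken from the PTE */
	static unsigned long mas2_value(unsigned long epn, unsigned long pte_wimge)
	{
		return (epn & ~MAS2_WIMGE_MASK) | (pte_wimge & MAS2_WIMGE_MASK);
	}
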
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 5547ae6..ec94f90 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -42,12 +42,7 @@
#define DBG(...)
-#ifdef CONFIG_IOMMU_VMERGE
-static int novmerge = 0;
-#else
-static int novmerge = 1;
-#endif
-
+static int novmerge;
static int protect4gb = 1;
static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index b152de3..8f58986 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -39,7 +39,6 @@
#include <asm/serial.h>
#include <asm/udbg.h>
#include <asm/mmu_context.h>
-#include <asm/swiotlb.h>
#include "setup.h"
@@ -343,11 +342,6 @@ void __init setup_arch(char **cmdline_p)
ppc_md.setup_arch();
if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab);
-#ifdef CONFIG_SWIOTLB
- if (ppc_swiotlb_enable)
- swiotlb_init(1);
-#endif
-
paging_init();
/* Initialize the MMU context management stuff */
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 6354739..9143891 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -61,7 +61,6 @@
#include <asm/xmon.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
-#include <asm/swiotlb.h>
#include <asm/mmu_context.h>
#include "setup.h"
@@ -541,11 +540,6 @@ void __init setup_arch(char **cmdline_p)
if (ppc_md.setup_arch)
ppc_md.setup_arch();
-#ifdef CONFIG_SWIOTLB
- if (ppc_swiotlb_enable)
- swiotlb_init(1);
-#endif
-
paging_init();
/* Initialize the MMU context management stuff */
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 311224c..448f972 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -48,6 +48,7 @@
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
+#include <asm/swiotlb.h>
#include "mmu_decl.h"
@@ -320,6 +321,11 @@ void __init mem_init(void)
struct page *page;
unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
+#ifdef CONFIG_SWIOTLB
+ if (ppc_swiotlb_enable)
+ swiotlb_init(1);
+#endif
+
num_physpages = lmb.memory.size >> PAGE_SHIFT;
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
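
Net effect of the last three files: the swiotlb bring-up moves out of the 32- and 64-bit setup_arch() paths and into the common mem_init(), per the "Fix swiotlb to respect the boot option" entry in the merge summary. A consolidated view of the new call site (sketch only, surrounding code elided):

	void __init mem_init(void)
	{
	#ifdef CONFIG_SWIOTLB
		/* Done here once for both 32- and 64-bit, after boot options
		 * and the memory layout have been settled. */
		if (ppc_swiotlb_enable)
			swiotlb_init(1);
	#endif

		/* ... existing mem_init() body continues ... */
	}
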