author     Todd Poynor <toddpoynor@google.com>    2013-01-15 19:33:11 -0800
committer  Todd Poynor <toddpoynor@google.com>    2013-01-15 19:33:11 -0800
commit     fbad46f30d0ccb24fcadf4fa129d917fd0bfa1fe (patch)
tree       b7ed0216b986c7a0c8f7ab7f2b77191d9b070ead /arch
parent     2627bab94940583e37f36ffa9f635e35a335c33d (diff)
parent     2a68fec1118107fbaa40c631d45641d40d88ad71 (diff)
Merge commit 'v3.0.58' into android-3.0
Change-Id: I05959ed26f71cf9197df59291e8e13f254b2115c
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/include/asm/hwcap.h                 |  3
-rw-r--r--  arch/arm/include/asm/vfpmacros.h             | 12
-rw-r--r--  arch/arm/kernel/swp_emulate.c                |  2
-rw-r--r--  arch/arm/mach-dove/include/mach/pm.h         |  2
-rw-r--r--  arch/arm/mach-dove/irq.c                     | 14
-rw-r--r--  arch/arm/mm/mmu.c                            |  2
-rw-r--r--  arch/arm/vfp/vfpmodule.c                     |  9
-rw-r--r--  arch/cris/include/asm/io.h                   | 39
-rw-r--r--  arch/m68k/include/asm/signal.h               |  6
-rw-r--r--  arch/mips/Makefile                           |  2
-rw-r--r--  arch/mips/kernel/Makefile                    |  2
-rw-r--r--  arch/parisc/kernel/signal32.c                |  6
-rw-r--r--  arch/parisc/kernel/sys_parisc.c              |  2
-rw-r--r--  arch/powerpc/kernel/ptrace.c                 | 18
-rw-r--r--  arch/powerpc/kernel/sysfs.c                  | 10
-rw-r--r--  arch/powerpc/kernel/traps.c                  |  3
-rw-r--r--  arch/s390/mm/gup.c                           |  2
-rw-r--r--  arch/sparc/include/asm/hugetlb.h             | 10
-rw-r--r--  arch/sparc/kernel/signal_64.c                |  4
-rw-r--r--  arch/x86/include/asm/ptrace.h                | 15
-rw-r--r--  arch/x86/include/asm/system.h                |  7
-rw-r--r--  arch/x86/kernel/cpu/amd.c                    | 14
-rw-r--r--  arch/x86/kernel/cpu/mcheck/therm_throt.c     | 29
-rw-r--r--  arch/x86/kernel/hpet.c                       |  4
-rw-r--r--  arch/x86/kernel/microcode_amd.c              |  4
-rw-r--r--  arch/x86/kernel/process.c                    | 24
-rw-r--r--  arch/x86/kernel/ptrace.c                     | 30
27 files changed, 172 insertions, 103 deletions
diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h
index c1062c3..1e3f30f 100644
--- a/arch/arm/include/asm/hwcap.h
+++ b/arch/arm/include/asm/hwcap.h
@@ -18,8 +18,9 @@
#define HWCAP_THUMBEE 2048
#define HWCAP_NEON 4096
#define HWCAP_VFPv3 8192
-#define HWCAP_VFPv3D16 16384
+#define HWCAP_VFPv3D16 (1 << 14) /* also set for VFPv4-D16 */
#define HWCAP_TLS 32768
+#define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
/*
diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h
index bf53047..c49c8f7 100644
--- a/arch/arm/include/asm/vfpmacros.h
+++ b/arch/arm/include/asm/vfpmacros.h
@@ -27,9 +27,9 @@
#if __LINUX_ARM_ARCH__ <= 6
ldr \tmp, =elf_hwcap @ may not have MVFR regs
ldr \tmp, [\tmp, #0]
- tst \tmp, #HWCAP_VFPv3D16
- ldceql p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
- addne \base, \base, #32*4 @ step over unused register space
+ tst \tmp, #HWCAP_VFPD32
+ ldcnel p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
+ addeq \base, \base, #32*4 @ step over unused register space
#else
VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field
@@ -51,9 +51,9 @@
#if __LINUX_ARM_ARCH__ <= 6
ldr \tmp, =elf_hwcap @ may not have MVFR regs
ldr \tmp, [\tmp, #0]
- tst \tmp, #HWCAP_VFPv3D16
- stceql p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
- addne \base, \base, #32*4 @ step over unused register space
+ tst \tmp, #HWCAP_VFPD32
+ stcnel p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
+ addeq \base, \base, #32*4 @ step over unused register space
#else
VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
and \tmp, \tmp, #MVFR0_A_SIMD_MASK @ A_SIMD field
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 40ee7e5..0951a32 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -108,10 +108,12 @@ static void set_segfault(struct pt_regs *regs, unsigned long addr)
{
siginfo_t info;
+ down_read(&current->mm->mmap_sem);
if (find_vma(current->mm, addr) == NULL)
info.si_code = SEGV_MAPERR;
else
info.si_code = SEGV_ACCERR;
+ up_read(&current->mm->mmap_sem);
info.si_signo = SIGSEGV;
info.si_errno = 0;
diff --git a/arch/arm/mach-dove/include/mach/pm.h b/arch/arm/mach-dove/include/mach/pm.h
index 3ad9f94..11799c3 100644
--- a/arch/arm/mach-dove/include/mach/pm.h
+++ b/arch/arm/mach-dove/include/mach/pm.h
@@ -45,7 +45,7 @@ static inline int pmu_to_irq(int pin)
static inline int irq_to_pmu(int irq)
{
- if (IRQ_DOVE_PMU_START < irq && irq < NR_IRQS)
+ if (IRQ_DOVE_PMU_START <= irq && irq < NR_IRQS)
return irq - IRQ_DOVE_PMU_START;
return -EINVAL;
diff --git a/arch/arm/mach-dove/irq.c b/arch/arm/mach-dove/irq.c
index f07fd16..9f2fd10 100644
--- a/arch/arm/mach-dove/irq.c
+++ b/arch/arm/mach-dove/irq.c
@@ -61,8 +61,20 @@ static void pmu_irq_ack(struct irq_data *d)
int pin = irq_to_pmu(d->irq);
u32 u;
+ /*
+ * The PMU mask register is not RW0C: it is RW. This means that
+ * the bits take whatever value is written to them; if you write
+ * a '1', you will set the interrupt.
+ *
+ * Unfortunately this means there is NO race free way to clear
+ * these interrupts.
+ *
+ * So, let's structure the code so that the window is as small as
+ * possible.
+ */
u = ~(1 << (pin & 31));
- writel(u, PMU_INTERRUPT_CAUSE);
+ u &= readl_relaxed(PMU_INTERRUPT_CAUSE);
+ writel_relaxed(u, PMU_INTERRUPT_CAUSE);
}
static struct irq_chip pmu_irq_chip = {
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 594d677..19d9369 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -467,7 +467,7 @@ static void __init build_mem_type_table(void)
}
for (i = 0; i < 16; i++) {
- unsigned long v = pgprot_val(protection_map[i]);
+ pteval_t v = pgprot_val(protection_map[i]);
protection_map[i] = __pgprot(v | user_pgprot);
}
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 78829fa..192e9dd 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -587,11 +587,14 @@ static int __init vfp_init(void)
elf_hwcap |= HWCAP_VFPv3;
/*
- * Check for VFPv3 D16. CPUs in this configuration
- * only have 16 x 64bit registers.
+ * Check for VFPv3 D16 and VFPv4 D16. CPUs in
+ * this configuration only have 16 x 64bit
+ * registers.
*/
if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1)
- elf_hwcap |= HWCAP_VFPv3D16;
+ elf_hwcap |= HWCAP_VFPv3D16; /* also v4-D16 */
+ else
+ elf_hwcap |= HWCAP_VFPD32;
}
#endif
#ifdef CONFIG_NEON
diff --git a/arch/cris/include/asm/io.h b/arch/cris/include/asm/io.h
index 32567bc..ac12ae2 100644
--- a/arch/cris/include/asm/io.h
+++ b/arch/cris/include/asm/io.h
@@ -133,12 +133,39 @@ static inline void writel(unsigned int b, volatile void __iomem *addr)
#define insb(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,1,count) : 0)
#define insw(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,2,count) : 0)
#define insl(port,addr,count) (cris_iops ? cris_iops->read_io(port,addr,4,count) : 0)
-#define outb(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,1,1)
-#define outw(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,2,1)
-#define outl(data,port) if (cris_iops) cris_iops->write_io(port,(void*)(unsigned)data,4,1)
-#define outsb(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,1,count)
-#define outsw(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,2,count)
-#define outsl(port,addr,count) if(cris_iops) cris_iops->write_io(port,(void*)addr,3,count)
+static inline void outb(unsigned char data, unsigned int port)
+{
+ if (cris_iops)
+ cris_iops->write_io(port, (void *) &data, 1, 1);
+}
+static inline void outw(unsigned short data, unsigned int port)
+{
+ if (cris_iops)
+ cris_iops->write_io(port, (void *) &data, 2, 1);
+}
+static inline void outl(unsigned int data, unsigned int port)
+{
+ if (cris_iops)
+ cris_iops->write_io(port, (void *) &data, 4, 1);
+}
+static inline void outsb(unsigned int port, const void *addr,
+ unsigned long count)
+{
+ if (cris_iops)
+ cris_iops->write_io(port, (void *)addr, 1, count);
+}
+static inline void outsw(unsigned int port, const void *addr,
+ unsigned long count)
+{
+ if (cris_iops)
+ cris_iops->write_io(port, (void *)addr, 2, count);
+}
+static inline void outsl(unsigned int port, const void *addr,
+ unsigned long count)
+{
+ if (cris_iops)
+ cris_iops->write_io(port, (void *)addr, 4, count);
+}
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
diff --git a/arch/m68k/include/asm/signal.h b/arch/m68k/include/asm/signal.h
index 5bc09c7..0b6b0e5 100644
--- a/arch/m68k/include/asm/signal.h
+++ b/arch/m68k/include/asm/signal.h
@@ -156,7 +156,7 @@ typedef struct sigaltstack {
static inline void sigaddset(sigset_t *set, int _sig)
{
asm ("bfset %0{%1,#1}"
- : "+od" (*set)
+ : "+o" (*set)
: "id" ((_sig - 1) ^ 31)
: "cc");
}
@@ -164,7 +164,7 @@ static inline void sigaddset(sigset_t *set, int _sig)
static inline void sigdelset(sigset_t *set, int _sig)
{
asm ("bfclr %0{%1,#1}"
- : "+od" (*set)
+ : "+o" (*set)
: "id" ((_sig - 1) ^ 31)
: "cc");
}
@@ -180,7 +180,7 @@ static inline int __gen_sigismember(sigset_t *set, int _sig)
int ret;
asm ("bfextu %1{%2,#1},%0"
: "=d" (ret)
- : "od" (*set), "id" ((_sig-1) ^ 31)
+ : "o" (*set), "id" ((_sig-1) ^ 31)
: "cc");
return ret;
}
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 884819c..9aa60f6 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -236,7 +236,7 @@ KBUILD_CPPFLAGS += -D"DATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)"
LDFLAGS += -m $(ld-emul)
ifdef CONFIG_MIPS
-CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -xc /dev/null | \
+CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \
sed -e 's/^\#define /-D/' -e "s/ /='/" -e "s/$$/'/")
ifdef CONFIG_64BIT
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 83bba33..8b3c62c 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -100,7 +100,7 @@ obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o
obj-$(CONFIG_OF) += prom.o
-CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
+CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o
diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c
index e141324..d0ea054 100644
--- a/arch/parisc/kernel/signal32.c
+++ b/arch/parisc/kernel/signal32.c
@@ -67,7 +67,8 @@ put_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz)
{
compat_sigset_t s;
- if (sz != sizeof *set) panic("put_sigset32()");
+ if (sz != sizeof *set)
+ return -EINVAL;
sigset_64to32(&s, set);
return copy_to_user(up, &s, sizeof s);
@@ -79,7 +80,8 @@ get_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz)
compat_sigset_t s;
int r;
- if (sz != sizeof *set) panic("put_sigset32()");
+ if (sz != sizeof *set)
+ return -EINVAL;
if ((r = copy_from_user(&s, up, sz)) == 0) {
sigset_32to64(set, &s);
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index c9b9322..7ea75d1 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -73,6 +73,8 @@ static unsigned long get_shared_area(struct address_space *mapping,
struct vm_area_struct *vma;
int offset = mapping ? get_offset(mapping) : 0;
+ offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000;
+
addr = DCACHE_ALIGN(addr - offset) + offset;
for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index cb22024..9321d0f 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1497,9 +1497,14 @@ long arch_ptrace(struct task_struct *child, long request,
if (index < PT_FPR0) {
tmp = ptrace_get_reg(child, (int) index);
} else {
+ unsigned int fpidx = index - PT_FPR0;
+
flush_fp_to_thread(child);
- tmp = ((unsigned long *)child->thread.fpr)
- [TS_FPRWIDTH * (index - PT_FPR0)];
+ if (fpidx < (PT_FPSCR - PT_FPR0))
+ tmp = ((unsigned long *)child->thread.fpr)
+ [fpidx * TS_FPRWIDTH];
+ else
+ tmp = child->thread.fpscr.val;
}
ret = put_user(tmp, datalp);
break;
@@ -1525,9 +1530,14 @@ long arch_ptrace(struct task_struct *child, long request,
if (index < PT_FPR0) {
ret = ptrace_put_reg(child, index, data);
} else {
+ unsigned int fpidx = index - PT_FPR0;
+
flush_fp_to_thread(child);
- ((unsigned long *)child->thread.fpr)
- [TS_FPRWIDTH * (index - PT_FPR0)] = data;
+ if (fpidx < (PT_FPSCR - PT_FPR0))
+ ((unsigned long *)child->thread.fpr)
+ [fpidx * TS_FPRWIDTH] = data;
+ else
+ child->thread.fpscr.val = data;
ret = 0;
}
break;
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index f0f2199..cd51a5c 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -192,6 +192,14 @@ static ssize_t show_dscr_default(struct sysdev_class *class,
return sprintf(buf, "%lx\n", dscr_default);
}
+static void update_dscr(void *dummy)
+{
+ if (!current->thread.dscr_inherit) {
+ current->thread.dscr = dscr_default;
+ mtspr(SPRN_DSCR, dscr_default);
+ }
+}
+
static ssize_t __used store_dscr_default(struct sysdev_class *class,
struct sysdev_class_attribute *attr, const char *buf,
size_t count)
@@ -204,6 +212,8 @@ static ssize_t __used store_dscr_default(struct sysdev_class *class,
return -EINVAL;
dscr_default = val;
+ on_each_cpu(update_dscr, NULL, 1);
+
return count;
}
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 1a01414..6889f26 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -935,8 +935,9 @@ static int emulate_instruction(struct pt_regs *regs)
cpu_has_feature(CPU_FTR_DSCR)) {
PPC_WARN_EMULATED(mtdscr, regs);
rd = (instword >> 21) & 0x1f;
- mtspr(SPRN_DSCR, regs->gpr[rd]);
+ current->thread.dscr = regs->gpr[rd];
current->thread.dscr_inherit = 1;
+ mtspr(SPRN_DSCR, current->thread.dscr);
return 0;
}
#endif
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 65cb06e..4ccf9f5 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -183,7 +183,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
addr = start;
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;
- if (end < start)
+ if ((end < start) || (end > TASK_SIZE))
goto slow_irqon;
/*
diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h
index 1770610..f368cef 100644
--- a/arch/sparc/include/asm/hugetlb.h
+++ b/arch/sparc/include/asm/hugetlb.h
@@ -58,14 +58,20 @@ static inline pte_t huge_pte_wrprotect(pte_t pte)
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- ptep_set_wrprotect(mm, addr, ptep);
+ pte_t old_pte = *ptep;
+ set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
}
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty)
{
- return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+ int changed = !pte_same(*ptep, pte);
+ if (changed) {
+ set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+ flush_tlb_page(vma, addr);
+ }
+ return changed;
}
static inline pte_t huge_ptep_get(pte_t *ptep)
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index d58260b..77d4761 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -309,9 +309,7 @@ void do_rt_sigreturn(struct pt_regs *regs)
err |= restore_fpu_state(regs, fpu_save);
err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
- err |= do_sigaltstack(&sf->stack, NULL, (unsigned long)sf);
-
- if (err)
+ if (err || do_sigaltstack(&sf->stack, NULL, (unsigned long)sf) == -EFAULT)
goto segv;
err |= __get_user(rwin_save, &sf->rwin_save);
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 94e7618..f332d64 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -187,21 +187,14 @@ static inline int v8086_mode(struct pt_regs *regs)
#endif
}
-/*
- * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
- * when it traps. The previous stack will be directly underneath the saved
- * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
- *
- * This is valid only for kernel mode traps.
- */
-static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
-{
#ifdef CONFIG_X86_32
- return (unsigned long)(&regs->sp);
+extern unsigned long kernel_stack_pointer(struct pt_regs *regs);
#else
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
return regs->sp;
-#endif
}
+#endif
#define GET_IP(regs) ((regs)->ip)
#define GET_FP(regs) ((regs)->bp)
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index c2ff2a1..f0d89d9 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -93,10 +93,6 @@ do { \
"memory"); \
} while (0)
-/*
- * disable hlt during certain critical i/o operations
- */
-#define HAVE_DISABLE_HLT
#else
/* frame pointer must be last for get_wchan */
@@ -392,9 +388,6 @@ static inline void clflush(volatile void *__p)
#define nop() asm volatile ("nop")
-void disable_hlt(void);
-void enable_hlt(void);
-
void cpu_idle_wait(void);
extern unsigned long arch_align_stack(unsigned long sp);
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 8115040..3f4b6da 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -554,6 +554,20 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
}
}
+ /*
+ * The way access filter has a performance penalty on some workloads.
+ * Disable it on the affected CPUs.
+ */
+ if ((c->x86 == 0x15) &&
+ (c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
+ u64 val;
+
+ if (!rdmsrl_safe(0xc0011021, &val) && !(val & 0x1E)) {
+ val |= 0x1E;
+ checking_wrmsrl(0xc0011021, val);
+ }
+ }
+
cpu_detect_cache_sizes(c);
/* Multi core CPU? */
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 27c6251..99cd9d2 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -322,17 +322,6 @@ device_initcall(thermal_throttle_init_device);
#endif /* CONFIG_SYSFS */
-/*
- * Set up the most two significant bit to notify mce log that this thermal
- * event type.
- * This is a temp solution. May be changed in the future with mce log
- * infrasture.
- */
-#define CORE_THROTTLED (0)
-#define CORE_POWER_LIMIT ((__u64)1 << 62)
-#define PACKAGE_THROTTLED ((__u64)2 << 62)
-#define PACKAGE_POWER_LIMIT ((__u64)3 << 62)
-
static void notify_thresholds(__u64 msr_val)
{
/* check whether the interrupt handler is defined;
@@ -362,27 +351,23 @@ static void intel_thermal_interrupt(void)
if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT,
THERMAL_THROTTLING_EVENT,
CORE_LEVEL) != 0)
- mce_log_therm_throt_event(CORE_THROTTLED | msr_val);
+ mce_log_therm_throt_event(msr_val);
if (this_cpu_has(X86_FEATURE_PLN))
- if (therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT,
+ therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT,
POWER_LIMIT_EVENT,
- CORE_LEVEL) != 0)
- mce_log_therm_throt_event(CORE_POWER_LIMIT | msr_val);
+ CORE_LEVEL);
if (this_cpu_has(X86_FEATURE_PTS)) {
rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
- if (therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT,
+ therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT,
THERMAL_THROTTLING_EVENT,
- PACKAGE_LEVEL) != 0)
- mce_log_therm_throt_event(PACKAGE_THROTTLED | msr_val);
+ PACKAGE_LEVEL);
if (this_cpu_has(X86_FEATURE_PLN))
- if (therm_throt_process(msr_val &
+ therm_throt_process(msr_val &
PACKAGE_THERM_STATUS_POWER_LIMIT,
POWER_LIMIT_EVENT,
- PACKAGE_LEVEL) != 0)
- mce_log_therm_throt_event(PACKAGE_POWER_LIMIT
- | msr_val);
+ PACKAGE_LEVEL);
}
}
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index aa083d3..0aa649e 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -427,7 +427,7 @@ void hpet_msi_unmask(struct irq_data *data)
/* unmask it */
cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
- cfg |= HPET_TN_FSB;
+ cfg |= HPET_TN_ENABLE | HPET_TN_FSB;
hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
}
@@ -438,7 +438,7 @@ void hpet_msi_mask(struct irq_data *data)
/* mask it */
cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
- cfg &= ~HPET_TN_FSB;
+ cfg &= ~(HPET_TN_ENABLE | HPET_TN_FSB);
hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
}
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index b727450..53ab9ff 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -162,6 +162,7 @@ static unsigned int verify_ucode_size(int cpu, const u8 *buf, unsigned int size)
#define F1XH_MPB_MAX_SIZE 2048
#define F14H_MPB_MAX_SIZE 1824
#define F15H_MPB_MAX_SIZE 4096
+#define F16H_MPB_MAX_SIZE 3458
switch (c->x86) {
case 0x14:
@@ -170,6 +171,9 @@ static unsigned int verify_ucode_size(int cpu, const u8 *buf, unsigned int size)
case 0x15:
max_size = F15H_MPB_MAX_SIZE;
break;
+ case 0x16:
+ max_size = F16H_MPB_MAX_SIZE;
+ break;
default:
max_size = F1XH_MPB_MAX_SIZE;
break;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index e1ba8cb..4272502 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -341,34 +341,10 @@ void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);
#endif
-#ifdef CONFIG_X86_32
-/*
- * This halt magic was a workaround for ancient floppy DMA
- * wreckage. It should be safe to remove.
- */
-static int hlt_counter;
-void disable_hlt(void)
-{
- hlt_counter++;
-}
-EXPORT_SYMBOL(disable_hlt);
-
-void enable_hlt(void)
-{
- hlt_counter--;
-}
-EXPORT_SYMBOL(enable_hlt);
-
-static inline int hlt_use_halt(void)
-{
- return (!hlt_counter && boot_cpu_data.hlt_works_ok);
-}
-#else
static inline int hlt_use_halt(void)
{
return 1;
}
-#endif
/*
* We use this if we don't have any better
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 807c2a2..911e16d 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -21,6 +21,7 @@
#include <linux/signal.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
+#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
@@ -164,6 +165,35 @@ static inline bool invalid_selector(u16 value)
#define FLAG_MASK FLAG_MASK_32
+/*
+ * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
+ * when it traps. The previous stack will be directly underneath the saved
+ * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
+ *
+ * Now, if the stack is empty, '&regs->sp' is out of range. In this
+ * case we try to take the previous stack. To always return a non-null
+ * stack pointer we fall back to regs as stack if no previous stack
+ * exists.
+ *
+ * This is valid only for kernel mode traps.
+ */
+unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+ unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
+ unsigned long sp = (unsigned long)&regs->sp;
+ struct thread_info *tinfo;
+
+ if (context == (sp & ~(THREAD_SIZE - 1)))
+ return sp;
+
+ tinfo = (struct thread_info *)context;
+ if (tinfo->previous_esp)
+ return tinfo->previous_esp;
+
+ return (unsigned long)regs;
+}
+EXPORT_SYMBOL_GPL(kernel_stack_pointer);
+
static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);