Diffstat (limited to 'arch/x86/lib')
 arch/x86/lib/msr-on-cpu.c  | 22 ++++++++++++----------
 arch/x86/lib/usercopy_32.c |  7 +++++++
 2 files changed, 19 insertions(+), 10 deletions(-)
diff --git a/arch/x86/lib/msr-on-cpu.c b/arch/x86/lib/msr-on-cpu.c
index d5a2b39..01b868b 100644
--- a/arch/x86/lib/msr-on-cpu.c
+++ b/arch/x86/lib/msr-on-cpu.c
@@ -30,10 +30,11 @@ static int _rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h, int safe)
 
 	rv.msr_no = msr_no;
 	if (safe) {
-		smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
-		err = rv.err;
+		err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu,
+					       &rv, 1);
+		err = err ? err : rv.err;
 	} else {
-		smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
+		err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
 	}
 	*l = rv.l;
 	*h = rv.h;
@@ -64,23 +65,24 @@ static int _wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h, int safe)
 	rv.l = l;
 	rv.h = h;
 	if (safe) {
-		smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
-		err = rv.err;
+		err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu,
+					       &rv, 1);
+		err = err ? err : rv.err;
 	} else {
-		smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
+		err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
 	}
 
 	return err;
 }
 
-void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 {
-	_wrmsr_on_cpu(cpu, msr_no, l, h, 0);
+	return _wrmsr_on_cpu(cpu, msr_no, l, h, 0);
 }
 
-void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
 {
-	_rdmsr_on_cpu(cpu, msr_no, l, h, 0);
+	return _rdmsr_on_cpu(cpu, msr_no, l, h, 0);
 }
 
 /* These "safe" variants are slower and should be used when the target MSR
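With this change the result of the cross-CPU call is propagated to callers, so a failed IPI (for example, one targeting an offline CPU) is no longer silently ignored. A minimal caller sketch; read_aperf_on() and the hard-coded MSR number are illustrative, not part of this patch:

/*
 * Hypothetical caller sketch (not part of this patch): read
 * MSR_IA32_APERF (0xe8) on a given CPU and propagate the error
 * instead of discarding it, now that rdmsr_on_cpu() returns int.
 */
static int read_aperf_on(unsigned int cpu, u64 *val)
{
	u32 lo, hi;
	int err;

	err = rdmsr_on_cpu(cpu, 0xe8, &lo, &hi);
	if (err)
		return err;	/* e.g. -ENXIO if the CPU is offline */

	*val = ((u64)hi << 32) | lo;
	return 0;
}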
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 24e6094..9e68075 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -14,6 +14,13 @@
 #include <asm/uaccess.h>
 #include <asm/mmx.h>
 
+#ifdef CONFIG_X86_INTEL_USERCOPY
+/*
+ * Alignment at which movsl is preferred for bulk memory copies.
+ */
+struct movsl_mask movsl_mask __read_mostly;
+#endif
+
 static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n)
 {
 #ifdef CONFIG_X86_INTEL_USERCOPY
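The function body is truncated above; for context, __movsl_is_ok() consults movsl_mask to decide whether a bulk copy may use rep movsl. A sketch of that check, reconstructed from the surrounding definitions and hedged accordingly, not shown in this diff:

/*
 * Sketch (assumption, reconstructed from context): permit movsl only
 * when source and destination agree in the masked low alignment bits,
 * for copies large enough for the distinction to matter.
 */
static inline int __movsl_is_ok(unsigned long a1, unsigned long a2,
				unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
	if (n >= 64 && ((a1 ^ a2) & movsl_mask.mask))
		return 0;	/* misaligned relative to each other */
#endif
	return 1;
}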