author    H. Peter Anvin <hpa@linux.intel.com>    2012-09-21 12:43:15 -0700
committer H. Peter Anvin <hpa@linux.intel.com>    2012-09-21 12:45:27 -0700
commit    5e88353d8b5f483bc1c873ad24ac2b59a6b66c73 (patch)
tree      f5651873e535e4feabea0da5d253d0326b4bd029 /arch/x86/include
parent    40d3cd6695014bf3c44e2ca66b610b18acaf923d (diff)
x86, smap: Reduce the SMAP overhead for signal handling
Signal handling contains a bunch of accesses to individual user space
items, which causes an excessive number of STAC and CLAC instructions.
Instead, let get/put_user_try ... get/put_user_catch() contain the STAC
and CLAC instructions.

This means that get/put_user_try no longer nests, and furthermore that
it is no longer legal to use user space access functions other than
__get/put_user_ex() inside those blocks.  However, these macros are
x86-specific anyway and are only used in the signal-handling paths; a
simple reordering that moves the larger subroutine calls out of the
try...catch blocks resolves that problem.

Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Link: http://lkml.kernel.org/r/1348256595-29119-12-git-send-email-hpa@linux.intel.com
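As a hedged illustration of the reordering described above (not code from this
patch: the frame layout, field names, and the setup_frame_sketch() /
pick_restorer_sketch() helpers are invented), a signal-setup path now keeps
only put_user_ex() accesses inside the try...catch block, so the single
STAC/CLAC pair emitted by put_user_try/put_user_catch covers all of them:

/*
 * Hypothetical example only -- not taken from arch/x86/kernel/signal.c.
 * Assumes the x86 put_user_try/put_user_ex/put_user_catch macros from
 * <asm/uaccess.h>; the struct and helper names are made up.
 */
struct sigframe_sketch {
	void __user *pretcode;		/* return trampoline */
	int sig;			/* signal number */
	unsigned long flags;
};

static int setup_frame_sketch(int sig, unsigned long flags,
			      struct sigframe_sketch __user *frame)
{
	/* Larger subroutine calls stay *outside* the try block ... */
	void __user *restorer = pick_restorer_sketch();	/* hypothetical helper */
	int err = 0;

	put_user_try {
		/* ... so only put_user_ex() runs between the STAC and CLAC */
		put_user_ex(restorer, &frame->pretcode);
		put_user_ex(sig, &frame->sig);
		put_user_ex(flags, &frame->flags);
	} put_user_catch(err);

	return err;
}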
Diffstat (limited to 'arch/x86/include')
-rw-r--r--	arch/x86/include/asm/uaccess.h	14
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index b92ece1..a91acfb 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -416,9 +416,8 @@ do { \
} while (0)
#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
- asm volatile(ASM_STAC "\n" \
- "1: mov"itype" %1,%"rtype"0\n" \
- "2: " ASM_CLAC "\n" \
+ asm volatile("1: mov"itype" %1,%"rtype"0\n" \
+ "2:\n" \
_ASM_EXTABLE_EX(1b, 2b) \
: ltype(x) : "m" (__m(addr)))
@@ -460,9 +459,8 @@ struct __large_struct { unsigned long buf[100]; };
: ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
#define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
- asm volatile(ASM_STAC "\n" \
- "1: mov"itype" %"rtype"0,%1\n" \
- "2: " ASM_CLAC "\n" \
+ asm volatile("1: mov"itype" %"rtype"0,%1\n" \
+ "2:\n" \
_ASM_EXTABLE_EX(1b, 2b) \
: : ltype(x), "m" (__m(addr)))
@@ -470,13 +468,13 @@ struct __large_struct { unsigned long buf[100]; };
* uaccess_try and catch
*/
#define uaccess_try do { \
- int prev_err = current_thread_info()->uaccess_err; \
current_thread_info()->uaccess_err = 0; \
+ stac(); \
barrier();
#define uaccess_catch(err) \
+ clac(); \
(err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
- current_thread_info()->uaccess_err = prev_err; \
} while (0)
/**
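Read together with the last hunk, the cost model after this patch is: one
stac() when uaccess_try (and therefore get_user_try/put_user_try) opens the
block, one clac() when uaccess_catch closes it, regardless of how many
accesses sit in between. A rough sketch of a sigcontext-style restore path
under that assumption (the struct layout and function name below are
invented, not the kernel's):

/*
 * Hypothetical sketch; the struct and its fields are made up. With this
 * patch the whole block executes exactly one STAC (at get_user_try) and
 * one CLAC (at get_user_catch) instead of one pair per get_user_ex().
 */
struct sigcontext_sketch {
	unsigned long ip, sp, flags;
};

static int restore_sketch(struct sigcontext_sketch __user *sc,
			  struct pt_regs *regs)
{
	int err = 0;

	get_user_try {
		get_user_ex(regs->ip, &sc->ip);
		get_user_ex(regs->sp, &sc->sp);
		get_user_ex(regs->flags, &sc->flags);
	} get_user_catch(err);

	return err;
}

Because the block now owns the STAC/CLAC pair, nesting another
get_user_try or calling a non-_ex user access helper inside it would be
incorrect after this change, as the commit message notes.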