author    Alex Shi <alex.shi@intel.com>      2012-06-28 09:02:18 +0800
committer H. Peter Anvin <hpa@zytor.com>     2012-06-27 19:29:09 -0700
commit    d8dfe60d6dcad5989c4558b753b98d657e2813c0 (patch)
tree      82fd0ea40feab13ab533a5851b6371c1a7a05a59 /arch/x86/mm/tlb.c
parent    e7b52ffd45a6d834473f43b349e7d86593d763c7 (diff)
x86/tlb: fall back to flush all when meet a THP large page
We don't need to flush large pages in PAGE_SIZE steps; that just wastes
time. And in fact, large pages don't benefit from the 'invlpg'
optimization according to our micro-benchmark. So just flushing the
whole TLB is enough for them.

The following results were measured on a 2-socket Nehalem-EP machine
(4 cores and 2 hyperthreads per socket), with THP set to 'always'.
The test is multi-threaded; the '-t' parameter is the thread count:

                      without this patch    with this patch
./mprotect -t 1       14ns                  13ns
./mprotect -t 2       13ns                  13ns
./mprotect -t 4       12ns                  11ns
./mprotect -t 8       14ns                  10ns
./mprotect -t 16      28ns                  28ns
./mprotect -t 32      54ns                  52ns
./mprotect -t 128     200ns                 200ns

Signed-off-by: Alex Shi <alex.shi@intel.com>
Link: http://lkml.kernel.org/r/1340845344-27557-4-git-send-email-alex.shi@intel.com
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
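The mprotect harness quoted above is not part of the commit. The
following is a minimal sketch of that kind of micro-benchmark, under
stated assumptions: a per-thread 2MB anonymous mapping, MADV_HUGEPAGE to
encourage THP backing, and the region size, iteration count, timing
method, and '-t' argument handling are all guesses, not the original
tool.

/* bench_mprotect.c - hypothetical reconstruction of the mprotect
 * micro-benchmark quoted in the commit message.
 * Build: gcc -O2 -pthread bench_mprotect.c -o mprotect */
#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <time.h>

#define REGION (2UL * 1024 * 1024)	/* one 2MB (THP-sized) region per thread */
#define ITERS  200000L

static void *worker(void *arg)
{
	(void)arg;
	/* Private anonymous mapping; MADV_HUGEPAGE asks the kernel to back
	 * it with a THP (with THP 'always' this happens anyway). */
	char *p = mmap(NULL, REGION, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return NULL;
	}
	madvise(p, REGION, MADV_HUGEPAGE);
	memset(p, 1, REGION);		/* fault the pages in */

	struct timespec t0, t1;
	clock_gettime(CLOCK_MONOTONIC, &t0);
	for (long i = 0; i < ITERS; i++) {
		/* Each protection change ends up in flush_tlb_range(),
		 * the function modified by this patch. */
		mprotect(p, REGION, PROT_READ);
		mprotect(p, REGION, PROT_READ | PROT_WRITE);
	}
	clock_gettime(CLOCK_MONOTONIC, &t1);

	long ns = (t1.tv_sec - t0.tv_sec) * 1000000000L +
		  (t1.tv_nsec - t0.tv_nsec);
	printf("avg %ld ns per mprotect() call\n", ns / (2 * ITERS));
	munmap(p, REGION);
	return NULL;
}

int main(int argc, char **argv)
{
	int nthreads = (argc > 2 && !strcmp(argv[1], "-t")) ? atoi(argv[2]) : 1;
	if (nthreads < 1)
		nthreads = 1;
	pthread_t *tid = calloc(nthreads, sizeof(*tid));

	for (int i = 0; i < nthreads; i++)
		pthread_create(&tid[i], NULL, worker, NULL);
	for (int i = 0; i < nthreads; i++)
		pthread_join(tid[i], NULL);
	free(tid);
	return 0;
}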
Diffstat (limited to 'arch/x86/mm/tlb.c')
-rw-r--r--  arch/x86/mm/tlb.c  34
1 file changed, 34 insertions(+), 0 deletions(-)
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 3b91c98..184a02a 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -318,12 +318,42 @@ void flush_tlb_mm(struct mm_struct *mm)
 #define FLUSHALL_BAR	16
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline unsigned long has_large_page(struct mm_struct *mm,
+				unsigned long start, unsigned long end)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	unsigned long addr = ALIGN(start, HPAGE_SIZE);
+	for (; addr < end; addr += HPAGE_SIZE) {
+		pgd = pgd_offset(mm, addr);
+		if (likely(!pgd_none(*pgd))) {
+			pud = pud_offset(pgd, addr);
+			if (likely(!pud_none(*pud))) {
+				pmd = pmd_offset(pud, addr);
+				if (likely(!pmd_none(*pmd)))
+					if (pmd_large(*pmd))
+						return addr;
+			}
+		}
+	}
+	return 0;
+}
+#else
+static inline unsigned long has_large_page(struct mm_struct *mm,
+				unsigned long start, unsigned long end)
+{
+	return 0;
+}
+#endif
 void flush_tlb_range(struct vm_area_struct *vma,
 				   unsigned long start, unsigned long end)
 {
 	struct mm_struct *mm;
 
 	if (!cpu_has_invlpg || vma->vm_flags & VM_HUGETLB) {
+flush_all:
 		flush_tlb_mm(vma->vm_mm);
 		return;
 	}
@@ -346,6 +376,10 @@ void flush_tlb_range(struct vm_area_struct *vma,
 		if ((end - start)/PAGE_SIZE > act_entries/FLUSHALL_BAR)
 			local_flush_tlb();
 		else {
+			if (has_large_page(mm, start, end)) {
+				preempt_enable();
+				goto flush_all;
+			}
 			for (addr = start; addr < end;
 					addr += PAGE_SIZE)
 				__flush_tlb_single(addr);
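Taken together, the flush policy in flush_tlb_range() after this patch
is: full flush if invlpg is unavailable or the VMA is hugetlb, full
flush if the range exceeds act_entries/FLUSHALL_BAR pages, full flush if
the range contains a THP large page, and per-page invlpg otherwise.
Below is a standalone userspace sketch of that decision, not kernel
code: 'act_entries' stands in for the value the kernel derives from the
CPU's TLB entry count and mm->total_vm, and 'range_has_thp' stands in
for the has_large_page() walk added above.

/* flush_policy.c - illustration of the range-flush heuristic */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE    4096UL
#define FLUSHALL_BAR 16

static bool should_flush_all(unsigned long start, unsigned long end,
			     unsigned long act_entries, bool range_has_thp)
{
	/* Too many pages: per-page invlpg costs more than a full flush. */
	if ((end - start) / PAGE_SIZE > act_entries / FLUSHALL_BAR)
		return true;
	/* A large page in range: invlpg gains nothing there, so the
	 * patch jumps to the flush_all label instead. */
	if (range_has_thp)
		return true;
	return false;
}

int main(void)
{
	/* With 64 usable TLB entries, any range wider than 4 pages takes
	 * the full-flush path; a THP in range forces it at any width. */
	printf("%d\n", should_flush_all(0, 5 * PAGE_SIZE, 64, false)); /* 1 */
	printf("%d\n", should_flush_all(0, 2 * PAGE_SIZE, 64, true));  /* 1 */
	printf("%d\n", should_flush_all(0, 2 * PAGE_SIZE, 64, false)); /* 0 */
	return 0;
}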