author     H. Peter Anvin <hpa@zytor.com>   2009-11-23 14:49:20 -0800
committer  H. Peter Anvin <hpa@zytor.com>   2009-11-23 17:09:59 -0800
commit     8a27138924f64d2f30c1022f909f74480046bc3f (patch)
tree       72d7d40e1cba07b08fea49a064c6388a6d2bd780 /arch
parent     55a6ca25472ee01574bfc24d23b7f5fa09cc38cf (diff)
x86, mm: is_untracked_pat_range() takes a normal semiclosed range
is_untracked_pat_range() -- like its components is_ISA_range() and
is_GRU_range() -- takes a normal semiclosed interval (>=, <), whereas the
PAT code called it as if it took a closed range (>=, <=).  Fix.

Although this is a bug, I believe it is non-manifest, simply because none
of the callers will call this with non-page-aligned addresses.

Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Suresh Siddha <suresh.b.siddha@intel.com>
LKML-Reference: <20091119202341.GA4420@sgi.com>
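As context for the fix: in the semiclosed convention the "end" argument is
exclusive, so the caller must pass start + size, not the last byte
(start + size - 1). Below is a minimal user-space sketch of that convention;
the is_isa_range() helper, the REGION_* bounds, and the test values are
illustrative assumptions, not the kernel code touched by this patch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bounds for a "low ISA"-style region; REGION_END is exclusive. */
#define REGION_START 0x0ULL
#define REGION_END   0x100000ULL

/* Semiclosed convention: returns true if [start, end) lies inside the region. */
static bool is_isa_range(uint64_t start, uint64_t end)
{
	return start >= REGION_START && end <= REGION_END;
}

int main(void)
{
	uint64_t paddr = 0xFFFFF, size = 2;	/* crosses the boundary by one byte */

	/* Correct call: exclusive end, paddr + size -> prints 0 ("outside"). */
	printf("%d\n", is_isa_range(paddr, paddr + size));

	/* Pre-patch style call with a closed end, paddr + size - 1: the range is
	 * silently shrunk by one byte and wrongly reported inside -> prints 1. */
	printf("%d\n", is_isa_range(paddr, paddr + size - 1));
	return 0;
}

With page-aligned start and size, as in all of the callers changed below, the
one-byte difference never crosses a page-aligned region boundary, which is why
the commit message describes the bug as non-manifest.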
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/pgtable.h  2
-rw-r--r--  arch/x86/mm/pat.c               6
2 files changed, 4 insertions, 4 deletions
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 1de2094..a34c785 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -274,7 +274,7 @@ static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
 	/*
 	 * PAT type is always WB for untracked ranges, so no need to check.
 	 */
-	if (x86_platform.is_untracked_pat_range(paddr, paddr + size - 1))
+	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
 		return 1;
 
 	/*
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 38a66ef..b5bc08c 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -394,7 +394,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 	}
 
 	/* Low ISA region is always mapped WB in page table. No need to track */
-	if (x86_platform.is_untracked_pat_range(start, end - 1)) {
+	if (x86_platform.is_untracked_pat_range(start, end)) {
 		if (new_type)
 			*new_type = _PAGE_CACHE_WB;
 		return 0;
@@ -505,7 +505,7 @@ int free_memtype(u64 start, u64 end)
 		return 0;
 
 	/* Low ISA region is always mapped WB. No need to track */
-	if (x86_platform.is_untracked_pat_range(start, end - 1))
+	if (x86_platform.is_untracked_pat_range(start, end))
 		return 0;
 
 	is_range_ram = pat_pagerange_is_ram(start, end);
@@ -588,7 +588,7 @@ static unsigned long lookup_memtype(u64 paddr)
 	int rettype = _PAGE_CACHE_WB;
 	struct memtype *entry;
 
-	if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE - 1))
+	if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
 		return rettype;
 
 	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {