| author | Heiko Carstens <heiko.carstens@de.ibm.com> | 2011-05-26 09:48:30 +0200 |
|---|---|---|
| committer | Heiko Carstens <heiko.carstens@de.ibm.com> | 2011-05-26 09:48:25 +0200 |
| commit | 33ce614029576b8585e271fd7d90746a37114a15 (patch) | |
| tree | 04f6a764b0ed82f0d6d0647b64547ad24d19830b /arch/s390 | |
| parent | 99583181cbf2252dd0554eef6f419a6b22cd33ea (diff) | |
[S390] mm: add page fault retry handling
s390 arch backend for d065bd81 ("mm: retry page fault when blocking on disk transfer").
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Diffstat (limited to 'arch/s390')
-rw-r--r-- | arch/s390/mm/fault.c | 41 |
1 file changed, 28 insertions(+), 13 deletions(-)
```diff
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index b57723a..fe103e8 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -280,7 +280,8 @@ static inline int do_exception(struct pt_regs *regs, int access,
 	struct mm_struct *mm;
 	struct vm_area_struct *vma;
 	unsigned long address;
-	int fault, write;
+	unsigned int flags;
+	int fault;
 
 	if (notify_page_fault(regs))
 		return 0;
@@ -299,6 +300,10 @@ static inline int do_exception(struct pt_regs *regs, int access,
 	address = trans_exc_code & __FAIL_ADDR_MASK;
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 
+	flags = FAULT_FLAG_ALLOW_RETRY;
+	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
+		flags |= FAULT_FLAG_WRITE;
+retry:
 	down_read(&mm->mmap_sem);
 
 	fault = VM_FAULT_BADMAP;
@@ -328,21 +333,31 @@ static inline int do_exception(struct pt_regs *regs, int access,
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	write = (access == VM_WRITE ||
-		 (trans_exc_code & store_indication) == 0x400) ?
-		FAULT_FLAG_WRITE : 0;
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, flags);
 	if (unlikely(fault & VM_FAULT_ERROR))
 		goto out_up;
 
-	if (fault & VM_FAULT_MAJOR) {
-		tsk->maj_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
-			      regs, address);
-	} else {
-		tsk->min_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
-			      regs, address);
+	/*
+	 * Major/minor page fault accounting is only done on the
+	 * initial attempt. If we go through a retry, it is extremely
+	 * likely that the page will be found in page cache at that point.
+	 */
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR) {
+			tsk->maj_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+				      regs, address);
+		} else {
+			tsk->min_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+				      regs, address);
+		}
+		if (fault & VM_FAULT_RETRY) {
+			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+			 * of starvation. */
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			goto retry;
+		}
 	}
 	/*
 	 * The instruction that caused the program check will
```
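For readers unfamiliar with the retry scheme, the control flow this patch adds to do_exception() can be sketched as a small stand-alone C program: major/minor fault accounting happens only on the initial attempt, and a retry (with the allow-retry flag cleared first) happens at most once. This is only a sketch of the pattern; the macros and `handle_fault_sim()` below are illustrative stand-ins mirroring the kernel's FAULT_FLAG_* / VM_FAULT_* bits, not the actual kernel API.

```c
#include <stdio.h>

/* Illustrative stand-ins for the kernel's FAULT_FLAG_* / VM_FAULT_* bits. */
#define FLAG_ALLOW_RETRY 0x01
#define FLAG_WRITE       0x02
#define RES_MAJOR        0x04
#define RES_RETRY        0x08

static unsigned long maj_flt, min_flt;

/*
 * Fake fault handler standing in for handle_mm_fault(): pretend the first
 * attempt blocked on disk (major fault, retry requested) and the second
 * attempt finds the page ready.
 */
static int handle_fault_sim(unsigned int flags)
{
	static int calls;

	(void)flags;
	return ++calls == 1 ? (RES_MAJOR | RES_RETRY) : 0;
}

int main(void)
{
	unsigned int flags = FLAG_ALLOW_RETRY | FLAG_WRITE;
	int fault;

retry:
	fault = handle_fault_sim(flags);
	if (flags & FLAG_ALLOW_RETRY) {
		/* Account major/minor faults only on the initial attempt. */
		if (fault & RES_MAJOR)
			maj_flt++;
		else
			min_flt++;
		if (fault & RES_RETRY) {
			/* Allow at most one retry to avoid starvation. */
			flags &= ~FLAG_ALLOW_RETRY;
			goto retry;
		}
	}
	printf("maj_flt=%lu min_flt=%lu\n", maj_flt, min_flt);
	return 0;
}
```

Clearing the allow-retry bit before jumping back to the retry label is what bounds the loop: the second pass can no longer request a retry, so the fault is either resolved or fails, and the counters record exactly one fault.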