Diffstat (limited to 'arch/powerpc/kernel')
 arch/powerpc/kernel/asm-offsets.c          |   4
 arch/powerpc/kernel/cputable.c             |   9
 arch/powerpc/kernel/crash.c                |   4
 arch/powerpc/kernel/entry_32.S             |  95
 arch/powerpc/kernel/entry_64.S             | 101
 arch/powerpc/kernel/head_32.S              |   1
 arch/powerpc/kernel/head_64.S              | 123
 arch/powerpc/kernel/irq.c                  |  30
 arch/powerpc/kernel/lparcfg.c              |   6
 arch/powerpc/kernel/machine_kexec_64.c     |  10
 arch/powerpc/kernel/pci_64.c               |   5
 arch/powerpc/kernel/ppc_ksyms.c            |  26
 arch/powerpc/kernel/process.c              |  39
 arch/powerpc/kernel/prom.c                 |  54
 arch/powerpc/kernel/prom_init.c            |  40
 arch/powerpc/kernel/ptrace.c               |   5
 arch/powerpc/kernel/rtas-rtc.c             |   2
 arch/powerpc/kernel/rtas_pci.c             |  24
 arch/powerpc/kernel/setup_64.c             |   4
 arch/powerpc/kernel/signal_32.c            |  19
 arch/powerpc/kernel/signal_64.c            |   9
 arch/powerpc/kernel/smp.c                  |   4
 arch/powerpc/kernel/sys_ppc32.c            |   4
 arch/powerpc/kernel/systbl.S               |   2
 arch/powerpc/kernel/time.c                 | 545
 arch/powerpc/kernel/traps.c                |   2
 arch/powerpc/kernel/vdso.c                 |   4
 arch/powerpc/kernel/vdso64/gettimeofday.S  |   4
 28 files changed, 569 insertions(+), 606 deletions(-)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 840aad4..882889b 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -92,7 +92,6 @@ int main(void)
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
- DEFINE(TI_SIGFRAME, offsetof(struct thread_info, nvgprs_frame));
DEFINE(TI_TASK, offsetof(struct thread_info, task));
#ifdef CONFIG_PPC32
DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain));
@@ -137,6 +136,9 @@ int main(void)
DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
DEFINE(PACALPPACAPTR, offsetof(struct paca_struct, lppaca_ptr));
DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
+ DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr));
+ DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
+ DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0));
DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 3191be7..39e348a 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -53,8 +53,10 @@ extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
PPC_FEATURE_HAS_MMU)
#define COMMON_USER_PPC64 (COMMON_USER | PPC_FEATURE_64)
#define COMMON_USER_POWER4 (COMMON_USER_PPC64 | PPC_FEATURE_POWER4)
-#define COMMON_USER_POWER5 (COMMON_USER_PPC64 | PPC_FEATURE_POWER5)
-#define COMMON_USER_POWER5_PLUS (COMMON_USER_PPC64 | PPC_FEATURE_POWER5_PLUS)
+#define COMMON_USER_POWER5 (COMMON_USER_PPC64 | PPC_FEATURE_POWER5 |\
+ PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP)
+#define COMMON_USER_POWER5_PLUS (COMMON_USER_PPC64 | PPC_FEATURE_POWER5_PLUS|\
+ PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP)
#define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \
PPC_FEATURE_BOOKE)
@@ -267,7 +269,8 @@ struct cpu_spec cpu_specs[] = {
.cpu_name = "Cell Broadband Engine",
.cpu_features = CPU_FTRS_CELL,
.cpu_user_features = COMMON_USER_PPC64 |
- PPC_FEATURE_CELL | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ PPC_FEATURE_CELL | PPC_FEATURE_HAS_ALTIVEC_COMP |
+ PPC_FEATURE_SMT,
.icache_bsize = 128,
.dcache_bsize = 128,
.cpu_setup = __setup_cpu_be,
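
The cputable.c hunks above also change what userspace sees: the SMT and icache-snoop bits are ORed into the cpu_user_features masks, which the kernel exports through the ELF auxiliary vector as AT_HWCAP. A minimal userspace sketch of testing those bits follows; the bit values are copied in for illustration and assumed to match asm/cputable.h of this era, and getauxval() is a modern-glibc convenience standing in for walking /proc/self/auxv by hand.

#include <stdio.h>
#include <sys/auxv.h>

/* Assumed bit values, mirroring asm/cputable.h (illustrative only). */
#define PPC_FEATURE_SMT          0x00004000
#define PPC_FEATURE_ICACHE_SNOOP 0x00002000

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("SMT:          %s\n", (hwcap & PPC_FEATURE_SMT) ? "yes" : "no");
	printf("icache snoop: %s\n",
	       (hwcap & PPC_FEATURE_ICACHE_SNOOP) ? "yes" : "no");
	return 0;
}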
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 8c21d37..778f22f 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -134,8 +134,10 @@ static void crash_kexec_prepare_cpus(void)
* the crash CPU will send an IPI and wait for other CPUs to
* respond. If not, proceed with the kexec boot even though we failed to
* capture other CPU states.
+ * Delay of at least 10 seconds.
*/
- msecs = 1000000;
+ printk(KERN_ALERT "Sending IPI to other cpus...\n");
+ msecs = 10000;
while ((atomic_read(&waiting_for_crash_ipi) > 0) && (--msecs > 0)) {
barrier();
mdelay(1);
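
The crash.c change above bounds the wait for secondary CPUs to roughly ten seconds (10000 iterations of mdelay(1)) instead of the old million-iteration spin. Here is a userspace analogue of the same bounded-poll pattern, a sketch only: C11 atomics stand in for the kernel's atomic_t and usleep() stands in for mdelay(); the control flow is the point.

#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int waiting = 3;	/* pretend three CPUs never respond */

int main(void)
{
	int msecs = 10000;

	/* Poll until everyone has checked in or the time budget runs out. */
	while (atomic_load(&waiting) > 0 && --msecs > 0)
		usleep(1000);	/* stand-in for mdelay(1) */

	if (atomic_load(&waiting) > 0)
		fprintf(stderr, "proceeding without %d cpus\n",
			atomic_load(&waiting));
	return 0;
}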
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index f20a672..4827ca1 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -227,7 +227,7 @@ ret_from_syscall:
MTMSRD(r10)
lwz r9,TI_FLAGS(r12)
li r8,-_LAST_ERRNO
- andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL|_TIF_RESTORE_SIGMASK)
+ andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
bne- syscall_exit_work
cmplw 0,r3,r8
blt+ syscall_exit_cont
@@ -287,8 +287,10 @@ syscall_dotrace:
syscall_exit_work:
andi. r0,r9,_TIF_RESTOREALL
- bne- 2f
- cmplw 0,r3,r8
+ beq+ 0f
+ REST_NVGPRS(r1)
+ b 2f
+0: cmplw 0,r3,r8
blt+ 1f
andi. r0,r9,_TIF_NOERROR
bne- 1f
@@ -302,9 +304,7 @@ syscall_exit_work:
2: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
beq 4f
- /* Clear per-syscall TIF flags if any are set, but _leave_
- _TIF_SAVE_NVGPRS set in r9 since we haven't dealt with that
- yet. */
+ /* Clear per-syscall TIF flags if any are set. */
li r11,_TIF_PERSYSCALL_MASK
addi r12,r12,TI_FLAGS
@@ -318,8 +318,13 @@ syscall_exit_work:
subi r12,r12,TI_FLAGS
4: /* Anything which requires enabling interrupts? */
- andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SAVE_NVGPRS)
- beq 7f
+ andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
+ beq ret_from_except
+
+ /* Re-enable interrupts */
+ ori r10,r10,MSR_EE
+ SYNC
+ MTMSRD(r10)
/* Save NVGPRS if they're not saved already */
lwz r4,_TRAP(r1)
@@ -328,71 +333,11 @@ syscall_exit_work:
SAVE_NVGPRS(r1)
li r4,0xc00
stw r4,_TRAP(r1)
-
- /* Re-enable interrupts */
-5: ori r10,r10,MSR_EE
- SYNC
- MTMSRD(r10)
-
- andi. r0,r9,_TIF_SAVE_NVGPRS
- bne save_user_nvgprs
-
-save_user_nvgprs_cont:
- andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
- beq 7f
-
+5:
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_syscall_trace_leave
- REST_NVGPRS(r1)
-
-6: lwz r3,GPR3(r1)
- LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
- SYNC
- MTMSRD(r10) /* disable interrupts again */
- rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
- lwz r9,TI_FLAGS(r12)
-7:
- andi. r0,r9,_TIF_NEED_RESCHED
- bne 8f
- lwz r5,_MSR(r1)
- andi. r5,r5,MSR_PR
- beq ret_from_except
- andi. r0,r9,_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK
- beq ret_from_except
- b do_user_signal
-8:
- ori r10,r10,MSR_EE
- SYNC
- MTMSRD(r10) /* re-enable interrupts */
- bl schedule
- b 6b
-
-save_user_nvgprs:
- lwz r8,TI_SIGFRAME(r12)
-
-.macro savewords start, end
- 1: stw \start,4*(\start)(r8)
- .section __ex_table,"a"
- .align 2
- .long 1b,save_user_nvgprs_fault
- .previous
- .if \end - \start
- savewords "(\start+1)",\end
- .endif
-.endm
- savewords 14,31
- b save_user_nvgprs_cont
-
-
-save_user_nvgprs_fault:
- li r3,11 /* SIGSEGV */
- lwz r4,TI_TASK(r12)
- bl force_sigsegv
+ b ret_from_except_full
- rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
- lwz r9,TI_FLAGS(r12)
- b save_user_nvgprs_cont
-
#ifdef SHOW_SYSCALLS
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
@@ -490,6 +435,14 @@ ppc_clone:
stw r0,_TRAP(r1) /* register set saved */
b sys_clone
+ .globl ppc_swapcontext
+ppc_swapcontext:
+ SAVE_NVGPRS(r1)
+ lwz r0,_TRAP(r1)
+ rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
+ stw r0,_TRAP(r1) /* register set saved */
+ b sys_swapcontext
+
/*
* Top-level page fault handling.
* This is in assembler because if do_page_fault tells us that
@@ -683,7 +636,7 @@ user_exc_return: /* r10 contains MSR_KERNEL here */
/* Check current_thread_info()->flags */
rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
lwz r9,TI_FLAGS(r9)
- andi. r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL|_TIF_RESTORE_SIGMASK)
+ andi. r0,r9,(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NEED_RESCHED)
bne do_work
restore_user:
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 79a0c91..1060155 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -61,6 +61,7 @@ system_call_common:
std r12,_MSR(r1)
std r0,GPR0(r1)
std r10,GPR1(r1)
+ ACCOUNT_CPU_USER_ENTRY(r10, r11)
std r2,GPR2(r1)
std r3,GPR3(r1)
std r4,GPR4(r1)
@@ -158,7 +159,7 @@ syscall_exit:
mtmsrd r10,1
ld r9,TI_FLAGS(r12)
li r11,-_LAST_ERRNO
- andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL|_TIF_SAVE_NVGPRS|_TIF_NOERROR|_TIF_RESTORE_SIGMASK)
+ andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
bne- syscall_exit_work
cmpld r3,r11
ld r5,_CCR(r1)
@@ -168,8 +169,9 @@ syscall_error_cont:
stdcx. r0,0,r1 /* to clear the reservation */
andi. r6,r8,MSR_PR
ld r4,_LINK(r1)
- beq- 1f /* only restore r13 if */
- ld r13,GPR13(r1) /* returning to usermode */
+ beq- 1f
+ ACCOUNT_CPU_USER_EXIT(r11, r12)
+ ld r13,GPR13(r1) /* only restore r13 if returning to usermode */
1: ld r2,GPR2(r1)
li r12,MSR_RI
andc r11,r10,r12
@@ -214,8 +216,10 @@ syscall_exit_work:
If TIF_NOERROR is set, just save r3 as it is. */
andi. r0,r9,_TIF_RESTOREALL
- bne- 2f
- cmpld r3,r11 /* r10 is -LAST_ERRNO */
+ beq+ 0f
+ REST_NVGPRS(r1)
+ b 2f
+0: cmpld r3,r11 /* r10 is -LAST_ERRNO */
blt+ 1f
andi. r0,r9,_TIF_NOERROR
bne- 1f
@@ -227,9 +231,7 @@ syscall_exit_work:
2: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
beq 4f
- /* Clear per-syscall TIF flags if any are set, but _leave_
- _TIF_SAVE_NVGPRS set in r9 since we haven't dealt with that
- yet. */
+ /* Clear per-syscall TIF flags if any are set. */
li r11,_TIF_PERSYSCALL_MASK
addi r12,r12,TI_FLAGS
@@ -238,10 +240,9 @@ syscall_exit_work:
stdcx. r10,0,r12
bne- 3b
subi r12,r12,TI_FLAGS
-
-4: bl .save_nvgprs
- /* Anything else left to do? */
- andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SAVE_NVGPRS)
+
+4: /* Anything else left to do? */
+ andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
beq .ret_from_except_lite
/* Re-enable interrupts */
@@ -249,26 +250,10 @@ syscall_exit_work:
ori r10,r10,MSR_EE
mtmsrd r10,1
- andi. r0,r9,_TIF_SAVE_NVGPRS
- bne save_user_nvgprs
-
- /* If tracing, re-enable interrupts and do it */
-save_user_nvgprs_cont:
- andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
- beq 5f
-
+ bl .save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
bl .do_syscall_trace_leave
- REST_NVGPRS(r1)
- clrrdi r12,r1,THREAD_SHIFT
-
- /* Disable interrupts again and handle other work if any */
-5: mfmsr r10
- rldicl r10,r10,48,1
- rotldi r10,r10,16
- mtmsrd r10,1
-
- b .ret_from_except_lite
+ b .ret_from_except
/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
@@ -280,51 +265,6 @@ _GLOBAL(save_nvgprs)
std r0,_TRAP(r1)
blr
-
-save_user_nvgprs:
- ld r10,TI_SIGFRAME(r12)
- andi. r0,r9,_TIF_32BIT
- beq- save_user_nvgprs_64
-
- /* 32-bit save to userspace */
-
-.macro savewords start, end
- 1: stw \start,4*(\start)(r10)
- .section __ex_table,"a"
- .align 3
- .llong 1b,save_user_nvgprs_fault
- .previous
- .if \end - \start
- savewords "(\start+1)",\end
- .endif
-.endm
- savewords 14,31
- b save_user_nvgprs_cont
-
-save_user_nvgprs_64:
- /* 64-bit save to userspace */
-
-.macro savelongs start, end
- 1: std \start,8*(\start)(r10)
- .section __ex_table,"a"
- .align 3
- .llong 1b,save_user_nvgprs_fault
- .previous
- .if \end - \start
- savelongs "(\start+1)",\end
- .endif
-.endm
- savelongs 14,31
- b save_user_nvgprs_cont
-
-save_user_nvgprs_fault:
- li r3,11 /* SIGSEGV */
- ld r4,TI_TASK(r12)
- bl .force_sigsegv
-
- clrrdi r12,r1,THREAD_SHIFT
- ld r9,TI_FLAGS(r12)
- b save_user_nvgprs_cont
/*
* The sigsuspend and rt_sigsuspend system calls can call do_signal
@@ -350,6 +290,16 @@ _GLOBAL(ppc_clone)
bl .sys_clone
b syscall_exit
+_GLOBAL(ppc32_swapcontext)
+ bl .save_nvgprs
+ bl .compat_sys_swapcontext
+ b syscall_exit
+
+_GLOBAL(ppc64_swapcontext)
+ bl .save_nvgprs
+ bl .sys_swapcontext
+ b syscall_exit
+
_GLOBAL(ret_from_fork)
bl .schedule_tail
REST_NVGPRS(r1)
@@ -536,6 +486,7 @@ restore:
* userspace
*/
beq 1f
+ ACCOUNT_CPU_USER_EXIT(r3, r4)
REST_GPR(13, r1)
1:
ld r3,_CTR(r1)
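
The ACCOUNT_CPU_USER_ENTRY/EXIT macros used in the entry_64.S hunks are assembler macros (added elsewhere in this patch series) operating on the new paca fields declared in asm-offsets.c above. A hedged C rendering of what they accomplish, not the actual implementation:

/* Sketch only: the real macros are asm, take two scratch registers,
 * and read the PURR when CPU_FTR_PURR is set, the timebase otherwise. */
static void account_cpu_user_entry(void)	/* on entry from userspace */
{
	u64 now = read_purr();
	get_paca()->user_time += now - get_paca()->startpurr;
	get_paca()->startpurr = now;	/* system time accrues from here */
}

static void account_cpu_user_exit(void)	/* on return to userspace */
{
	u64 now = read_purr();
	get_paca()->system_time += now - get_paca()->startpurr;
	get_paca()->startpurr = now;	/* user time accrues from here */
}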
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 03b25f9..a0579e8 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -714,6 +714,7 @@ AltiVecUnavailable:
#ifdef CONFIG_ALTIVEC
bne load_up_altivec /* if from user, just load it up */
#endif /* CONFIG_ALTIVEC */
+ addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception)
PerformanceMonitor:
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 7ebb736..35084f3 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -137,7 +137,7 @@ _GLOBAL(__secondary_hold)
ori r24,r24,MSR_RI
mtmsrd r24 /* RI on */
- /* Grab our linux cpu number */
+ /* Grab our physical cpu number */
mr r24,r3
/* Tell the master cpu we're here */
@@ -151,12 +151,7 @@ _GLOBAL(__secondary_hold)
cmpdi 0,r4,1
bne 100b
-#ifdef CONFIG_HMT
- SET_REG_IMMEDIATE(r4, .hmt_init)
- mtctr r4
- bctr
-#else
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
LOAD_REG_IMMEDIATE(r4, .pSeries_secondary_smp_init)
mtctr r4
mr r3,r24
@@ -164,7 +159,6 @@ _GLOBAL(__secondary_hold)
#else
BUG_OPCODE
#endif
-#endif
/* This value is used to mark exception frames on the stack. */
.section ".toc","aw"
@@ -283,6 +277,7 @@ exception_marker:
std r10,0(r1); /* make stack chain pointer */ \
std r0,GPR0(r1); /* save r0 in stackframe */ \
std r10,GPR1(r1); /* save r1 in stackframe */ \
+ ACCOUNT_CPU_USER_ENTRY(r9, r10); \
std r2,GPR2(r1); /* save r2 in stackframe */ \
SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
@@ -319,7 +314,6 @@ exception_marker:
label##_pSeries: \
HMT_MEDIUM; \
mtspr SPRN_SPRG1,r13; /* save r13 */ \
- RUNLATCH_ON(r13); \
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
#define STD_EXCEPTION_ISERIES(n, label, area) \
@@ -327,7 +321,6 @@ label##_pSeries: \
label##_iSeries: \
HMT_MEDIUM; \
mtspr SPRN_SPRG1,r13; /* save r13 */ \
- RUNLATCH_ON(r13); \
EXCEPTION_PROLOG_ISERIES_1(area); \
EXCEPTION_PROLOG_ISERIES_2; \
b label##_common
@@ -337,7 +330,6 @@ label##_iSeries: \
label##_iSeries: \
HMT_MEDIUM; \
mtspr SPRN_SPRG1,r13; /* save r13 */ \
- RUNLATCH_ON(r13); \
EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \
lbz r10,PACAPROCENABLED(r13); \
cmpwi 0,r10,0; \
@@ -390,6 +382,7 @@ label##_common: \
label##_common: \
EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
DISABLE_INTS; \
+ bl .ppc64_runlatch_on; \
addi r3,r1,STACK_FRAME_OVERHEAD; \
bl hdlr; \
b .ret_from_except_lite
@@ -407,7 +400,6 @@ __start_interrupts:
_machine_check_pSeries:
HMT_MEDIUM
mtspr SPRN_SPRG1,r13 /* save r13 */
- RUNLATCH_ON(r13)
EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
. = 0x300
@@ -434,7 +426,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
data_access_slb_pSeries:
HMT_MEDIUM
mtspr SPRN_SPRG1,r13
- RUNLATCH_ON(r13)
mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
std r3,PACA_EXSLB+EX_R3(r13)
mfspr r3,SPRN_DAR
@@ -460,7 +451,6 @@ data_access_slb_pSeries:
instruction_access_slb_pSeries:
HMT_MEDIUM
mtspr SPRN_SPRG1,r13
- RUNLATCH_ON(r13)
mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
std r3,PACA_EXSLB+EX_R3(r13)
mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
@@ -491,7 +481,6 @@ instruction_access_slb_pSeries:
.globl system_call_pSeries
system_call_pSeries:
HMT_MEDIUM
- RUNLATCH_ON(r9)
mr r9,r13
mfmsr r10
mfspr r13,SPRN_SPRG3
@@ -575,7 +564,6 @@ slb_miss_user_pseries:
system_reset_fwnmi:
HMT_MEDIUM
mtspr SPRN_SPRG1,r13 /* save r13 */
- RUNLATCH_ON(r13)
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
.globl machine_check_fwnmi
@@ -583,7 +571,6 @@ system_reset_fwnmi:
machine_check_fwnmi:
HMT_MEDIUM
mtspr SPRN_SPRG1,r13 /* save r13 */
- RUNLATCH_ON(r13)
EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
#ifdef CONFIG_PPC_ISERIES
@@ -858,6 +845,14 @@ fast_exception_return:
ld r11,_NIP(r1)
andi. r3,r12,MSR_RI /* check if RI is set */
beq- unrecov_fer
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ andi. r3,r12,MSR_PR
+ beq 2f
+ ACCOUNT_CPU_USER_EXIT(r3, r4)
+2:
+#endif
+
ld r3,_CCR(r1)
ld r4,_LINK(r1)
ld r5,_CTR(r1)
@@ -894,7 +889,6 @@ unrecov_fer:
.align 7
.globl data_access_common
data_access_common:
- RUNLATCH_ON(r10) /* It wont fit in the 0x300 handler */
mfspr r10,SPRN_DAR
std r10,PACA_EXGEN+EX_DAR(r13)
mfspr r10,SPRN_DSISR
@@ -1042,6 +1036,7 @@ hardware_interrupt_common:
EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
hardware_interrupt_entry:
DISABLE_INTS
+ bl .ppc64_runlatch_on
addi r3,r1,STACK_FRAME_OVERHEAD
bl .do_IRQ
b .ret_from_except_lite
@@ -1549,6 +1544,9 @@ _STATIC(__boot_from_prom)
mr r28,r6
mr r27,r7
+ /* Align the stack to 16-byte boundary for broken yaboot */
+ rldicr r1,r1,0,59
+
/* Make sure we are running in 64 bits mode */
bl .enable_64b_mode
@@ -1816,22 +1814,6 @@ _STATIC(start_here_multiplatform)
ori r6,r6,MSR_RI
mtmsrd r6 /* RI on */
-#ifdef CONFIG_HMT
- /* Start up the second thread on cpu 0 */
- mfspr r3,SPRN_PVR
- srwi r3,r3,16
- cmpwi r3,0x34 /* Pulsar */
- beq 90f
- cmpwi r3,0x36 /* Icestar */
- beq 90f
- cmpwi r3,0x37 /* SStar */
- beq 90f
- b 91f /* HMT not supported */
-90: li r3,0
- bl .hmt_start_secondary
-91:
-#endif
-
/* The following gets the stack and TOC set up with the regs */
/* pointing to the real addr of the kernel stack. This is */
/* all done to support the C function call below which sets */
@@ -1945,77 +1927,8 @@ _STATIC(start_here_common)
bl .start_kernel
-_GLOBAL(hmt_init)
-#ifdef CONFIG_HMT
- LOAD_REG_IMMEDIATE(r5, hmt_thread_data)
- mfspr r7,SPRN_PVR
- srwi r7,r7,16
- cmpwi r7,0x34 /* Pulsar */
- beq 90f
- cmpwi r7,0x36 /* Icestar */
- beq 91f
- cmpwi r7,0x37 /* SStar */
- beq 91f
- b 101f
-90: mfspr r6,SPRN_PIR
- andi. r6,r6,0x1f
- b 92f
-91: mfspr r6,SPRN_PIR
- andi. r6,r6,0x3ff
-92: sldi r4,r24,3
- stwx r6,r5,r4
- bl .hmt_start_secondary
- b 101f
-
-__hmt_secondary_hold:
- LOAD_REG_IMMEDIATE(r5, hmt_thread_data)
- clrldi r5,r5,4
- li r7,0
- mfspr r6,SPRN_PIR
- mfspr r8,SPRN_PVR
- srwi r8,r8,16
- cmpwi r8,0x34
- bne 93f
- andi. r6,r6,0x1f
- b 103f
-93: andi. r6,r6,0x3f
-
-103: lwzx r8,r5,r7
- cmpw r8,r6
- beq 104f
- addi r7,r7,8
- b 103b
-
-104: addi r7,r7,4
- lwzx r9,r5,r7
- mr r24,r9
-101:
-#endif
- mr r3,r24
- b .pSeries_secondary_smp_init
-
-#ifdef CONFIG_HMT
-_GLOBAL(hmt_start_secondary)
- LOAD_REG_IMMEDIATE(r4,__hmt_secondary_hold)
- clrldi r4,r4,4
- mtspr SPRN_NIADORM, r4
- mfspr r4, SPRN_MSRDORM
- li r5, -65
- and r4, r4, r5
- mtspr SPRN_MSRDORM, r4
- lis r4,0xffef
- ori r4,r4,0x7403
- mtspr SPRN_TSC, r4
- li r4,0x1f4
- mtspr SPRN_TST, r4
- mfspr r4, SPRN_HID0
- ori r4, r4, 0x1
- mtspr SPRN_HID0, r4
- mfspr r4, SPRN_CTRLF
- oris r4, r4, 0x40
- mtspr SPRN_CTRLT, r4
- blr
-#endif
+ /* Not reached */
+ BUG_OPCODE
/*
* We put a few things here that have to be page-aligned.
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index edb2b00..24dc811 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -369,6 +369,7 @@ unsigned int real_irq_to_virt_slowpath(unsigned int real_irq)
return NO_IRQ;
}
+#endif /* CONFIG_PPC64 */
#ifdef CONFIG_IRQSTACKS
struct thread_info *softirq_ctx[NR_CPUS];
@@ -392,10 +393,24 @@ void irq_ctx_init(void)
}
}
+static inline void do_softirq_onstack(void)
+{
+ struct thread_info *curtp, *irqtp;
+
+ curtp = current_thread_info();
+ irqtp = softirq_ctx[smp_processor_id()];
+ irqtp->task = curtp->task;
+ call_do_softirq(irqtp);
+ irqtp->task = NULL;
+}
+
+#else
+#define do_softirq_onstack() __do_softirq()
+#endif /* CONFIG_IRQSTACKS */
+
void do_softirq(void)
{
unsigned long flags;
- struct thread_info *curtp, *irqtp;
if (in_interrupt())
return;
@@ -403,19 +418,18 @@ void do_softirq(void)
local_irq_save(flags);
if (local_softirq_pending()) {
- curtp = current_thread_info();
- irqtp = softirq_ctx[smp_processor_id()];
- irqtp->task = curtp->task;
- call_do_softirq(irqtp);
- irqtp->task = NULL;
+ account_system_vtime(current);
+ local_bh_disable();
+ do_softirq_onstack();
+ account_system_vtime(current);
+ __local_bh_enable();
}
local_irq_restore(flags);
}
EXPORT_SYMBOL(do_softirq);
-#endif /* CONFIG_IRQSTACKS */
-
+#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
distribute_irqs = 0;
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 1ae96a8..e789fef 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -341,7 +341,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
const char *system_id = "";
unsigned int *lp_index_ptr, lp_index = 0;
struct device_node *rtas_node;
- int *lrdrp;
+ int *lrdrp = NULL;
rootdn = find_path_device("/");
if (rootdn) {
@@ -362,7 +362,9 @@ static int lparcfg_data(struct seq_file *m, void *v)
seq_printf(m, "partition_id=%d\n", (int)lp_index);
rtas_node = find_path_device("/rtas");
- lrdrp = (int *)get_property(rtas_node, "ibm,lrdr-capacity", NULL);
+ if (rtas_node)
+ lrdrp = (int *)get_property(rtas_node, "ibm,lrdr-capacity",
+ NULL);
if (lrdrp == NULL) {
partition_potential_processors = vdso_data->processorCount;
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index d643144..ee166c5 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -26,8 +26,6 @@
#include <asm/prom.h>
#include <asm/smp.h>
-#define HASH_GROUP_SIZE 0x80 /* size of each hash group, asm/mmu.h */
-
int default_machine_kexec_prepare(struct kimage *image)
{
int i;
@@ -61,7 +59,7 @@ int default_machine_kexec_prepare(struct kimage *image)
*/
if (htab_address) {
low = __pa(htab_address);
- high = low + (htab_hash_mask + 1) * HASH_GROUP_SIZE;
+ high = low + htab_size_bytes;
for (i = 0; i < image->nr_segments; i++) {
begin = image->segment[i].mem;
@@ -294,7 +292,7 @@ void default_machine_kexec(struct kimage *image)
}
/* Values we need to export to the second kernel via the device tree. */
-static unsigned long htab_base, htab_size, kernel_end;
+static unsigned long htab_base, kernel_end;
static struct property htab_base_prop = {
.name = "linux,htab-base",
@@ -305,7 +303,7 @@ static struct property htab_base_prop = {
static struct property htab_size_prop = {
.name = "linux,htab-size",
.length = sizeof(unsigned long),
- .value = (unsigned char *)&htab_size,
+ .value = (unsigned char *)&htab_size_bytes,
};
static struct property kernel_end_prop = {
@@ -331,8 +329,6 @@ static void __init export_htab_values(void)
htab_base = __pa(htab_address);
prom_add_property(node, &htab_base_prop);
-
- htab_size = 1UL << ppc64_pft_size;
prom_add_property(node, &htab_size_prop);
out:
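
For reference, the range test that htab_size_bytes now feeds (earlier in this file's diff) is a standard half-open interval overlap check. A self-contained sketch with made-up addresses:

#include <stdio.h>

/* A kexec segment [begin, begin+size) must not intersect the hash
 * table [low, high). The intervals overlap iff each one starts
 * before the other ends. */
static int overlaps(unsigned long begin, unsigned long size,
		    unsigned long low, unsigned long high)
{
	return begin < high && begin + size > low;
}

int main(void)
{
	unsigned long low = 0x1000000, high = low + 0x400000; /* 4MB htab */

	printf("%d\n", overlaps(0x1200000, 0x1000, low, high)); /* 1: overlap */
	printf("%d\n", overlaps(0x2000000, 0x1000, low, high)); /* 0: clear */
	return 0;
}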
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index c367520..ba92bab 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -589,7 +589,6 @@ void __devinit scan_phb(struct pci_controller *hose)
#endif /* CONFIG_PPC_MULTIPLATFORM */
if (mode == PCI_PROBE_NORMAL)
hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
- pci_bus_add_devices(bus);
}
static int __init pcibios_init(void)
@@ -608,8 +607,10 @@ static int __init pcibios_init(void)
printk("PCI: Probing PCI hardware\n");
/* Scan all of the recorded PCI controllers. */
- list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
+ list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
scan_phb(hose);
+ pci_bus_add_devices(hose->bus);
+ }
#ifndef CONFIG_PPC_ISERIES
if (pci_probe_only)
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index b212d3e..dfa5398 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -78,15 +78,8 @@ EXPORT_SYMBOL(sys_sigreturn);
EXPORT_SYMBOL(strcpy);
EXPORT_SYMBOL(strncpy);
EXPORT_SYMBOL(strcat);
-EXPORT_SYMBOL(strncat);
-EXPORT_SYMBOL(strchr);
-EXPORT_SYMBOL(strrchr);
-EXPORT_SYMBOL(strpbrk);
-EXPORT_SYMBOL(strstr);
EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strnlen);
EXPORT_SYMBOL(strcmp);
-EXPORT_SYMBOL(strncmp);
EXPORT_SYMBOL(strcasecmp);
EXPORT_SYMBOL(csum_partial);
@@ -116,15 +109,6 @@ EXPORT_SYMBOL(_insw_ns);
EXPORT_SYMBOL(_outsw_ns);
EXPORT_SYMBOL(_insl_ns);
EXPORT_SYMBOL(_outsl_ns);
-EXPORT_SYMBOL(ioremap);
-#ifdef CONFIG_44x
-EXPORT_SYMBOL(ioremap64);
-#endif
-EXPORT_SYMBOL(__ioremap);
-EXPORT_SYMBOL(iounmap);
-#ifdef CONFIG_PPC32
-EXPORT_SYMBOL(ioremap_bot); /* aka VMALLOC_END */
-#endif
#if defined(CONFIG_PPC32) && (defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE))
EXPORT_SYMBOL(ppc_ide_md);
@@ -167,7 +151,6 @@ EXPORT_SYMBOL(__flush_icache_range);
EXPORT_SYMBOL(flush_dcache_range);
#ifdef CONFIG_SMP
-EXPORT_SYMBOL(smp_call_function);
#ifdef CONFIG_PPC32
EXPORT_SYMBOL(smp_hw_index);
#endif
@@ -184,9 +167,6 @@ EXPORT_SYMBOL(adb_try_handler_change);
EXPORT_SYMBOL(cuda_request);
EXPORT_SYMBOL(cuda_poll);
#endif /* CONFIG_ADB_CUDA */
-#ifdef CONFIG_PPC_PMAC
-EXPORT_SYMBOL(sys_ctrler);
-#endif
#ifdef CONFIG_VT
EXPORT_SYMBOL(kd_mksound);
#endif
@@ -204,7 +184,6 @@ EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(memscan);
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memchr);
@@ -213,7 +192,6 @@ EXPORT_SYMBOL(screen_info);
#endif
#ifdef CONFIG_PPC32
-EXPORT_SYMBOL(__delay);
EXPORT_SYMBOL(timer_interrupt);
EXPORT_SYMBOL(irq_desc);
EXPORT_SYMBOL(tb_ticks_per_jiffy);
@@ -221,10 +199,6 @@ EXPORT_SYMBOL(console_drivers);
EXPORT_SYMBOL(cacheable_memcpy);
#endif
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-
#ifdef CONFIG_8xx
EXPORT_SYMBOL(cpm_install_handler);
EXPORT_SYMBOL(cpm_free_handler);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 1201880..1770a06 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -45,9 +45,9 @@
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
+#include <asm/time.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
-#include <asm/time.h>
#endif
extern unsigned long _get_SP(void);
@@ -328,6 +328,11 @@ struct task_struct *__switch_to(struct task_struct *prev,
#endif
local_irq_save(flags);
+
+ account_system_vtime(current);
+ account_process_vtime(current);
+ calculate_steal_time();
+
last = _switch(old_thread, new_thread);
local_irq_restore(flags);
@@ -886,3 +891,35 @@ void dump_stack(void)
show_stack(current, NULL);
}
EXPORT_SYMBOL(dump_stack);
+
+#ifdef CONFIG_PPC64
+void ppc64_runlatch_on(void)
+{
+ unsigned long ctrl;
+
+ if (cpu_has_feature(CPU_FTR_CTRL) && !test_thread_flag(TIF_RUNLATCH)) {
+ HMT_medium();
+
+ ctrl = mfspr(SPRN_CTRLF);
+ ctrl |= CTRL_RUNLATCH;
+ mtspr(SPRN_CTRLT, ctrl);
+
+ set_thread_flag(TIF_RUNLATCH);
+ }
+}
+
+void ppc64_runlatch_off(void)
+{
+ unsigned long ctrl;
+
+ if (cpu_has_feature(CPU_FTR_CTRL) && test_thread_flag(TIF_RUNLATCH)) {
+ HMT_medium();
+
+ clear_thread_flag(TIF_RUNLATCH);
+
+ ctrl = mfspr(SPRN_CTRLF);
+ ctrl &= ~CTRL_RUNLATCH;
+ mtspr(SPRN_CTRLT, ctrl);
+ }
+}
+#endif
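
These two helpers replace the RUNLATCH_ON assembler macro removed from head_64.S. The intended usage pattern, sketched here as hedged pseudocode rather than code quoted from any particular file, is for the idle loop to drop the runlatch while spinning and raise it again when there is real work:

/* Illustrative only; the helpers around the two runlatch calls are
 * standard kernel primitives. */
static void example_idle_loop(void)
{
	while (1) {
		ppc64_runlatch_off();	/* tell the hardware we are idle */
		while (!need_resched())
			cpu_relax();
		ppc64_runlatch_on();	/* runnable work again */
		schedule();
	}
}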
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 82d117c..d63cd56 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -816,8 +816,6 @@ void __init unflatten_device_tree(void)
{
unsigned long start, mem, size;
struct device_node **allnextp = &allnodes;
- char *p = NULL;
- int l = 0;
DBG(" -> unflatten_device_tree()\n");
@@ -853,19 +851,6 @@ void __init unflatten_device_tree(void)
if (of_chosen == NULL)
of_chosen = of_find_node_by_path("/chosen@0");
- /* Retreive command line */
- if (of_chosen != NULL) {
- p = (char *)get_property(of_chosen, "bootargs", &l);
- if (p != NULL && l > 0)
- strlcpy(cmd_line, p, min(l, COMMAND_LINE_SIZE));
- }
-#ifdef CONFIG_CMDLINE
- if (l == 0 || (l == 1 && (*p) == 0))
- strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
-#endif /* CONFIG_CMDLINE */
-
- DBG("Command line is: %s\n", cmd_line);
-
DBG(" <- unflatten_device_tree()\n");
}
@@ -936,6 +921,8 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
{
u32 *prop;
unsigned long *lprop;
+ unsigned long l;
+ char *p;
DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
@@ -1000,6 +987,41 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
crashk_res.end = crashk_res.start + *lprop - 1;
#endif
+ /* Retrieve command line */
+ p = of_get_flat_dt_prop(node, "bootargs", &l);
+ if (p != NULL && l > 0)
+ strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE));
+
+#ifdef CONFIG_CMDLINE
+ if (l == 0 || (l == 1 && (*p) == 0))
+ strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+#endif /* CONFIG_CMDLINE */
+
+ DBG("Command line is: %s\n", cmd_line);
+
+ if (strstr(cmd_line, "mem=")) {
+ char *p, *q;
+ unsigned long maxmem = 0;
+
+ for (q = cmd_line; (p = strstr(q, "mem=")) != 0; ) {
+ q = p + 4;
+ if (p > cmd_line && p[-1] != ' ')
+ continue;
+ maxmem = simple_strtoul(q, &q, 0);
+ if (*q == 'k' || *q == 'K') {
+ maxmem <<= 10;
+ ++q;
+ } else if (*q == 'm' || *q == 'M') {
+ maxmem <<= 20;
+ ++q;
+ } else if (*q == 'g' || *q == 'G') {
+ maxmem <<= 30;
+ ++q;
+ }
+ }
+ memory_limit = maxmem;
+ }
+
/* break now */
return 1;
}
@@ -1120,7 +1142,7 @@ static void __init early_reserve_mem(void)
size_32 = *(reserve_map_32++);
if (size_32 == 0)
break;
- DBG("reserving: %lx -> %lx\n", base_32, size_32);
+ DBG("reserving: %x -> %x\n", base_32, size_32);
lmb_reserve(base_32, size_32);
}
return;
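
The mem= scanner added to early_init_dt_scan_chosen() above keeps the last mem= value on the command line and applies k/m/g scaling. The same loop can be exercised standalone; this sketch substitutes libc's strtoul() for the kernel's simple_strtoul() but is otherwise the logic from the hunk:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned long parse_mem_limit(const char *cmd_line)
{
	const char *p, *q;
	char *end;
	unsigned long maxmem = 0;

	for (q = cmd_line; (p = strstr(q, "mem=")) != NULL; ) {
		q = p + 4;
		if (p > cmd_line && p[-1] != ' ')
			continue;	/* "foomem=..." is not a match */
		maxmem = strtoul(q, &end, 0);
		q = end;
		if (*q == 'k' || *q == 'K')
			maxmem <<= 10;
		else if (*q == 'm' || *q == 'M')
			maxmem <<= 20;
		else if (*q == 'g' || *q == 'G')
			maxmem <<= 30;
	}
	return maxmem;
}

int main(void)
{
	/* Prints 536870912, i.e. 512 << 20. */
	printf("%lu\n", parse_mem_limit("root=/dev/sda1 mem=512M quiet"));
	return 0;
}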
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index ec7153f..813c2cd 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -205,14 +205,6 @@ static cell_t __initdata regbuf[1024];
#define MAX_CPU_THREADS 2
-/* TO GO */
-#ifdef CONFIG_HMT
-struct {
- unsigned int pir;
- unsigned int threadid;
-} hmt_thread_data[NR_CPUS];
-#endif /* CONFIG_HMT */
-
/*
* Error results ... some OF calls will return "-1" on error, some
* will return 0, some will return either. To simplify, here are
@@ -986,7 +978,7 @@ static void __init prom_init_mem(void)
if (size == 0)
continue;
prom_debug(" %x %x\n", base, size);
- if (base == 0)
+ if (base == 0 && (RELOC(of_platform) & PLATFORM_LPAR))
RELOC(rmo_top) = size;
if ((base + size) > RELOC(ram_top))
RELOC(ram_top) = base + size;
@@ -1319,10 +1311,6 @@ static void __init prom_hold_cpus(void)
*/
*spinloop = 0;
-#ifdef CONFIG_HMT
- for (i = 0; i < NR_CPUS; i++)
- RELOC(hmt_thread_data)[i].pir = 0xdeadbeef;
-#endif
/* look for cpus */
for (node = 0; prom_next_node(&node); ) {
type[0] = 0;
@@ -1389,32 +1377,6 @@ static void __init prom_hold_cpus(void)
/* Reserve cpu #s for secondary threads. They start later. */
cpuid += cpu_threads;
}
-#ifdef CONFIG_HMT
- /* Only enable HMT on processors that provide support. */
- if (__is_processor(PV_PULSAR) ||
- __is_processor(PV_ICESTAR) ||
- __is_processor(PV_SSTAR)) {
- prom_printf(" starting secondary threads\n");
-
- for (i = 0; i < NR_CPUS; i += 2) {
- if (!cpu_online(i))
- continue;
-
- if (i == 0) {
- unsigned long pir = mfspr(SPRN_PIR);
- if (__is_processor(PV_PULSAR)) {
- RELOC(hmt_thread_data)[i].pir =
- pir & 0x1f;
- } else {
- RELOC(hmt_thread_data)[i].pir =
- pir & 0x3ff;
- }
- }
- }
- } else {
- prom_printf("Processor is not HMT capable\n");
- }
-#endif
if (cpuid > NR_CPUS)
prom_printf("WARNING: maximum CPUs (" __stringify(NR_CPUS)
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 400793c..bcb8357 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -561,10 +561,7 @@ void do_syscall_trace_leave(struct pt_regs *regs)
regs->result);
if ((test_thread_flag(TIF_SYSCALL_TRACE)
-#ifdef CONFIG_PPC64
- || test_thread_flag(TIF_SINGLESTEP)
-#endif
- )
+ || test_thread_flag(TIF_SINGLESTEP))
&& (current->ptrace & PT_PTRACED))
do_syscall_trace();
}
diff --git a/arch/powerpc/kernel/rtas-rtc.c b/arch/powerpc/kernel/rtas-rtc.c
index 635d3b9..34d073f 100644
--- a/arch/powerpc/kernel/rtas-rtc.c
+++ b/arch/powerpc/kernel/rtas-rtc.c
@@ -52,7 +52,7 @@ void rtas_get_rtc_time(struct rtc_time *rtc_tm)
error = rtas_call(rtas_token("get-time-of-day"), 0, 8, ret);
if (error == RTAS_CLOCK_BUSY || rtas_is_extended_busy(error)) {
if (in_interrupt() && printk_ratelimit()) {
- memset(&rtc_tm, 0, sizeof(struct rtc_time));
+ memset(rtc_tm, 0, sizeof(struct rtc_time));
printk(KERN_WARNING "error: reading clock"
" would delay interrupt\n");
return; /* delay not allowed */
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index 03afb79..57b539a 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -278,8 +278,7 @@ static int phb_set_bus_ranges(struct device_node *dev,
return 0;
}
-static int __devinit setup_phb(struct device_node *dev,
- struct pci_controller *phb)
+int __devinit setup_phb(struct device_node *dev, struct pci_controller *phb)
{
if (is_python(dev))
python_countermeasures(dev);
@@ -357,27 +356,6 @@ unsigned long __init find_and_init_phbs(void)
return 0;
}
-struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn)
-{
- struct pci_controller *phb;
- int primary;
-
- primary = list_empty(&hose_list);
- phb = pcibios_alloc_controller(dn);
- if (!phb)
- return NULL;
- setup_phb(dn, phb);
- pci_process_bridge_OF_ranges(phb, dn, primary);
-
- pci_setup_phb_io_dynamic(phb, primary);
-
- pci_devs_phb_init_dynamic(phb);
- scan_phb(phb);
-
- return phb;
-}
-EXPORT_SYMBOL(init_phb_dynamic);
-
/* RPA-specific bits for removing PHBs */
int pcibios_remove_root_bus(struct pci_controller *phb)
{
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index a717dff..f96c49b 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -311,8 +311,6 @@ void smp_release_cpus(void)
DBG(" <- smp_release_cpus()\n");
}
-#else
-#define smp_release_cpus()
#endif /* CONFIG_SMP || CONFIG_KEXEC */
/*
@@ -473,10 +471,12 @@ void __init setup_system(void)
check_smt_enabled();
smp_setup_cpu_maps();
+#ifdef CONFIG_SMP
/* Release secondary cpus out of their spinloops at 0x60 now that
* we can map physical -> logical CPU ids
*/
smp_release_cpus();
+#endif
printk("Starting Linux PPC64 %s\n", system_utsname.version);
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index bd837b5..d7a4e81 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -151,10 +151,7 @@ static inline int save_general_regs(struct pt_regs *regs,
elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
int i;
- if (!FULL_REGS(regs)) {
- set_thread_flag(TIF_SAVE_NVGPRS);
- current_thread_info()->nvgprs_frame = frame->mc_gregs;
- }
+ WARN_ON(!FULL_REGS(regs));
for (i = 0; i <= PT_RESULT; i ++) {
if (i == 14 && !FULL_REGS(regs))
@@ -215,15 +212,7 @@ static inline int get_old_sigaction(struct k_sigaction *new_ka,
static inline int save_general_regs(struct pt_regs *regs,
struct mcontext __user *frame)
{
- if (!FULL_REGS(regs)) {
- /* Zero out the unsaved GPRs to avoid information
- leak, and set TIF_SAVE_NVGPRS to ensure that the
- registers do actually get saved later. */
- memset(&regs->gpr[14], 0, 18 * sizeof(unsigned long));
- current_thread_info()->nvgprs_frame = &frame->mc_gregs;
- set_thread_flag(TIF_SAVE_NVGPRS);
- }
-
+ WARN_ON(!FULL_REGS(regs));
return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
}
@@ -826,8 +815,8 @@ static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int
}
long sys_swapcontext(struct ucontext __user *old_ctx,
- struct ucontext __user *new_ctx,
- int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
+ struct ucontext __user *new_ctx,
+ int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
{
unsigned char tmp;
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 59b9c9c..47f9103 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -116,14 +116,7 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
err |= __put_user(0, &sc->v_regs);
#endif /* CONFIG_ALTIVEC */
err |= __put_user(&sc->gp_regs, &sc->regs);
- if (!FULL_REGS(regs)) {
- /* Zero out the unsaved GPRs to avoid information
- leak, and set TIF_SAVE_NVGPRS to ensure that the
- registers do actually get saved later. */
- memset(&regs->gpr[14], 0, 18 * sizeof(unsigned long));
- set_thread_flag(TIF_SAVE_NVGPRS);
- current_thread_info()->nvgprs_frame = &sc->gp_regs;
- }
+ WARN_ON(!FULL_REGS(regs));
err |= __copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE);
err |= __copy_to_user(&sc->fp_regs, &current->thread.fpr, FP_REGS_SIZE);
err |= __put_user(signr, &sc->signal);
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 13595a6..805eaed 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -541,7 +541,7 @@ int __devinit start_secondary(void *unused)
smp_ops->take_timebase();
if (system_state > SYSTEM_BOOTING)
- per_cpu(last_jiffy, cpu) = get_tb();
+ snapshot_timebase();
spin_lock(&call_lock);
cpu_set(cpu, cpu_online_map);
@@ -573,6 +573,8 @@ void __init smp_cpus_done(unsigned int max_cpus)
set_cpus_allowed(current, old_mask);
+ snapshot_timebases();
+
dump_numa_cpu_topology();
}
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index 475249d..cd75ab2 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -176,7 +176,6 @@ struct timex32 {
};
extern int do_adjtimex(struct timex *);
-extern void ppc_adjtimex(void);
asmlinkage long compat_sys_adjtimex(struct timex32 __user *utp)
{
@@ -209,9 +208,6 @@ asmlinkage long compat_sys_adjtimex(struct timex32 __user *utp)
ret = do_adjtimex(&txc);
- /* adjust the conversion of TB to time of day to track adjtimex */
- ppc_adjtimex();
-
if(put_user(txc.modes, &utp->modes) ||
__put_user(txc.offset, &utp->offset) ||
__put_user(txc.freq, &utp->freq) ||
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
index 8a9f994..1ad55f0 100644
--- a/arch/powerpc/kernel/systbl.S
+++ b/arch/powerpc/kernel/systbl.S
@@ -288,7 +288,7 @@ COMPAT_SYS(clock_settime)
COMPAT_SYS(clock_gettime)
COMPAT_SYS(clock_getres)
COMPAT_SYS(clock_nanosleep)
-COMPAT_SYS(swapcontext)
+SYSX(ppc64_swapcontext,ppc32_swapcontext,ppc_swapcontext)
COMPAT_SYS(tgkill)
COMPAT_SYS(utimes)
COMPAT_SYS(statfs64)
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 1886045..4a27218 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -50,6 +50,8 @@
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
+#include <linux/jiffies.h>
+#include <linux/posix-timers.h>
#include <asm/io.h>
#include <asm/processor.h>
@@ -97,9 +99,18 @@ unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100; /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
+EXPORT_SYMBOL(tb_ticks_per_sec); /* for cputime_t conversions */
u64 tb_to_xs;
unsigned tb_to_us;
-unsigned long processor_freq;
+
+#define TICKLEN_SCALE (SHIFT_SCALE - 10)
+u64 last_tick_len; /* units are ns / 2^TICKLEN_SCALE */
+u64 ticklen_to_xs; /* 0.64 fraction */
+
+/* If last_tick_len corresponds to about 1/HZ seconds, then
+ last_tick_len << TICKLEN_SHIFT will be about 2^63. */
+#define TICKLEN_SHIFT (63 - 30 - TICKLEN_SCALE + SHIFT_HZ)
+
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);
@@ -113,10 +124,6 @@ extern unsigned long wall_jiffies;
extern struct timezone sys_tz;
static long timezone_offset;
-void ppc_adjtimex(void);
-
-static unsigned adjusting_time = 0;
-
unsigned long ppc_proc_freq;
unsigned long ppc_tb_freq;
@@ -130,6 +137,224 @@ unsigned long tb_last_stamp;
*/
DEFINE_PER_CPU(unsigned long, last_jiffy);
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+/*
+ * Factors for converting from cputime_t (timebase ticks) to
+ * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
+ * These are all stored as 0.64 fixed-point binary fractions.
+ */
+u64 __cputime_jiffies_factor;
+EXPORT_SYMBOL(__cputime_jiffies_factor);
+u64 __cputime_msec_factor;
+EXPORT_SYMBOL(__cputime_msec_factor);
+u64 __cputime_sec_factor;
+EXPORT_SYMBOL(__cputime_sec_factor);
+u64 __cputime_clockt_factor;
+EXPORT_SYMBOL(__cputime_clockt_factor);
+
+static void calc_cputime_factors(void)
+{
+ struct div_result res;
+
+ div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
+ __cputime_jiffies_factor = res.result_low;
+ div128_by_32(1000, 0, tb_ticks_per_sec, &res);
+ __cputime_msec_factor = res.result_low;
+ div128_by_32(1, 0, tb_ticks_per_sec, &res);
+ __cputime_sec_factor = res.result_low;
+ div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
+ __cputime_clockt_factor = res.result_low;
+}
+
+/*
+ * Read the PURR on systems that have it, otherwise the timebase.
+ */
+static u64 read_purr(void)
+{
+ if (cpu_has_feature(CPU_FTR_PURR))
+ return mfspr(SPRN_PURR);
+ return mftb();
+}
+
+/*
+ * Account time for a transition between system, hard irq
+ * or soft irq state.
+ */
+void account_system_vtime(struct task_struct *tsk)
+{
+ u64 now, delta;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ now = read_purr();
+ delta = now - get_paca()->startpurr;
+ get_paca()->startpurr = now;
+ if (!in_interrupt()) {
+ delta += get_paca()->system_time;
+ get_paca()->system_time = 0;
+ }
+ account_system_time(tsk, 0, delta);
+ local_irq_restore(flags);
+}
+
+/*
+ * Transfer the user and system times accumulated in the paca
+ * by the exception entry and exit code to the generic process
+ * user and system time records.
+ * Must be called with interrupts disabled.
+ */
+void account_process_vtime(struct task_struct *tsk)
+{
+ cputime_t utime;
+
+ utime = get_paca()->user_time;
+ get_paca()->user_time = 0;
+ account_user_time(tsk, utime);
+}
+
+static void account_process_time(struct pt_regs *regs)
+{
+ int cpu = smp_processor_id();
+
+ account_process_vtime(current);
+ run_local_timers();
+ if (rcu_pending(cpu))
+ rcu_check_callbacks(cpu, user_mode(regs));
+ scheduler_tick();
+ run_posix_cpu_timers(current);
+}
+
+#ifdef CONFIG_PPC_SPLPAR
+/*
+ * Stuff for accounting stolen time.
+ */
+struct cpu_purr_data {
+ int initialized; /* thread is running */
+ u64 tb0; /* timebase at origin time */
+ u64 purr0; /* PURR at origin time */
+ u64 tb; /* last TB value read */
+ u64 purr; /* last PURR value read */
+ u64 stolen; /* stolen time so far */
+ spinlock_t lock;
+};
+
+static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data);
+
+static void snapshot_tb_and_purr(void *data)
+{
+ struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);
+
+ p->tb0 = mftb();
+ p->purr0 = mfspr(SPRN_PURR);
+ p->tb = p->tb0;
+ p->purr = 0;
+ wmb();
+ p->initialized = 1;
+}
+
+/*
+ * Called during boot when all cpus have come up.
+ */
+void snapshot_timebases(void)
+{
+ int cpu;
+
+ if (!cpu_has_feature(CPU_FTR_PURR))
+ return;
+ for_each_cpu(cpu)
+ spin_lock_init(&per_cpu(cpu_purr_data, cpu).lock);
+ on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1);
+}
+
+void calculate_steal_time(void)
+{
+ u64 tb, purr, t0;
+ s64 stolen;
+ struct cpu_purr_data *p0, *pme, *phim;
+ int cpu;
+
+ if (!cpu_has_feature(CPU_FTR_PURR))
+ return;
+ cpu = smp_processor_id();
+ pme = &per_cpu(cpu_purr_data, cpu);
+ if (!pme->initialized)
+ return; /* this can happen in early boot */
+ p0 = &per_cpu(cpu_purr_data, cpu & ~1);
+ phim = &per_cpu(cpu_purr_data, cpu ^ 1);
+ spin_lock(&p0->lock);
+ tb = mftb();
+ purr = mfspr(SPRN_PURR) - pme->purr0;
+ if (!phim->initialized || !cpu_online(cpu ^ 1)) {
+ stolen = (tb - pme->tb) - (purr - pme->purr);
+ } else {
+ t0 = pme->tb0;
+ if (phim->tb0 < t0)
+ t0 = phim->tb0;
+ stolen = phim->tb - t0 - phim->purr - purr - p0->stolen;
+ }
+ if (stolen > 0) {
+ account_steal_time(current, stolen);
+ p0->stolen += stolen;
+ }
+ pme->tb = tb;
+ pme->purr = purr;
+ spin_unlock(&p0->lock);
+}
+
+/*
+ * Must be called before the cpu is added to the online map when
+ * a cpu is being brought up at runtime.
+ */
+static void snapshot_purr(void)
+{
+ int cpu;
+ u64 purr;
+ struct cpu_purr_data *p0, *pme, *phim;
+ unsigned long flags;
+
+ if (!cpu_has_feature(CPU_FTR_PURR))
+ return;
+ cpu = smp_processor_id();
+ pme = &per_cpu(cpu_purr_data, cpu);
+ p0 = &per_cpu(cpu_purr_data, cpu & ~1);
+ phim = &per_cpu(cpu_purr_data, cpu ^ 1);
+ spin_lock_irqsave(&p0->lock, flags);
+ pme->tb = pme->tb0 = mftb();
+ purr = mfspr(SPRN_PURR);
+ if (!phim->initialized) {
+ pme->purr = 0;
+ pme->purr0 = purr;
+ } else {
+ /* set p->purr and p->purr0 for no change in p0->stolen */
+ pme->purr = phim->tb - phim->tb0 - phim->purr - p0->stolen;
+ pme->purr0 = purr - pme->purr;
+ }
+ pme->initialized = 1;
+ spin_unlock_irqrestore(&p0->lock, flags);
+}
+
+#endif /* CONFIG_PPC_SPLPAR */
+
+#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
+#define calc_cputime_factors()
+#define account_process_time(regs) update_process_times(user_mode(regs))
+#define calculate_steal_time() do { } while (0)
+#endif
+
+#if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR))
+#define snapshot_purr() do { } while (0)
+#endif
+
+/*
+ * Called when a cpu comes up after the system has finished booting,
+ * i.e. as a result of a hotplug cpu action.
+ */
+void snapshot_timebase(void)
+{
+ __get_cpu_var(last_jiffy) = get_tb();
+ snapshot_purr();
+}
+
void __delay(unsigned long loops)
{
unsigned long start;
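
The __cputime_*_factor values computed by calc_cputime_factors() above are 0.64 fixed-point fractions: div128_by_32(HZ, 0, tb_ticks_per_sec, ...) yields floor(2^64 * HZ / tb_ticks_per_sec), and multiplying a tick count by that with a 64x64 multiply-high gives jiffies. A standalone sketch with hypothetical frequencies (assumes a compiler that provides unsigned __int128):

#include <stdio.h>
#include <stdint.h>

/* floor(2^64 * num / den): the 0.64 fraction div128_by_32 produces. */
static uint64_t fixed064(uint64_t num, uint64_t den)
{
	return (uint64_t)(((unsigned __int128)num << 64) / den);
}

/* High 64 bits of a 64x64 product: the kernel's mulhdu() primitive. */
static uint64_t mulhdu(uint64_t a, uint64_t b)
{
	return (uint64_t)(((unsigned __int128)a * b) >> 64);
}

int main(void)
{
	uint64_t tb_ticks_per_sec = 512000000;	/* hypothetical 512MHz timebase */
	uint64_t hz = 250;			/* hypothetical CONFIG_HZ */
	uint64_t jiffies_factor = fixed064(hz, tb_ticks_per_sec);
	uint64_t ticks = 2 * tb_ticks_per_sec;	/* two seconds of ticks */

	/* Prints 500, i.e. 2 * HZ jiffies. */
	printf("%llu\n", (unsigned long long)mulhdu(ticks, jiffies_factor));
	return 0;
}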
@@ -178,8 +403,7 @@ static __inline__ void timer_check_rtc(void)
*/
if (ppc_md.set_rtc_time && ntp_synced() &&
xtime.tv_sec - last_rtc_update >= 659 &&
- abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ &&
- jiffies - wall_jiffies == 1) {
+ abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ) {
struct rtc_time tm;
to_tm(xtime.tv_sec + 1 + timezone_offset, &tm);
tm.tm_year -= 1900;
@@ -226,15 +450,14 @@ void do_gettimeofday(struct timeval *tv)
if (__USE_RTC()) {
/* do this the old way */
unsigned long flags, seq;
- unsigned int sec, nsec, usec, lost;
+ unsigned int sec, nsec, usec;
do {
seq = read_seqbegin_irqsave(&xtime_lock, flags);
sec = xtime.tv_sec;
nsec = xtime.tv_nsec + tb_ticks_since(tb_last_stamp);
- lost = jiffies - wall_jiffies;
} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
- usec = nsec / 1000 + lost * (1000000 / HZ);
+ usec = nsec / 1000;
while (usec >= 1000000) {
usec -= 1000000;
++sec;
@@ -248,23 +471,6 @@ void do_gettimeofday(struct timeval *tv)
EXPORT_SYMBOL(do_gettimeofday);
-/* Synchronize xtime with do_gettimeofday */
-
-static inline void timer_sync_xtime(unsigned long cur_tb)
-{
-#ifdef CONFIG_PPC64
- /* why do we do this? */
- struct timeval my_tv;
-
- __do_gettimeofday(&my_tv, cur_tb);
-
- if (xtime.tv_sec <= my_tv.tv_sec) {
- xtime.tv_sec = my_tv.tv_sec;
- xtime.tv_nsec = my_tv.tv_usec * 1000;
- }
-#endif
-}
-
/*
* There are two copies of tb_to_xs and stamp_xsec so that no
* lock is needed to access and use these values in
@@ -297,9 +503,9 @@ static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
* the two values of tb_update_count match and are even then the
* tb_to_xs and stamp_xsec values are consistent. If not, then it
* loops back and reads them again until this criteria is met.
+ * We expect the caller to have done the first increment of
+ * vdso_data->tb_update_count already.
*/
- ++(vdso_data->tb_update_count);
- smp_wmb();
vdso_data->tb_orig_stamp = new_tb_stamp;
vdso_data->stamp_xsec = new_stamp_xsec;
vdso_data->tb_to_xs = new_tb_to_xs;
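
update_gtod() now assumes its caller performed the opening increment of vdso_data->tb_update_count, so the function itself only publishes the new values and closes the sequence. The underlying protocol is seqlock-like; here is a hedged standalone sketch of the writer side, with __sync_synchronize() standing in for the kernel's smp_wmb():

#include <stdint.h>

struct gtod_vars {		/* illustrative subset of vdso_data */
	volatile uint64_t tb_update_count;
	uint64_t tb_orig_stamp;
	uint64_t stamp_xsec;
	uint64_t tb_to_xs;
};

/* Writer: an odd count means an update is in progress; readers spin
 * until the count is even and unchanged across their reads. */
static void publish_gtod(struct gtod_vars *v, uint64_t stamp,
			 uint64_t xsec, uint64_t to_xs)
{
	++v->tb_update_count;		/* the caller's job after this patch */
	__sync_synchronize();
	v->tb_orig_stamp = stamp;
	v->stamp_xsec = xsec;
	v->tb_to_xs = to_xs;
	__sync_synchronize();
	++v->tb_update_count;		/* even again: values are consistent */
}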
@@ -323,15 +529,40 @@ static __inline__ void timer_recalc_offset(u64 cur_tb)
{
unsigned long offset;
u64 new_stamp_xsec;
+ u64 tlen, t2x;
+ u64 tb, xsec_old, xsec_new;
+ struct gettimeofday_vars *varp;
if (__USE_RTC())
return;
+ tlen = current_tick_length();
offset = cur_tb - do_gtod.varp->tb_orig_stamp;
- if ((offset & 0x80000000u) == 0)
+ if (tlen == last_tick_len && offset < 0x80000000u)
return;
- new_stamp_xsec = do_gtod.varp->stamp_xsec
- + mulhdu(offset, do_gtod.varp->tb_to_xs);
- update_gtod(cur_tb, new_stamp_xsec, do_gtod.varp->tb_to_xs);
+ if (tlen != last_tick_len) {
+ t2x = mulhdu(tlen << TICKLEN_SHIFT, ticklen_to_xs);
+ last_tick_len = tlen;
+ } else
+ t2x = do_gtod.varp->tb_to_xs;
+ new_stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
+ do_div(new_stamp_xsec, 1000000000);
+ new_stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
+
+ ++vdso_data->tb_update_count;
+ smp_mb();
+
+ /*
+ * Make sure time doesn't go backwards for userspace gettimeofday.
+ */
+ tb = get_tb();
+ varp = do_gtod.varp;
+ xsec_old = mulhdu(tb - varp->tb_orig_stamp, varp->tb_to_xs)
+ + varp->stamp_xsec;
+ xsec_new = mulhdu(tb - cur_tb, t2x) + new_stamp_xsec;
+ if (xsec_new < xsec_old)
+ new_stamp_xsec += xsec_old - xsec_new;
+
+ update_gtod(cur_tb, new_stamp_xsec, t2x);
}
#ifdef CONFIG_SMP
@@ -381,6 +612,7 @@ static void iSeries_tb_recal(void)
new_tb_ticks_per_jiffy, sign, tick_diff );
tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
tb_ticks_per_sec = new_tb_ticks_per_sec;
+ calc_cputime_factors();
div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
tb_to_xs = divres.result_low;
@@ -429,6 +661,7 @@ void timer_interrupt(struct pt_regs * regs)
irq_enter();
profile_tick(CPU_PROFILING, regs);
+ calculate_steal_time();
#ifdef CONFIG_PPC_ISERIES
get_lppaca()->int_dword.fields.decr_int = 0;
@@ -450,7 +683,7 @@ void timer_interrupt(struct pt_regs * regs)
* is the case.
*/
if (!cpu_is_offline(cpu))
- update_process_times(user_mode(regs));
+ account_process_time(regs);
/*
* No need to check whether cpu is offline here; boot_cpuid
@@ -462,13 +695,10 @@ void timer_interrupt(struct pt_regs * regs)
write_seqlock(&xtime_lock);
tb_last_jiffy += tb_ticks_per_jiffy;
tb_last_stamp = per_cpu(last_jiffy, cpu);
- timer_recalc_offset(tb_last_jiffy);
do_timer(regs);
- timer_sync_xtime(tb_last_jiffy);
+ timer_recalc_offset(tb_last_jiffy);
timer_check_rtc();
write_sequnlock(&xtime_lock);
- if (adjusting_time && (time_adjust == 0))
- ppc_adjtimex();
}
next_dec = tb_ticks_per_jiffy - ticks;
@@ -492,29 +722,45 @@ void timer_interrupt(struct pt_regs * regs)
void wakeup_decrementer(void)
{
- int i;
+ unsigned long ticks;
- set_dec(tb_ticks_per_jiffy);
/*
- * We don't expect this to be called on a machine with a 601,
- * so using get_tbl is fine.
+ * The timebase gets saved on sleep and restored on wakeup,
+ * so all we need to do is to reset the decrementer.
*/
- tb_last_stamp = tb_last_jiffy = get_tb();
- for_each_cpu(i)
- per_cpu(last_jiffy, i) = tb_last_stamp;
+ ticks = tb_ticks_since(__get_cpu_var(last_jiffy));
+ if (ticks < tb_ticks_per_jiffy)
+ ticks = tb_ticks_per_jiffy - ticks;
+ else
+ ticks = 1;
+ set_dec(ticks);
}
#ifdef CONFIG_SMP
void __init smp_space_timers(unsigned int max_cpus)
{
int i;
+ unsigned long half = tb_ticks_per_jiffy / 2;
unsigned long offset = tb_ticks_per_jiffy / max_cpus;
unsigned long previous_tb = per_cpu(last_jiffy, boot_cpuid);
/* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
previous_tb -= tb_ticks_per_jiffy;
+ /*
+ * The stolen time calculation for POWER5 shared-processor LPAR
+ * systems works better if the two threads' timebase interrupts
+ * are staggered by half a jiffy with respect to each other.
+ */
for_each_cpu(i) {
- if (i != boot_cpuid) {
+ if (i == boot_cpuid)
+ continue;
+ if (i == (boot_cpuid ^ 1))
+ per_cpu(last_jiffy, i) =
+ per_cpu(last_jiffy, boot_cpuid) - half;
+ else if (i & 1)
+ per_cpu(last_jiffy, i) =
+ per_cpu(last_jiffy, i ^ 1) + half;
+ else {
previous_tb += offset;
per_cpu(last_jiffy, i) = previous_tb;
}
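
The staggering above can be checked with plain arithmetic. This hedged demo replays the loop for a hypothetical 4-way box (two cores, two threads each) with cpu 0 as boot cpu; sibling threads end up half a jiffy apart:

#include <stdio.h>

int main(void)
{
	const long tpj = 2048000;	/* hypothetical ticks per jiffy */
	const long half = tpj / 2;
	const int max_cpus = 4, boot = 0;
	long offset = tpj / max_cpus;
	long prev = 0 - tpj;		/* boot cpu's value minus one jiffy */
	long lj[4] = {0};		/* lj[boot] == 0 */

	for (int i = 0; i < max_cpus; i++) {
		if (i == boot)
			continue;
		if (i == (boot ^ 1))
			lj[i] = lj[boot] - half;
		else if (i & 1)
			lj[i] = lj[i ^ 1] + half;
		else
			lj[i] = (prev += offset);
	}
	for (int i = 0; i < max_cpus; i++)
		printf("cpu%d: %ld\n", i, lj[i]); /* 0, -1024000, -1536000, -512000 */
	return 0;
}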
@@ -541,8 +787,8 @@ int do_settimeofday(struct timespec *tv)
time_t wtm_sec, new_sec = tv->tv_sec;
long wtm_nsec, new_nsec = tv->tv_nsec;
unsigned long flags;
- long int tb_delta;
- u64 new_xsec, tb_delta_xs;
+ u64 new_xsec;
+ unsigned long tb_delta;
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
@@ -563,9 +809,23 @@ int do_settimeofday(struct timespec *tv)
first_settimeofday = 0;
}
#endif
+
+ /* Make userspace gettimeofday spin until we're done. */
+ ++vdso_data->tb_update_count;
+ smp_mb();
+
+ /*
+ * Subtract off the number of nanoseconds since the
+ * beginning of the last tick.
+ * Note that since we don't increment jiffies_64 anywhere other
+ * than in do_timer (since we don't have a lost tick problem),
+ * wall_jiffies will always be the same as jiffies,
+ * and therefore the (jiffies - wall_jiffies) computation
+ * has been removed.
+ */
tb_delta = tb_ticks_since(tb_last_stamp);
- tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy;
- tb_delta_xs = mulhdu(tb_delta, do_gtod.varp->tb_to_xs);
+ tb_delta = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); /* in xsec */
+ new_nsec -= SCALE_XSEC(tb_delta, 1000000000);
wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);
@@ -580,12 +840,12 @@ int do_settimeofday(struct timespec *tv)
ntp_clear();
- new_xsec = 0;
- if (new_nsec != 0) {
- new_xsec = (u64)new_nsec * XSEC_PER_SEC;
+ new_xsec = xtime.tv_nsec;
+ if (new_xsec != 0) {
+ new_xsec *= XSEC_PER_SEC;
do_div(new_xsec, NSEC_PER_SEC);
}
- new_xsec += (u64)new_sec * XSEC_PER_SEC - tb_delta_xs;
+ new_xsec += (u64)xtime.tv_sec * XSEC_PER_SEC;
update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs);
vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
@@ -671,7 +931,7 @@ void __init time_init(void)
unsigned long flags;
unsigned long tm = 0;
struct div_result res;
- u64 scale;
+ u64 scale, x;
unsigned shift;
if (ppc_md.time_init != NULL)
@@ -693,11 +953,43 @@ void __init time_init(void)
}
tb_ticks_per_jiffy = ppc_tb_freq / HZ;
- tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
+ tb_ticks_per_sec = ppc_tb_freq;
tb_ticks_per_usec = ppc_tb_freq / 1000000;
tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
- div128_by_32(1024*1024, 0, tb_ticks_per_sec, &res);
- tb_to_xs = res.result_low;
+ calc_cputime_factors();
+
+ /*
+ * Calculate the length of each tick in ns. It will not be
+ * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ.
+ * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq,
+ * rounded up.
+ */
+ x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1;
+ do_div(x, ppc_tb_freq);
+ tick_nsec = x;
+ last_tick_len = x << TICKLEN_SCALE;
+
+ /*
+ * Compute ticklen_to_xs, which is a factor which gets multiplied
+ * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value.
+ * It is computed as:
+ * ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
+ * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
+ * which turns out to be N = 51 - SHIFT_HZ.
+ * This gives the result as a 0.64 fixed-point fraction.
+ * That value is reduced by an offset amounting to 1 xsec per
+ * 2^31 timebase ticks to avoid problems with time going backwards
+ * by 1 xsec when we do timer_recalc_offset due to losing the
+ * fractional xsec. That offset is equal to ppc_tb_freq/2^51
+ * since there are 2^20 xsec in a second.
+ */
+ div128_by_32((1ULL << 51) - ppc_tb_freq, 0,
+ tb_ticks_per_jiffy << SHIFT_HZ, &res);
+ div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
+ ticklen_to_xs = res.result_low;
+
+ /* Compute tb_to_xs from tick_nsec */
+ tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs);
/*
* Compute scale factor for sched_clock.
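
The comment block above packs a lot of fixed-point bookkeeping into a few lines; the net effect is that ticklen_to_xs times (last_tick_len << TICKLEN_SHIFT) reproduces the old 2^84 / tb_ticks_per_sec value of tb_to_xs, minus the small anti-backwards offset. A numeric check under stated assumptions (512MHz timebase, HZ=250 so SHIFT_HZ=8, SHIFT_SCALE=22 as in 2.6-era <linux/timex.h>, and a compiler with unsigned __int128):

#include <stdio.h>
#include <stdint.h>

typedef unsigned __int128 u128;

static uint64_t mulhdu(uint64_t a, uint64_t b)
{
	return (uint64_t)(((u128)a * b) >> 64);
}

int main(void)
{
	const uint64_t freq = 512000000, hz = 250, shift_hz = 8;
	const int ticklen_scale = 22 - 10;	/* SHIFT_SCALE - 10 */
	const int ticklen_shift = 63 - 30 - ticklen_scale + shift_hz;

	uint64_t tpj = freq / hz;
	uint64_t tick_nsec = (1000000000ULL * tpj + freq - 1) / freq;
	uint64_t last_tick_len = tick_nsec << ticklen_scale;

	/* ticklen_to_xs = (2^51 - freq) * 2^64 / (tpj * 2^SHIFT_HZ * 1e9) */
	u128 num = (u128)((1ULL << 51) - freq) << 64;
	uint64_t ticklen_to_xs =
		(uint64_t)(num / ((u128)(tpj << shift_hz) * 1000000000ULL));

	uint64_t via_ticklen = mulhdu(last_tick_len << ticklen_shift,
				      ticklen_to_xs);
	uint64_t direct = (uint64_t)(((u128)1 << 84) / freq);

	/* The two agree except for the deliberate 1-xsec-per-2^31-ticks
	 * offset described in the comment above. */
	printf("via ticklen: %llx\ndirect:      %llx\n",
	       (unsigned long long)via_ticklen, (unsigned long long)direct);
	return 0;
}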
@@ -724,6 +1016,14 @@ void __init time_init(void)
tm = get_boot_time();
write_seqlock_irqsave(&xtime_lock, flags);
+
+ /* If platform provided a timezone (pmac), we correct the time */
+ if (timezone_offset) {
+ sys_tz.tz_minuteswest = -timezone_offset / 60;
+ sys_tz.tz_dsttime = 0;
+ tm -= timezone_offset;
+ }
+
xtime.tv_sec = tm;
xtime.tv_nsec = 0;
do_gtod.varp = &do_gtod.vars[0];
@@ -738,18 +1038,11 @@ void __init time_init(void)
vdso_data->tb_orig_stamp = tb_last_jiffy;
vdso_data->tb_update_count = 0;
vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
- vdso_data->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC;
+ vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
vdso_data->tb_to_xs = tb_to_xs;
time_freq = 0;
- /* If platform provided a timezone (pmac), we correct the time */
- if (timezone_offset) {
- sys_tz.tz_minuteswest = -timezone_offset / 60;
- sys_tz.tz_dsttime = 0;
- xtime.tv_sec -= timezone_offset;
- }
-
last_rtc_update = xtime.tv_sec;
set_normalized_timespec(&wall_to_monotonic,
-xtime.tv_sec, -xtime.tv_nsec);
@@ -759,126 +1052,6 @@ void __init time_init(void)
set_dec(tb_ticks_per_jiffy);
}
-/*
- * After adjtimex is called, adjust the conversion of tb ticks
- * to microseconds to keep do_gettimeofday synchronized
- * with ntpd.
- *
- * Use the time_adjust, time_freq and time_offset computed by adjtimex to
- * adjust the frequency.
- */
-
-/* #define DEBUG_PPC_ADJTIMEX 1 */
-
-void ppc_adjtimex(void)
-{
-#ifdef CONFIG_PPC64
- unsigned long den, new_tb_ticks_per_sec, tb_ticks, old_xsec,
- new_tb_to_xs, new_xsec, new_stamp_xsec;
- unsigned long tb_ticks_per_sec_delta;
- long delta_freq, ltemp;
- struct div_result divres;
- unsigned long flags;
- long singleshot_ppm = 0;
-
- /*
- * Compute parts per million frequency adjustment to
- * accomplish the time adjustment implied by time_offset to be
- * applied over the elapsed time indicated by time_constant.
- * Use SHIFT_USEC to get it into the same units as
- * time_freq.
- */
- if ( time_offset < 0 ) {
- ltemp = -time_offset;
- ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
- ltemp >>= SHIFT_KG + time_constant;
- ltemp = -ltemp;
- } else {
- ltemp = time_offset;
- ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
- ltemp >>= SHIFT_KG + time_constant;
- }
-
- /* If there is a single shot time adjustment in progress */
- if ( time_adjust ) {
-#ifdef DEBUG_PPC_ADJTIMEX
- printk("ppc_adjtimex: ");
- if ( adjusting_time == 0 )
- printk("starting ");
- printk("single shot time_adjust = %ld\n", time_adjust);
-#endif
-
- adjusting_time = 1;
-
- /*
- * Compute parts per million frequency adjustment
- * to match time_adjust
- */
- singleshot_ppm = tickadj * HZ;
- /*
- * The adjustment should be tickadj*HZ to match the code in
- * linux/kernel/timer.c, but experiments show that this is too
- * large. 3/4 of tickadj*HZ seems about right
- */
- singleshot_ppm -= singleshot_ppm / 4;
- /* Use SHIFT_USEC to get it into the same units as time_freq */
- singleshot_ppm <<= SHIFT_USEC;
- if ( time_adjust < 0 )
- singleshot_ppm = -singleshot_ppm;
- }
- else {
-#ifdef DEBUG_PPC_ADJTIMEX
- if ( adjusting_time )
- printk("ppc_adjtimex: ending single shot time_adjust\n");
-#endif
- adjusting_time = 0;
- }
-
- /* Add up all of the frequency adjustments */
- delta_freq = time_freq + ltemp + singleshot_ppm;
-
- /*
- * Compute a new value for tb_ticks_per_sec based on
- * the frequency adjustment
- */
- den = 1000000 * (1 << (SHIFT_USEC - 8));
- if ( delta_freq < 0 ) {
- tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( (-delta_freq) >> (SHIFT_USEC - 8))) / den;
- new_tb_ticks_per_sec = tb_ticks_per_sec + tb_ticks_per_sec_delta;
- }
- else {
- tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( delta_freq >> (SHIFT_USEC - 8))) / den;
- new_tb_ticks_per_sec = tb_ticks_per_sec - tb_ticks_per_sec_delta;
- }
-
-#ifdef DEBUG_PPC_ADJTIMEX
- printk("ppc_adjtimex: ltemp = %ld, time_freq = %ld, singleshot_ppm = %ld\n", ltemp, time_freq, singleshot_ppm);
- printk("ppc_adjtimex: tb_ticks_per_sec - base = %ld new = %ld\n", tb_ticks_per_sec, new_tb_ticks_per_sec);
-#endif
-
- /*
- * Compute a new value of tb_to_xs (used to convert tb to
- * microseconds) and a new value of stamp_xsec which is the
- * time (in 1/2^20 second units) corresponding to
- * tb_orig_stamp. This new value of stamp_xsec compensates
- * for the change in frequency (implied by the new tb_to_xs)
- * which guarantees that the current time remains the same.
- */
- write_seqlock_irqsave( &xtime_lock, flags );
- tb_ticks = get_tb() - do_gtod.varp->tb_orig_stamp;
- div128_by_32(1024*1024, 0, new_tb_ticks_per_sec, &divres);
- new_tb_to_xs = divres.result_low;
- new_xsec = mulhdu(tb_ticks, new_tb_to_xs);
-
- old_xsec = mulhdu(tb_ticks, do_gtod.varp->tb_to_xs);
- new_stamp_xsec = do_gtod.varp->stamp_xsec + old_xsec - new_xsec;
-
- update_gtod(do_gtod.varp->tb_orig_stamp, new_stamp_xsec, new_tb_to_xs);
-
- write_sequnlock_irqrestore( &xtime_lock, flags );
-#endif /* CONFIG_PPC64 */
-}
-
#define FEBRUARY 2
#define STARTOFTIME 1970
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 7509aa6..98660ae 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -814,6 +814,8 @@ void __kprobes program_check_exception(struct pt_regs *regs)
return;
}
+ local_irq_enable();
+
/* Try to emulate it if we should. */
if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
switch (emulate_instruction(regs)) {
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index b316fda..ec83703 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -180,8 +180,8 @@ static struct page * vdso_vma_nopage(struct vm_area_struct * vma,
unsigned long offset = address - vma->vm_start;
struct page *pg;
#ifdef CONFIG_PPC64
- void *vbase = test_thread_flag(TIF_32BIT) ?
- vdso32_kbase : vdso64_kbase;
+ void *vbase = (vma->vm_mm->task_size > TASK_SIZE_USER32) ?
+ vdso64_kbase : vdso32_kbase;
#else
void *vbase = vdso32_kbase;
#endif
diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S
index ccaeda5..4ee871f 100644
--- a/arch/powerpc/kernel/vdso64/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso64/gettimeofday.S
@@ -225,9 +225,9 @@ V_FUNCTION_BEGIN(__do_get_xsec)
.cfi_startproc
/* check for update count & load values */
1: ld r8,CFG_TB_UPDATE_COUNT(r3)
- andi. r0,r4,1 /* pending update ? loop */
+ andi. r0,r8,1 /* pending update ? loop */
bne- 1b
- xor r0,r4,r4 /* create dependency */
+ xor r0,r8,r8 /* create dependency */
add r3,r3,r0
/* Get TB & offset it */
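
The one-line vdso fix above cures a seqcount bug: the update count was loaded into r8 but the stale r4 was tested, so the loop could exit while an update was in flight. The xor-with-self then makes the subsequent loads data-dependent on the counter read, ordering them without an explicit barrier. A hedged C rendering of the full reader protocol (the asm shows only the entry check; the caller re-validates the count afterwards):

#include <stdint.h>

struct vdso_vars {		/* illustrative subset of the vdso data page */
	volatile uint64_t tb_update_count;
	uint64_t tb_orig_stamp;
	uint64_t stamp_xsec;
	uint64_t tb_to_xs;
};

static void read_gtod_vars(struct vdso_vars *v, uint64_t *stamp,
			   uint64_t *xsec, uint64_t *to_xs)
{
	uint64_t c1, c2;

	do {
		c1 = v->tb_update_count;	/* must test THIS value (r8) */
		__sync_synchronize();		/* stand-in for the xor/add trick */
		*stamp = v->tb_orig_stamp;
		*xsec = v->stamp_xsec;
		*to_xs = v->tb_to_xs;
		__sync_synchronize();
		c2 = v->tb_update_count;
	} while ((c1 & 1) || c1 != c2);		/* odd or changed: retry */
}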