Diffstat (limited to 'arch/m32r')
-rw-r--r--  arch/m32r/Kconfig.debug      |  2
-rw-r--r--  arch/m32r/Makefile           |  6
-rw-r--r--  arch/m32r/kernel/irq.c       | 10
-rw-r--r--  arch/m32r/kernel/signal.c    | 24
-rw-r--r--  arch/m32r/kernel/sys_m32r.c  | 61
-rw-r--r--  arch/m32r/lib/usercopy.c     |  4
-rw-r--r--  arch/m32r/mm/init.c          |  4
7 files changed, 56 insertions, 55 deletions
diff --git a/arch/m32r/Kconfig.debug b/arch/m32r/Kconfig.debug
index bbf711b..2e1019d 100644
--- a/arch/m32r/Kconfig.debug
+++ b/arch/m32r/Kconfig.debug
@@ -19,7 +19,7 @@ config DEBUG_STACK_USAGE
 	  This option will slow down process creation somewhat.
 
 config DEBUG_PAGEALLOC
-	bool "Page alloc debugging"
+	bool "Debug page memory allocations"
 	depends on DEBUG_KERNEL && BROKEN
 	help
 	  Unmap pages from the kernel linear mapping after free_pages().
diff --git a/arch/m32r/Makefile b/arch/m32r/Makefile
index 229f66f..f219c47 100644
--- a/arch/m32r/Makefile
+++ b/arch/m32r/Makefile
@@ -15,14 +15,14 @@ CFLAGS_MODULE += -mmodel=large
 
 ifdef CONFIG_CHIP_VDEC2
 cflags-$(CONFIG_ISA_M32R2) += -DNO_FPU -Wa,-bitinst
-aflags-$(CONFIG_ISA_M32R2) += -DNO_FPU -Wa,-bitinst
+aflags-$(CONFIG_ISA_M32R2) += -DNO_FPU -O2 -Wa,-bitinst -Wa,-no-parallel
 else
 cflags-$(CONFIG_ISA_M32R2) += -DNO_FPU -m32r2
-aflags-$(CONFIG_ISA_M32R2) += -DNO_FPU -m32r2
+aflags-$(CONFIG_ISA_M32R2) += -DNO_FPU -m32r2 -O2
 endif
 
 cflags-$(CONFIG_ISA_M32R) += -DNO_FPU
-aflags-$(CONFIG_ISA_M32R) += -DNO_FPU -Wa,-no-bitinst
+aflags-$(CONFIG_ISA_M32R) += -DNO_FPU -O2 -Wa,-no-bitinst
 
 CFLAGS += $(cflags-y)
 AFLAGS += $(aflags-y)
diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c
index 1ce6392..a4634b0 100644
--- a/arch/m32r/kernel/irq.c
+++ b/arch/m32r/kernel/irq.c
@@ -37,9 +37,8 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	if (i == 0) {
 		seq_printf(p, "           ");
-		for (j=0; j<NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d       ",j);
+		for_each_online_cpu(j)
+			seq_printf(p, "CPU%d       ",j);
 		seq_putc(p, '\n');
 	}
 
@@ -52,9 +51,8 @@ int show_interrupts(struct seq_file *p, void *v)
 #ifndef CONFIG_SMP
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
 		seq_printf(p, " %14s", irq_desc[i].handler->typename);
 		seq_printf(p, "  %s", action->name);
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c
index 71763f7..cb33097 100644
--- a/arch/m32r/kernel/signal.c
+++ b/arch/m32r/kernel/signal.c
@@ -36,7 +36,7 @@ int do_signal(struct pt_regs *, sigset_t *);
 asmlinkage int
 sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize,
 		  unsigned long r2, unsigned long r3, unsigned long r4,
-		  unsigned long r5, unsigned long r6, struct pt_regs regs)
+		  unsigned long r5, unsigned long r6, struct pt_regs *regs)
 {
 	sigset_t saveset, newset;
 
@@ -54,21 +54,21 @@ sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize,
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 
-	regs.r0 = -EINTR;
+	regs->r0 = -EINTR;
 	while (1) {
 		current->state = TASK_INTERRUPTIBLE;
 		schedule();
-		if (do_signal(&regs, &saveset))
-			return regs.r0;
+		if (do_signal(regs, &saveset))
+			return regs->r0;
 	}
 }
 
 asmlinkage int
 sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
 		unsigned long r2, unsigned long r3, unsigned long r4,
-		unsigned long r5, unsigned long r6, struct pt_regs regs)
+		unsigned long r5, unsigned long r6, struct pt_regs *regs)
 {
-	return do_sigaltstack(uss, uoss, regs.spu);
+	return do_sigaltstack(uss, uoss, regs->spu);
 }
 
@@ -140,11 +140,10 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
 asmlinkage int
 sys_rt_sigreturn(unsigned long r0, unsigned long r1,
 		 unsigned long r2, unsigned long r3, unsigned long r4,
-		 unsigned long r5, unsigned long r6, struct pt_regs regs)
+		 unsigned long r5, unsigned long r6, struct pt_regs *regs)
 {
-	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs.spu;
+	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->spu;
 	sigset_t set;
-	stack_t st;
 	int result;
 
 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
@@ -158,14 +157,11 @@ sys_rt_sigreturn(unsigned long r0, unsigned long r1,
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 
-	if (restore_sigcontext(&regs, &frame->uc.uc_mcontext, &result))
+	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &result))
 		goto badframe;
 
-	if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
+	if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->spu) == -EFAULT)
 		goto badframe;
-	/* It is more difficult to avoid calling this function than to
-	   call it and ignore errors.  */
-	do_sigaltstack(&st, NULL, regs.spu);
 
 	return result;
 
diff --git a/arch/m32r/kernel/sys_m32r.c b/arch/m32r/kernel/sys_m32r.c
index fe55b28..670cb49 100644
--- a/arch/m32r/kernel/sys_m32r.c
+++ b/arch/m32r/kernel/sys_m32r.c
@@ -29,28 +29,7 @@
 
 /*
  * sys_tas() - test-and-set
- * linuxthreads testing version
  */
-#ifndef CONFIG_SMP
-asmlinkage int sys_tas(int *addr)
-{
-	int oldval;
-	unsigned long flags;
-
-	if (!access_ok(VERIFY_WRITE, addr, sizeof (int)))
-		return -EFAULT;
-	local_irq_save(flags);
-	oldval = *addr;
-	if (!oldval)
-		*addr = 1;
-	local_irq_restore(flags);
-	return oldval;
-}
-#else /* CONFIG_SMP */
-#include <linux/spinlock.h>
-
-static DEFINE_SPINLOCK(tas_lock);
-
 asmlinkage int sys_tas(int *addr)
 {
 	int oldval;
@@ -58,15 +37,43 @@ asmlinkage int sys_tas(int *addr)
 
 	if (!access_ok(VERIFY_WRITE, addr, sizeof (int)))
 		return -EFAULT;
-	_raw_spin_lock(&tas_lock);
-	oldval = *addr;
-	if (!oldval)
-		*addr = 1;
-	_raw_spin_unlock(&tas_lock);
+	/* atomic operation:
+	 *   oldval = *addr; *addr = 1;
+	 */
+	__asm__ __volatile__ (
+		DCACHE_CLEAR("%0", "r4", "%1")
+		"	.fillinsn\n"
+		"1:\n"
+		"	lock	%0, @%1  ->  unlock	%2, @%1\n"
+		"2:\n"
+		/* NOTE:
+		 *   The m32r processor can accept interrupts only
+		 *   at the 32-bit instruction boundary.
+		 *   So, in the above code, the "unlock" instruction
+		 *   can be executed continuously after the "lock"
+		 *   instruction execution without any interruptions.
+		 */
+		".section .fixup,\"ax\"\n"
+		"	.balign 4\n"
+		"3:	ldi	%0, #%3\n"
+		"	seth	r14, #high(2b)\n"
+		"	or3	r14, r14, #low(2b)\n"
+		"	jmp	r14\n"
+		".previous\n"
+		".section __ex_table,\"a\"\n"
+		"	.balign 4\n"
+		"	.long 1b,3b\n"
+		".previous\n"
+		: "=&r" (oldval)
+		: "r" (addr), "r" (1), "i"(-EFAULT)
+		: "r14", "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+		  , "r4"
+#endif /* CONFIG_CHIP_M32700_TS1 */
+	);
 
 	return oldval;
 }
-#endif /* CONFIG_SMP */
 
 /*
  * sys_pipe() is the normal C calling standard for creating
diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
index ce16bbe..2d1dd21 100644
--- a/arch/m32r/lib/usercopy.c
+++ b/arch/m32r/lib/usercopy.c
@@ -64,7 +64,7 @@ do {									\
 		"	.balign 4\n"					\
 		"	.long 0b,3b\n"					\
 		".previous"						\
-		: "=r"(res), "=r"(count), "=&r" (__d0), "=&r" (__d1),	\
+		: "=&r"(res), "=&r"(count), "=&r" (__d0), "=&r" (__d1),	\
 		  "=&r" (__d2)						\
 		: "i"(-EFAULT), "0"(count), "1"(count), "3"(src),	\
 		  "4"(dst)						\
@@ -101,7 +101,7 @@ do {									\
 		"	.balign 4\n"					\
 		"	.long 0b,3b\n"					\
 		".previous"						\
-		: "=r"(res), "=r"(count), "=&r" (__d0), "=&r" (__d1),	\
+		: "=&r"(res), "=&r"(count), "=&r" (__d0), "=&r" (__d1),	\
 		  "=&r" (__d2)						\
 		: "i"(-EFAULT), "0"(count), "1"(count), "3"(src),	\
 		  "4"(dst)						\
diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c
index 6facf15..c9e7dad 100644
--- a/arch/m32r/mm/init.c
+++ b/arch/m32r/mm/init.c
@@ -226,7 +226,7 @@ void free_initmem(void)
 	addr = (unsigned long)(&__init_begin);
 	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
 		ClearPageReserved(virt_to_page(addr));
-		set_page_count(virt_to_page(addr), 1);
+		init_page_count(virt_to_page(addr));
 		free_page(addr);
 		totalram_pages++;
 	}
@@ -244,7 +244,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 	unsigned long p;
 	for (p = start; p < end; p += PAGE_SIZE) {
 		ClearPageReserved(virt_to_page(p));
-		set_page_count(virt_to_page(p), 1);
+		init_page_count(virt_to_page(p));
 		free_page(p);
 		totalram_pages++;
 	}