about summary refs log tree commit diff stats
path: root/include/asm-mips/hazards.h
diff options
context:
space:
mode:
authorRalf Baechle <ralf@linux-mips.org>2005-03-01 18:12:06 +0000
committerRalf Baechle <ralf@linux-mips.org>2005-10-29 19:30:49 +0100
commit5068debff2dcbc8f624811e3c06d60c7c0bba744 (patch)
tree3d3a2ceceb91e6d5e9af7d28dfb4ddeed270c704 /include/asm-mips/hazards.h
parent0f04afb59565c3029563b9a79b3513c9f3327a27 (diff)
downloadkernel_samsung_aries-5068debff2dcbc8f624811e3c06d60c7c0bba744.zip
kernel_samsung_aries-5068debff2dcbc8f624811e3c06d60c7c0bba744.tar.gz
kernel_samsung_aries-5068debff2dcbc8f624811e3c06d60c7c0bba744.tar.bz2
New hazard handling function back_to_back_c0_hazard() to handle back to
back mtc0 / mfc0 pairs from the same coprocessor register. Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'include/asm-mips/hazards.h')
-rw-r--r-- include/asm-mips/hazards.h | 21
1 file changed, 21 insertions, 0 deletions
diff --git a/include/asm-mips/hazards.h b/include/asm-mips/hazards.h
index f524eac..d6e88cf 100644
--- a/include/asm-mips/hazards.h
+++ b/include/asm-mips/hazards.h
@@ -124,6 +124,9 @@ __asm__(
".set\tmips32\n\t" \
"_ssnop; _ssnop; _ssnop; _ssnop\n\t" \
".set\tmips0")
+
+#define back_to_back_c0_hazard() do { } while (0)
+
#else
/*
@@ -141,6 +144,12 @@ __asm__(
"nop; nop; nop; nop; nop; nop;\n\t" \
".set reorder\n\t")
+#define back_to_back_c0_hazard() \
+ __asm__ __volatile__( \
+ " .set noreorder \n" \
+ " nop; nop; nop \n" \
+ " .set reorder \n")
+
#endif
/*
@@ -170,6 +179,10 @@ __asm__(
__asm__ __volatile__( \
"_ehb\t\t\t\t# irq_disable_hazard")
+#define back_to_back_c0_hazard() \
+ __asm__ __volatile__( \
+ "_ehb\t\t\t\t# back_to_back_c0_hazard")
+
#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000)
/*
@@ -186,6 +199,8 @@ __asm__(
#define irq_enable_hazard() do { } while (0)
#define irq_disable_hazard() do { } while (0)
+#define back_to_back_c0_hazard() do { } while (0)
+
#else
/*
@@ -210,6 +225,12 @@ __asm__(
__asm__ __volatile__( \
"_ssnop; _ssnop; _ssnop;\t\t# irq_disable_hazard")
+#define back_to_back_c0_hazard() \
+ __asm__ __volatile__( \
+ " .set noreorder \n" \
+ " nop; nop; nop \n" \
+ " .set reorder \n")
+
#endif
#endif /* __ASSEMBLY__ */