author		Paul Mundt <lethal@linux-sh.org>	2008-07-29 08:09:44 +0900
committer	Paul Mundt <lethal@linux-sh.org>	2008-07-29 08:09:44 +0900
commit		f15cbe6f1a4b4d9df59142fc8e4abb973302cf44 (patch)
tree		774d7b11abaaf33561ab8268bf51ddd9ceb79025 /arch/sh/include/asm/cache.h
parent		25326277d8d1393d1c66240e6255aca780f9e3eb (diff)
sh: migrate to arch/sh/include/
This follows the sparc changes a439fe51a1f8eb087c22dd24d69cebae4a3addac.

Most of the moving about was done with Sam's directions at:

http://marc.info/?l=linux-sh&m=121724823706062&w=2

with subsequent hacking and fixups entirely my fault.

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh/include/asm/cache.h')
-rw-r--r--	arch/sh/include/asm/cache.h	51
1 file changed, 51 insertions, 0 deletions
diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
new file mode 100644
index 0000000..02df18e
--- /dev/null
+++ b/arch/sh/include/asm/cache.h
@@ -0,0 +1,51 @@
+/* $Id: cache.h,v 1.6 2004/03/11 18:08:05 lethal Exp $
+ *
+ * include/asm-sh/cache.h
+ *
+ * Copyright 1999 (C) Niibe Yutaka
+ * Copyright 2002, 2003 (C) Paul Mundt
+ */
+#ifndef __ASM_SH_CACHE_H
+#define __ASM_SH_CACHE_H
+#ifdef __KERNEL__
+
+#include <linux/init.h>
+#include <cpu/cache.h>
+
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+
+#ifndef __ASSEMBLY__
+struct cache_info {
+ unsigned int ways; /* Number of cache ways */
+ unsigned int sets; /* Number of cache sets */
+ unsigned int linesz; /* Cache line size (bytes) */
+
+ unsigned int way_size; /* sets * line size */
+
+ /*
+ * way_incr is the address offset for accessing the next way
+ * in memory mapped cache array ops.
+ */
+ unsigned int way_incr;
+ unsigned int entry_shift;
+ unsigned int entry_mask;
+
+ /*
+ * Compute a mask which selects the address bits which overlap between
+ * 1. those used to select the cache set during indexing
+ * 2. those in the physical page number.
+ */
+ unsigned int alias_mask;
+
+ unsigned int n_aliases; /* Number of aliases */
+
+ unsigned long flags;
+};
+
+int __init detect_cpu_and_cache_system(void);
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* __ASM_SH_CACHE_H */
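
The struct cache_info added above only declares the cache geometry fields; the comments describe how way_size, way_incr, alias_mask and n_aliases relate, but the actual setup code is elsewhere. The user-space sketch below illustrates those relationships with hypothetical numbers. It is not code from this commit: the names cache_info_sketch and fill_derived_fields(), the assumption that ways are laid out back to back (way_incr == way_size), and the example geometry (256 sets, 32-byte lines, 4 KiB pages) are all made up here for illustration.

/*
 * Hedged sketch, not from this commit: shows how the geometry fields of
 * the cache_info struct above typically derive from ways/sets/linesz.
 * All concrete numbers and the back-to-back way layout are assumptions.
 */
#include <stdio.h>

#define PAGE_SIZE	4096U
#define PAGE_SHIFT	12

struct cache_info_sketch {
	unsigned int ways, sets, linesz;
	unsigned int way_size;		/* sets * linesz */
	unsigned int way_incr;		/* address stride between ways */
	unsigned int entry_shift;	/* log2(linesz) */
	unsigned int entry_mask;	/* selects the set-index bits */
	unsigned int alias_mask;	/* index bits above the page offset */
	unsigned int n_aliases;		/* distinct page colours */
};

static void fill_derived_fields(struct cache_info_sketch *c)
{
	c->way_size    = c->sets * c->linesz;
	c->way_incr    = c->way_size;	/* assumption: ways back to back */
	c->entry_shift = __builtin_ctz(c->linesz);
	c->entry_mask  = (c->sets - 1) << c->entry_shift;

	/* Bits shared by the set index and the physical page number. */
	c->alias_mask = c->entry_mask & ~(PAGE_SIZE - 1);
	c->n_aliases  = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

int main(void)
{
	/* Hypothetical geometry: 256 sets of 32-byte lines per way. */
	struct cache_info_sketch c = { .ways = 2, .sets = 256, .linesz = 32 };

	fill_derived_fields(&c);
	printf("way_size=%u way_incr=%u alias_mask=%#x n_aliases=%u\n",
	       c.way_size, c.way_incr, c.alias_mask, c.n_aliases);
	return 0;
}

With these assumed values each way covers 8 KiB, so two page-sized "colours" of a physical page can land in different sets; alias_mask comes out as 0x1000 and n_aliases as 2, which is the kind of information the cache flushing code needs when it walks the memory-mapped cache arrays.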