| author | arnd@arndb.de <arnd@arndb.de> | 2006-06-19 20:33:35 +0200 |
|---|---|---|
| committer | Paul Mackerras <paulus@samba.org> | 2006-06-21 15:01:32 +1000 |
| commit | 379507181a1e330d4f5b0fabe61cd43eccf09763 | |
| tree | dc986704087a40854a9f8a386d125627b20dee14 /include/asm-powerpc | |
| parent | c983294872ebccd4aacf1b8dd694ac2170feadc3 | |
[POWERPC] spufs: one more fix for 64k pages
The SPU context save/restore code is currently built
for a 4k page size, and we provide a _shipped version
of it since most people don't have the SPU toolchain
needed to rebuild that code.
This patch hardcodes the data structures to a 64k
page alignment, which also guarantees 4k alignment
but unfortunately wastes 60k of memory per SPU
context that is created in the running system.
We will follow up with another patch to reduce that
overhead, or perhaps redo the context save/restore
logic to handle this part entirely differently, but for
now this should make experimental systems work with
either page size.
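
As a rough illustration of the alignment trick, here is a minimal userspace sketch, not the kernel code: the struct and field names are made up, and only the `__attribute__((aligned(65536)))` idiom comes from the patch below (it relies on the GCC/Clang aligned attribute).

```c
/*
 * Minimal sketch, not the kernel code: struct and field names are
 * illustrative, only the aligned(65536) attribute mirrors the patch.
 * Builds with GCC or Clang.
 */
#include <stdio.h>
#include <stddef.h>

#define LS_SIZE (256 * 1024)	/* SPU local store size */

struct lscsa_like {
	unsigned int regs[16];	/* stand-in for the register save area */
	/*
	 * A 64k boundary is also a 4k boundary (65536 is a multiple of
	 * 4096), so this satisfies both page sizes.  Padding the member
	 * up to a 64k boundary instead of a 4k one is where the extra
	 * memory per context comes from.
	 */
	unsigned char ls[LS_SIZE] __attribute__((aligned(65536)));
};

int main(void)
{
	printf("offsetof(ls)   = %zu\n", offsetof(struct lscsa_like, ls));
	printf("sizeof(struct) = %zu\n", sizeof(struct lscsa_like));
	printf("64k aligned: %d, 4k aligned: %d\n",
	       offsetof(struct lscsa_like, ls) % 65536 == 0,
	       offsetof(struct lscsa_like, ls) % 4096 == 0);
	return 0;
}
```

Running this prints a 65536-byte offset for 'ls'; the jump from a 4k-aligned offset to a 64k-aligned one accounts for the roughly 60k of extra memory per context mentioned above.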
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'include/asm-powerpc')
-rw-r--r-- | include/asm-powerpc/spu_csa.h | 13 |
1 file changed, 11 insertions, 2 deletions
```diff
diff --git a/include/asm-powerpc/spu_csa.h b/include/asm-powerpc/spu_csa.h
index ba18d7d..964c2d3 100644
--- a/include/asm-powerpc/spu_csa.h
+++ b/include/asm-powerpc/spu_csa.h
@@ -86,10 +86,18 @@ struct spu_lscsa {
 	struct spu_reg128 event_mask;
 	struct spu_reg128 srr0;
 	struct spu_reg128 stopped_status;
-	struct spu_reg128 pad[119]; /* 'ls' must be page-aligned. */
-	unsigned char ls[LS_SIZE];
+
+	/*
+	 * 'ls' must be page-aligned on all configurations.
+	 * Since we don't want to rely on having the spu-gcc
+	 * installed to build the kernel and this structure
+	 * is used in the SPU-side code, make it 64k-page
+	 * aligned for now.
+	 */
+	unsigned char ls[LS_SIZE] __attribute__((aligned(65536)));
 };
 
+#ifndef __SPU__
 /*
  * struct spu_problem_collapsed - condensed problem state area, w/o pads.
  */
@@ -250,6 +258,7 @@ extern int spu_restore(struct spu_state *new, struct spu *spu);
 extern int spu_switch(struct spu_state *prev, struct spu_state *new,
 		      struct spu *spu);
 
+#endif /* !__SPU__ */
 #endif /* __KERNEL__ */
 #endif /* !__ASSEMBLY__ */
 #endif /* _SPU_CSA_H_ */
```
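
For context on the new `#ifndef __SPU__` guard: spu_csa.h is included both by the PowerPC kernel build and by the SPU-side save/restore code built with the SPU cross-compiler, which defines `__SPU__`. The following is a sketch of that shared-header pattern; the `example_*` names are illustrative and are not the real spu_csa.h contents, only the `__SPU__` guard and the 64k-aligned 'ls' member mirror the actual change.

```c
/*
 * Sketch of a header shared between the kernel and SPU-side code.
 * Names are hypothetical; only the __SPU__ guard and the 64k-aligned
 * 'ls' member correspond to the patch above.
 */
#ifndef _EXAMPLE_SHARED_H_
#define _EXAMPLE_SHARED_H_

#define LS_SIZE (256 * 1024)

/* Layout seen by both the kernel and the SPU-side save/restore code. */
struct example_lscsa {
	unsigned int gprs[128][4];
	/* 64k alignment is page alignment for both 4k and 64k kernels. */
	unsigned char ls[LS_SIZE] __attribute__((aligned(65536)));
};

#ifndef __SPU__
/*
 * Kernel-only declarations: the SPU compiler never needs these, and
 * hiding them keeps the SPU-side build independent of kernel types.
 */
struct example_state {
	struct example_lscsa *lscsa;
};

extern int example_save(struct example_state *prev);
extern int example_restore(struct example_state *next);
#endif /* !__SPU__ */

#endif /* _EXAMPLE_SHARED_H_ */
```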