author     Tejun Heo <tj@kernel.org>    2010-01-05 09:17:33 +0900
committer  Tejun Heo <tj@kernel.org>    2010-01-05 09:17:33 +0900
commit     32032df6c2f6c9c6b2ada2ce42322231824f70c2 (patch)
tree       b1ce838a37044bb38dfc128e2116ca35630e629a /arch/sparc
parent     22b737f4c75197372d64afc6ed1bccd58c00e549 (diff)
parent     c5974b835a909ff15c3b7e6cf6789b5eb919f419 (diff)
Merge branch 'master' into percpu
Conflicts:
arch/powerpc/platforms/pseries/hvCall.S
include/linux/percpu.h
Diffstat (limited to 'arch/sparc')
87 files changed, 2325 insertions, 688 deletions
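
One ABI-visible change picked up by this merge is the sparc fcntl.h rework further down, which turns the old O_SYNC bit into O_DSYNC and composes the new O_SYNC from it. As a minimal userspace-style illustration of the scheme described in that hunk's comment (the flag values are copied from the hunk; the classify() helper is hypothetical and not part of the patch):

/* Illustration only, not part of the patch.  Because the new O_SYNC
 * contains the O_DSYNC bit, code that only needs data-integrity
 * semantics can test O_DSYNC and cover callers that passed either flag. */
#include <stdio.h>

#define O_DSYNC  0x2000               /* old O_SYNC value, now data sync only */
#define __O_SYNC 0x800000             /* never tested directly                */
#define O_SYNC   (__O_SYNC|O_DSYNC)   /* full POSIX O_SYNC = both bits set    */

static void classify(unsigned flags)
{
	if (flags & O_DSYNC)                 /* true for O_DSYNC and O_SYNC */
		printf("%#x: at least data-integrity sync requested\n", flags);
	if ((flags & O_SYNC) == O_SYNC)      /* true only for full O_SYNC   */
		printf("%#x: full file-integrity sync requested\n", flags);
}

int main(void)
{
	classify(O_DSYNC);   /* first message only   */
	classify(O_SYNC);    /* both messages print  */
	return 0;
}

As the hunk's comment notes, an application built against the new header but run on an older kernel still gets at least O_DSYNC behaviour, since the older kernel only recognises the 0x2000 bit.
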
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index ac45aab..108197a 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -26,6 +26,7 @@ config SPARC select RTC_CLASS select RTC_DRV_M48T59 select HAVE_PERF_EVENTS + select PERF_USE_VMALLOC select HAVE_DMA_ATTRS select HAVE_DMA_API_DEBUG @@ -42,12 +43,14 @@ config SPARC64 select HAVE_SYSCALL_WRAPPERS select HAVE_DYNAMIC_FTRACE select HAVE_FTRACE_MCOUNT_RECORD + select HAVE_SYSCALL_TRACEPOINTS select USE_GENERIC_SMP_HELPERS if SMP select RTC_DRV_CMOS select RTC_DRV_BQ4802 select RTC_DRV_SUN4V select RTC_DRV_STARFIRE select HAVE_PERF_EVENTS + select PERF_USE_VMALLOC config ARCH_DEFCONFIG string @@ -219,6 +222,13 @@ config SPARC64_SMP default y depends on SPARC64 && SMP +config EARLYFB + bool "Support for early boot text console" + default y + depends on SPARC64 + help + Say Y here to enable a faster early framebuffer boot console. + choice prompt "Kernel page size" if SPARC64 default SPARC64_PAGE_SIZE_8KB diff --git a/arch/sparc/Kconfig.debug b/arch/sparc/Kconfig.debug index 90d5fe2..9d3c889 100644 --- a/arch/sparc/Kconfig.debug +++ b/arch/sparc/Kconfig.debug @@ -33,4 +33,18 @@ config FRAME_POINTER depends on MCOUNT default y +config DEBUG_STRICT_USER_COPY_CHECKS + bool "Strict copy size checks" + depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING + ---help--- + Enabling this option turns a certain set of sanity checks for user + copy operations into compile time failures. + + The copy_from_user() etc checks are there to help test if there + are sufficient security checks on the length argument of + the copy operation, by having gcc prove that the argument is + within bounds. + + If unsure, or if you run an older (pre 4.4) gcc, say N. + endmenu diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile index dfe272d..113225b 100644 --- a/arch/sparc/Makefile +++ b/arch/sparc/Makefile @@ -27,6 +27,7 @@ AS := $(AS) -32 LDFLAGS := -m elf32_sparc CHECKFLAGS += -D__sparc__ export BITS := 32 +UTS_MACHINE := sparc #KBUILD_CFLAGS += -g -pipe -fcall-used-g5 -fcall-used-g7 KBUILD_CFLAGS += -m32 -pipe -mno-fpu -fcall-used-g5 -fcall-used-g7 @@ -46,6 +47,7 @@ CHECKFLAGS += -D__sparc__ -D__sparc_v9__ -D__arch64__ -m64 LDFLAGS := -m elf64_sparc export BITS := 64 +UTS_MACHINE := sparc64 KBUILD_CFLAGS += -m64 -pipe -mno-fpu -mcpu=ultrasparc -mcmodel=medlow \ -ffixed-g4 -ffixed-g5 -fcall-used-g7 -Wno-sign-compare \ diff --git a/arch/sparc/boot/btfixupprep.c b/arch/sparc/boot/btfixupprep.c index 52a4208..bbf91b9 100644 --- a/arch/sparc/boot/btfixupprep.c +++ b/arch/sparc/boot/btfixupprep.c @@ -61,14 +61,14 @@ unsigned long lastfoffset = -1; unsigned long lastfrelno; btfixup *lastf; -void fatal(void) __attribute__((noreturn)); -void fatal(void) +static void fatal(void) __attribute__((noreturn)); +static void fatal(void) { fprintf(stderr, "Malformed output from objdump\n%s\n", buffer); exit(1); } -btfixup *find(int type, char *name) +static btfixup *find(int type, char *name) { int i; for (i = 0; i < last; i++) { @@ -88,7 +88,7 @@ btfixup *find(int type, char *name) return array + last - 1; } -void set_mode (char *buffer) +static void set_mode (char *buffer) { for (mode = 0;; mode++) if (buffer[mode] < '0' || buffer[mode] > '9') diff --git a/arch/sparc/boot/piggyback_32.c b/arch/sparc/boot/piggyback_32.c index e8dc9ad..ac944ae 100644 --- a/arch/sparc/boot/piggyback_32.c +++ b/arch/sparc/boot/piggyback_32.c @@ -35,17 +35,17 @@ * as PROM looks for a.out image only. 
*/ -unsigned short ld2(char *p) +static unsigned short ld2(char *p) { return (p[0] << 8) | p[1]; } -unsigned int ld4(char *p) +static unsigned int ld4(char *p) { return (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]; } -void st4(char *p, unsigned int x) +static void st4(char *p, unsigned int x) { p[0] = x >> 24; p[1] = x >> 16; @@ -53,7 +53,7 @@ void st4(char *p, unsigned int x) p[3] = x; } -void usage(void) +static void usage(void) { /* fs_img.gz is an image of initial ramdisk. */ fprintf(stderr, "Usage: piggyback vmlinux.aout System.map fs_img.gz\n"); @@ -61,7 +61,7 @@ void usage(void) exit(1); } -void die(char *str) +static void die(char *str) { perror (str); exit(1); diff --git a/arch/sparc/boot/piggyback_64.c b/arch/sparc/boot/piggyback_64.c index c63fd1b..a26a686 100644 --- a/arch/sparc/boot/piggyback_64.c +++ b/arch/sparc/boot/piggyback_64.c @@ -32,7 +32,7 @@ /* Note: run this on an a.out kernel (use elftoaout for it), as PROM looks for a.out image onlly usage: piggyback vmlinux System.map tail, where tail is gzipped fs of the initial ramdisk */ -void die(char *str) +static void die(char *str) { perror (str); exit(1); diff --git a/arch/sparc/include/asm/asm-offsets.h b/arch/sparc/include/asm/asm-offsets.h new file mode 100644 index 0000000..d370ee3 --- /dev/null +++ b/arch/sparc/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include <generated/asm-offsets.h> diff --git a/arch/sparc/include/asm/btext.h b/arch/sparc/include/asm/btext.h new file mode 100644 index 0000000..9b2bc6b --- /dev/null +++ b/arch/sparc/include/asm/btext.h @@ -0,0 +1,6 @@ +#ifndef _SPARC_BTEXT_H +#define _SPARC_BTEXT_H + +extern int btext_find_display(void); + +#endif /* _SPARC_BTEXT_H */ diff --git a/arch/sparc/include/asm/cacheflush_32.h b/arch/sparc/include/asm/cacheflush_32.h index 68ac109..2e46877 100644 --- a/arch/sparc/include/asm/cacheflush_32.h +++ b/arch/sparc/include/asm/cacheflush_32.h @@ -75,6 +75,7 @@ BTFIXUPDEF_CALL(void, flush_sig_insns, struct mm_struct *, unsigned long) extern void sparc_flush_page_to_ram(struct page *page); +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 #define flush_dcache_page(page) sparc_flush_page_to_ram(page) #define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0) diff --git a/arch/sparc/include/asm/cacheflush_64.h b/arch/sparc/include/asm/cacheflush_64.h index c433217..b953840 100644 --- a/arch/sparc/include/asm/cacheflush_64.h +++ b/arch/sparc/include/asm/cacheflush_64.h @@ -37,6 +37,7 @@ extern void flush_dcache_page_all(struct mm_struct *mm, struct page *page); #endif extern void __flush_dcache_range(unsigned long start, unsigned long end); +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 extern void flush_dcache_page(struct page *page); #define flush_icache_page(vma, pg) do { } while(0) diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h index 381a1b5..4269ca6 100644 --- a/arch/sparc/include/asm/elf_32.h +++ b/arch/sparc/include/asm/elf_32.h @@ -104,8 +104,6 @@ typedef struct { #define ELF_CLASS ELFCLASS32 #define ELF_DATA ELFDATA2MSB -#define USE_ELF_CORE_DUMP - #define ELF_EXEC_PAGESIZE 4096 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h index d42e393..ff66bb8 100644 --- a/arch/sparc/include/asm/elf_64.h +++ b/arch/sparc/include/asm/elf_64.h @@ -152,7 +152,6 @@ typedef struct { (x)->e_machine == EM_SPARC32PLUS) #define compat_start_thread start_thread32 -#define USE_ELF_CORE_DUMP #define ELF_EXEC_PAGESIZE PAGE_SIZE /* This is the location that an ET_DYN 
program is loaded if exec'ed. Typical diff --git a/arch/sparc/include/asm/fcntl.h b/arch/sparc/include/asm/fcntl.h index d4d9c9d..38f37b33 100644 --- a/arch/sparc/include/asm/fcntl.h +++ b/arch/sparc/include/asm/fcntl.h @@ -1,14 +1,12 @@ #ifndef _SPARC_FCNTL_H #define _SPARC_FCNTL_H -/* open/fcntl - O_SYNC is only implemented on blocks devices and on files - located on an ext2 file system */ #define O_APPEND 0x0008 #define FASYNC 0x0040 /* fcntl, for BSD compatibility */ #define O_CREAT 0x0200 /* not fcntl */ #define O_TRUNC 0x0400 /* not fcntl */ #define O_EXCL 0x0800 /* not fcntl */ -#define O_SYNC 0x2000 +#define O_DSYNC 0x2000 /* used to be O_SYNC, see below */ #define O_NONBLOCK 0x4000 #if defined(__sparc__) && defined(__arch64__) #define O_NDELAY 0x0004 @@ -20,6 +18,21 @@ #define O_DIRECT 0x100000 /* direct disk access hint */ #define O_NOATIME 0x200000 #define O_CLOEXEC 0x400000 +/* + * Before Linux 2.6.33 only O_DSYNC semantics were implemented, but using + * the O_SYNC flag. We continue to use the existing numerical value + * for O_DSYNC semantics now, but using the correct symbolic name for it. + * This new value is used to request true Posix O_SYNC semantics. It is + * defined in this strange way to make sure applications compiled against + * new headers get at least O_DSYNC semantics on older kernels. + * + * This has the nice side-effect that we can simply test for O_DSYNC + * wherever we do not care if O_DSYNC or O_SYNC is used. + * + * Note: __O_SYNC must never be used directly. + */ +#define __O_SYNC 0x800000 +#define O_SYNC (__O_SYNC|O_DSYNC) #define F_GETOWN 5 /* for sockets. */ #define F_SETOWN 6 /* for sockets. */ diff --git a/arch/sparc/include/asm/hardirq_32.h b/arch/sparc/include/asm/hardirq_32.h index 4f63ed8..1620076 100644 --- a/arch/sparc/include/asm/hardirq_32.h +++ b/arch/sparc/include/asm/hardirq_32.h @@ -7,17 +7,7 @@ #ifndef __SPARC_HARDIRQ_H #define __SPARC_HARDIRQ_H -#include <linux/threads.h> -#include <linux/spinlock.h> -#include <linux/cache.h> - -/* entry.S is sensitive to the offsets of these fields */ /* XXX P3 Is it? 
*/ -typedef struct { - unsigned int __softirq_pending; -} ____cacheline_aligned irq_cpustat_t; - -#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ - #define HARDIRQ_BITS 8 +#include <asm-generic/hardirq.h> #endif /* __SPARC_HARDIRQ_H */ diff --git a/arch/sparc/include/asm/irq_32.h b/arch/sparc/include/asm/irq_32.h index ea43057..cbf4801 100644 --- a/arch/sparc/include/asm/irq_32.h +++ b/arch/sparc/include/asm/irq_32.h @@ -6,10 +6,10 @@ #ifndef _SPARC_IRQ_H #define _SPARC_IRQ_H -#include <linux/interrupt.h> - #define NR_IRQS 16 +#include <linux/interrupt.h> + #define irq_canonicalize(irq) (irq) extern void __init init_IRQ(void); diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h index 28a42b7..3ea5964 100644 --- a/arch/sparc/include/asm/leon.h +++ b/arch/sparc/include/asm/leon.h @@ -148,7 +148,7 @@ static inline unsigned long leon_load_reg(unsigned long paddr) return retval; } -extern inline void leon_srmmu_disabletlb(void) +static inline void leon_srmmu_disabletlb(void) { unsigned int retval; __asm__ __volatile__("lda [%%g0] %2, %0\n\t" : "=r"(retval) : "r"(0), @@ -158,7 +158,7 @@ extern inline void leon_srmmu_disabletlb(void) "i"(ASI_LEON_MMUREGS) : "memory"); } -extern inline void leon_srmmu_enabletlb(void) +static inline void leon_srmmu_enabletlb(void) { unsigned int retval; __asm__ __volatile__("lda [%%g0] %2, %0\n\t" : "=r"(retval) : "r"(0), @@ -190,7 +190,7 @@ extern void leon_init_IRQ(void); extern unsigned long last_valid_pfn; -extern inline unsigned long sparc_leon3_get_dcachecfg(void) +static inline unsigned long sparc_leon3_get_dcachecfg(void) { unsigned int retval; __asm__ __volatile__("lda [%1] %2, %0\n\t" : @@ -201,7 +201,7 @@ extern inline unsigned long sparc_leon3_get_dcachecfg(void) } /* enable snooping */ -extern inline void sparc_leon3_enable_snooping(void) +static inline void sparc_leon3_enable_snooping(void) { __asm__ __volatile__ ("lda [%%g0] 2, %%l1\n\t" "set 0x800000, %%l2\n\t" @@ -209,7 +209,14 @@ extern inline void sparc_leon3_enable_snooping(void) "sta %%l2, [%%g0] 2\n\t" : : : "l1", "l2"); }; -extern inline void sparc_leon3_disable_cache(void) +static inline int sparc_leon3_snooping_enabled(void) +{ + u32 cctrl; + __asm__ __volatile__("lda [%%g0] 2, %0\n\t" : "=r"(cctrl)); + return (cctrl >> 23) & 1; +}; + +static inline void sparc_leon3_disable_cache(void) { __asm__ __volatile__ ("lda [%%g0] 2, %%l1\n\t" "set 0x00000f, %%l2\n\t" @@ -340,6 +347,30 @@ extern int leon_flush_needed(void); extern void leon_switch_mm(void); extern int srmmu_swprobe_trace; +#ifdef CONFIG_SMP +extern int leon_smp_nrcpus(void); +extern void leon_clear_profile_irq(int cpu); +extern void leon_smp_done(void); +extern void leon_boot_cpus(void); +extern int leon_boot_one_cpu(int i); +void leon_init_smp(void); +extern void cpu_probe(void); +extern void cpu_idle(void); +extern void init_IRQ(void); +extern void cpu_panic(void); +extern int __leon_processor_id(void); +void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu); + +extern unsigned int real_irq_entry[], smpleon_ticker[]; +extern unsigned int patchme_maybe_smp_msg[]; +extern unsigned long trapbase_cpu1[]; +extern unsigned long trapbase_cpu2[]; +extern unsigned long trapbase_cpu3[]; +extern unsigned int t_nmi[], linux_trap_ipi15_leon[]; +extern unsigned int linux_trap_ipi15_sun4m[]; + +#endif /* CONFIG_SMP */ + #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ @@ -356,6 +387,10 @@ extern int srmmu_swprobe_trace; #define leon_switch_mm() do {} while (0) #define leon_init_IRQ() 
do {} while (0) #define init_leon() do {} while (0) +#define leon_smp_done() do {} while (0) +#define leon_boot_cpus() do {} while (0) +#define leon_boot_one_cpu(i) 1 +#define leon_init_smp() do {} while (0) #endif /* !defined(CONFIG_SPARC_LEON) */ diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h index b63e51c..b0576df 100644 --- a/arch/sparc/include/asm/pci_64.h +++ b/arch/sparc/include/asm/pci_64.h @@ -16,8 +16,6 @@ #define PCI_IRQ_NONE 0xffffffff -#define PCI_CACHE_LINE_BYTES 64 - static inline void pcibios_set_master(struct pci_dev *dev) { /* No special bus mastering setup handling */ diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 0ff92fa..f3cb790 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -41,8 +41,8 @@ #define LOW_OBP_ADDRESS _AC(0x00000000f0000000,UL) #define HI_OBP_ADDRESS _AC(0x0000000100000000,UL) #define VMALLOC_START _AC(0x0000000100000000,UL) -#define VMALLOC_END _AC(0x0000000200000000,UL) -#define VMEMMAP_BASE _AC(0x0000000200000000,UL) +#define VMALLOC_END _AC(0x0000010000000000,UL) +#define VMEMMAP_BASE _AC(0x0000010000000000,UL) #define vmemmap ((struct page *)VMEMMAP_BASE) diff --git a/arch/sparc/include/asm/prom.h b/arch/sparc/include/asm/prom.h index 82a190d..f845828 100644 --- a/arch/sparc/include/asm/prom.h +++ b/arch/sparc/include/asm/prom.h @@ -1,3 +1,4 @@ +#include <linux/of.h> /* linux/of.h gets to determine #include ordering */ #ifndef _SPARC_PROM_H #define _SPARC_PROM_H #ifdef __KERNEL__ @@ -28,50 +29,11 @@ #define of_prop_cmp(s1, s2) strcasecmp((s1), (s2)) #define of_node_cmp(s1, s2) strcmp((s1), (s2)) -typedef u32 phandle; -typedef u32 ihandle; - -struct property { - char *name; - int length; - void *value; - struct property *next; - unsigned long _flags; - unsigned int unique_id; -}; - -struct of_irq_controller; -struct device_node { - const char *name; - const char *type; - phandle node; - char *path_component_name; - char *full_name; - - struct property *properties; - struct property *deadprops; /* removed properties */ - struct device_node *parent; - struct device_node *child; - struct device_node *sibling; - struct device_node *next; /* next device of same type */ - struct device_node *allnext; /* next in list of all nodes */ - struct proc_dir_entry *pde; /* this node's proc directory */ - struct kref kref; - unsigned long _flags; - void *data; - unsigned int unique_id; - - struct of_irq_controller *irq_trans; -}; - struct of_irq_controller { unsigned int (*irq_build)(struct device_node *, unsigned int, void *); void *data; }; -#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags) -#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags) - extern struct device_node *of_find_node_by_cpuid(int cpuid); extern int of_set_property(struct device_node *node, const char *name, void *val, int len); extern struct mutex of_set_property_mutex; @@ -89,15 +51,6 @@ extern void prom_build_devicetree(void); extern void of_populate_present_mask(void); extern void of_fill_in_cpu_data(void); -/* Dummy ref counting routines - to be implemented later */ -static inline struct device_node *of_node_get(struct device_node *node) -{ - return node; -} -static inline void of_node_put(struct device_node *node) -{ -} - /* These routines are here to provide compatibility with how powerpc * handles IRQ mapping for OF device nodes. 
We precompute and permanently * register them in the of_device objects, whereas powerpc computes them @@ -108,12 +61,6 @@ static inline void irq_dispose_mapping(unsigned int virq) { } -/* - * NB: This is here while we transition from using asm/prom.h - * to linux/of.h - */ -#include <linux/of.h> - extern struct device_node *of_console_device; extern char *of_console_path; extern char *of_console_options; diff --git a/arch/sparc/include/asm/rwsem.h b/arch/sparc/include/asm/rwsem.h index 1dc129a..6e56210 100644 --- a/arch/sparc/include/asm/rwsem.h +++ b/arch/sparc/include/asm/rwsem.h @@ -35,8 +35,8 @@ struct rw_semaphore { #endif #define __RWSEM_INITIALIZER(name) \ -{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \ - __RWSEM_DEP_MAP_INIT(name) } +{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \ + LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) } #define DECLARE_RWSEM(name) \ struct rw_semaphore name = __RWSEM_INITIALIZER(name) diff --git a/arch/sparc/include/asm/smp_32.h b/arch/sparc/include/asm/smp_32.h index 58101dc..841905c 100644 --- a/arch/sparc/include/asm/smp_32.h +++ b/arch/sparc/include/asm/smp_32.h @@ -106,6 +106,15 @@ static inline int hard_smp4d_processor_id(void) return cpuid; } +extern inline int hard_smpleon_processor_id(void) +{ + int cpuid; + __asm__ __volatile__("rd %%asr17,%0\n\t" + "srl %0,28,%0" : + "=&r" (cpuid) : ); + return cpuid; +} + #ifndef MODULE static inline int hard_smp_processor_id(void) { diff --git a/arch/sparc/include/asm/socket.h b/arch/sparc/include/asm/socket.h index 3a5ae3d..9d3fefc 100644 --- a/arch/sparc/include/asm/socket.h +++ b/arch/sparc/include/asm/socket.h @@ -56,6 +56,8 @@ #define SO_TIMESTAMPING 0x0023 #define SCM_TIMESTAMPING SO_TIMESTAMPING +#define SO_RXQ_OVFL 0x0024 + /* Security levels - as per NRL IPv6 - don't actually do anything */ #define SO_SECURITY_AUTHENTICATION 0x5001 #define SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002 diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h index 857630c..7f9b9db 100644 --- a/arch/sparc/include/asm/spinlock_32.h +++ b/arch/sparc/include/asm/spinlock_32.h @@ -10,12 +10,12 @@ #include <asm/psr.h> -#define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0) +#define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0) -#define __raw_spin_unlock_wait(lock) \ - do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) +#define arch_spin_unlock_wait(lock) \ + do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { __asm__ __volatile__( "\n1:\n\t" @@ -35,7 +35,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) : "g2", "memory", "cc"); } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { unsigned int result; __asm__ __volatile__("ldstub [%1], %0" @@ -45,7 +45,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) return (result == 0); } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory"); } @@ -65,7 +65,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) * Sort of like atomic_t's on Sparc, but even more clever. 
* * ------------------------------------ - * | 24-bit counter | wlock | raw_rwlock_t + * | 24-bit counter | wlock | arch_rwlock_t * ------------------------------------ * 31 8 7 0 * @@ -76,9 +76,9 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) * * Unfortunately this scheme limits us to ~16,000,000 cpus. */ -static inline void arch_read_lock(raw_rwlock_t *rw) +static inline void __arch_read_lock(arch_rwlock_t *rw) { - register raw_rwlock_t *lp asm("g1"); + register arch_rwlock_t *lp asm("g1"); lp = rw; __asm__ __volatile__( "mov %%o7, %%g4\n\t" @@ -89,16 +89,16 @@ static inline void arch_read_lock(raw_rwlock_t *rw) : "g2", "g4", "memory", "cc"); } -#define __raw_read_lock(lock) \ +#define arch_read_lock(lock) \ do { unsigned long flags; \ local_irq_save(flags); \ - arch_read_lock(lock); \ + __arch_read_lock(lock); \ local_irq_restore(flags); \ } while(0) -static inline void arch_read_unlock(raw_rwlock_t *rw) +static inline void __arch_read_unlock(arch_rwlock_t *rw) { - register raw_rwlock_t *lp asm("g1"); + register arch_rwlock_t *lp asm("g1"); lp = rw; __asm__ __volatile__( "mov %%o7, %%g4\n\t" @@ -109,16 +109,16 @@ static inline void arch_read_unlock(raw_rwlock_t *rw) : "g2", "g4", "memory", "cc"); } -#define __raw_read_unlock(lock) \ +#define arch_read_unlock(lock) \ do { unsigned long flags; \ local_irq_save(flags); \ - arch_read_unlock(lock); \ + __arch_read_unlock(lock); \ local_irq_restore(flags); \ } while(0) -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { - register raw_rwlock_t *lp asm("g1"); + register arch_rwlock_t *lp asm("g1"); lp = rw; __asm__ __volatile__( "mov %%o7, %%g4\n\t" @@ -130,7 +130,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) *(volatile __u32 *)&lp->lock = ~0U; } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { unsigned int val; @@ -150,9 +150,9 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) return (val == 0); } -static inline int arch_read_trylock(raw_rwlock_t *rw) +static inline int __arch_read_trylock(arch_rwlock_t *rw) { - register raw_rwlock_t *lp asm("g1"); + register arch_rwlock_t *lp asm("g1"); register int res asm("o0"); lp = rw; __asm__ __volatile__( @@ -165,27 +165,27 @@ static inline int arch_read_trylock(raw_rwlock_t *rw) return res; } -#define __raw_read_trylock(lock) \ +#define arch_read_trylock(lock) \ ({ unsigned long flags; \ int res; \ local_irq_save(flags); \ - res = arch_read_trylock(lock); \ + res = __arch_read_trylock(lock); \ local_irq_restore(flags); \ res; \ }) -#define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0) +#define arch_write_unlock(rw) do { (rw)->lock = 0; } while(0) -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) -#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw) -#define __raw_write_lock_flags(rw, flags) __raw_write_lock(rw) +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) +#define arch_read_lock_flags(rw, flags) arch_read_lock(rw) +#define arch_write_lock_flags(rw, flags) arch_write_lock(rw) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() -#define __raw_read_can_lock(rw) (!((rw)->lock & 0xff)) -#define __raw_write_can_lock(rw) (!(rw)->lock) +#define arch_read_can_lock(rw) (!((rw)->lock & 0xff)) 
+#define arch_write_can_lock(rw) (!(rw)->lock) #endif /* !(__ASSEMBLY__) */ diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h index 43e5147..073936a 100644 --- a/arch/sparc/include/asm/spinlock_64.h +++ b/arch/sparc/include/asm/spinlock_64.h @@ -21,13 +21,13 @@ * the spinner sections must be pre-V9 branches. */ -#define __raw_spin_is_locked(lp) ((lp)->lock != 0) +#define arch_spin_is_locked(lp) ((lp)->lock != 0) -#define __raw_spin_unlock_wait(lp) \ +#define arch_spin_unlock_wait(lp) \ do { rmb(); \ } while((lp)->lock) -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { unsigned long tmp; @@ -46,7 +46,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) : "memory"); } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { unsigned long result; @@ -59,7 +59,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) return (result == 0UL); } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { __asm__ __volatile__( " stb %%g0, [%0]" @@ -68,7 +68,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) : "memory"); } -static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) +static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { unsigned long tmp1, tmp2; @@ -92,7 +92,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ -static void inline arch_read_lock(raw_rwlock_t *lock) +static void inline arch_read_lock(arch_rwlock_t *lock) { unsigned long tmp1, tmp2; @@ -115,7 +115,7 @@ static void inline arch_read_lock(raw_rwlock_t *lock) : "memory"); } -static int inline arch_read_trylock(raw_rwlock_t *lock) +static int inline arch_read_trylock(arch_rwlock_t *lock) { int tmp1, tmp2; @@ -136,7 +136,7 @@ static int inline arch_read_trylock(raw_rwlock_t *lock) return tmp1; } -static void inline arch_read_unlock(raw_rwlock_t *lock) +static void inline arch_read_unlock(arch_rwlock_t *lock) { unsigned long tmp1, tmp2; @@ -152,7 +152,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock) : "memory"); } -static void inline arch_write_lock(raw_rwlock_t *lock) +static void inline arch_write_lock(arch_rwlock_t *lock) { unsigned long mask, tmp1, tmp2; @@ -177,7 +177,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock) : "memory"); } -static void inline arch_write_unlock(raw_rwlock_t *lock) +static void inline arch_write_unlock(arch_rwlock_t *lock) { __asm__ __volatile__( " stw %%g0, [%0]" @@ -186,7 +186,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock) : "memory"); } -static int inline arch_write_trylock(raw_rwlock_t *lock) +static int inline arch_write_trylock(arch_rwlock_t *lock) { unsigned long mask, tmp1, tmp2, result; @@ -210,21 +210,21 @@ static int inline arch_write_trylock(raw_rwlock_t *lock) return result; } -#define __raw_read_lock(p) arch_read_lock(p) -#define __raw_read_lock_flags(p, f) arch_read_lock(p) -#define __raw_read_trylock(p) arch_read_trylock(p) -#define __raw_read_unlock(p) arch_read_unlock(p) -#define __raw_write_lock(p) arch_write_lock(p) -#define __raw_write_lock_flags(p, f) arch_write_lock(p) -#define __raw_write_unlock(p) arch_write_unlock(p) -#define __raw_write_trylock(p) arch_write_trylock(p) - -#define __raw_read_can_lock(rw) 
(!((rw)->lock & 0x80000000UL)) -#define __raw_write_can_lock(rw) (!(rw)->lock) - -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_read_lock(p) arch_read_lock(p) +#define arch_read_lock_flags(p, f) arch_read_lock(p) +#define arch_read_trylock(p) arch_read_trylock(p) +#define arch_read_unlock(p) arch_read_unlock(p) +#define arch_write_lock(p) arch_write_lock(p) +#define arch_write_lock_flags(p, f) arch_write_lock(p) +#define arch_write_unlock(p) arch_write_unlock(p) +#define arch_write_trylock(p) arch_write_trylock(p) + +#define arch_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) +#define arch_write_can_lock(rw) (!(rw)->lock) + +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* !(__ASSEMBLY__) */ diff --git a/arch/sparc/include/asm/spinlock_types.h b/arch/sparc/include/asm/spinlock_types.h index 37cbe01..9c454fd 100644 --- a/arch/sparc/include/asm/spinlock_types.h +++ b/arch/sparc/include/asm/spinlock_types.h @@ -7,14 +7,14 @@ typedef struct { volatile unsigned char lock; -} raw_spinlock_t; +} arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { 0 } +#define __ARCH_RW_LOCK_UNLOCKED { 0 } #endif diff --git a/arch/sparc/include/asm/string_32.h b/arch/sparc/include/asm/string_32.h index 6c5fddb..edf196e 100644 --- a/arch/sparc/include/asm/string_32.h +++ b/arch/sparc/include/asm/string_32.h @@ -16,8 +16,6 @@ #ifdef __KERNEL__ extern void __memmove(void *,const void *,__kernel_size_t); -extern __kernel_size_t __memcpy(void *,const void *,__kernel_size_t); -extern __kernel_size_t __memset(void *,int,__kernel_size_t); #ifndef EXPORT_SYMTAB_STROPS @@ -32,82 +30,10 @@ extern __kernel_size_t __memset(void *,int,__kernel_size_t); }) #define __HAVE_ARCH_MEMCPY - -static inline void *__constant_memcpy(void *to, const void *from, __kernel_size_t n) -{ - extern void __copy_1page(void *, const void *); - - if(n <= 32) { - __builtin_memcpy(to, from, n); - } else if (((unsigned int) to & 7) != 0) { - /* Destination is not aligned on the double-word boundary */ - __memcpy(to, from, n); - } else { - switch(n) { - case PAGE_SIZE: - __copy_1page(to, from); - break; - default: - __memcpy(to, from, n); - break; - } - } - return to; -} - -static inline void *__nonconstant_memcpy(void *to, const void *from, __kernel_size_t n) -{ - __memcpy(to, from, n); - return to; -} - -#undef memcpy -#define memcpy(t, f, n) \ -(__builtin_constant_p(n) ? 
\ - __constant_memcpy((t),(f),(n)) : \ - __nonconstant_memcpy((t),(f),(n))) +#define memcpy(t, f, n) __builtin_memcpy(t, f, n) #define __HAVE_ARCH_MEMSET - -static inline void *__constant_c_and_count_memset(void *s, char c, __kernel_size_t count) -{ - extern void bzero_1page(void *); - extern __kernel_size_t __bzero(void *, __kernel_size_t); - - if(!c) { - if(count == PAGE_SIZE) - bzero_1page(s); - else - __bzero(s, count); - } else { - __memset(s, c, count); - } - return s; -} - -static inline void *__constant_c_memset(void *s, char c, __kernel_size_t count) -{ - extern __kernel_size_t __bzero(void *, __kernel_size_t); - - if(!c) - __bzero(s, count); - else - __memset(s, c, count); - return s; -} - -static inline void *__nonconstant_memset(void *s, char c, __kernel_size_t count) -{ - __memset(s, c, count); - return s; -} - -#undef memset -#define memset(s, c, count) \ -(__builtin_constant_p(c) ? (__builtin_constant_p(count) ? \ - __constant_c_and_count_memset((s), (c), (count)) : \ - __constant_c_memset((s), (c), (count))) \ - : __nonconstant_memset((s), (c), (count))) +#define memset(s, c, count) __builtin_memset(s, c, count) #define __HAVE_ARCH_MEMSCAN diff --git a/arch/sparc/include/asm/string_64.h b/arch/sparc/include/asm/string_64.h index 43161f2..9623bc2 100644 --- a/arch/sparc/include/asm/string_64.h +++ b/arch/sparc/include/asm/string_64.h @@ -15,8 +15,6 @@ #include <asm/asi.h> -extern void *__memset(void *,int,__kernel_size_t); - #ifndef EXPORT_SYMTAB_STROPS /* First the mem*() things. */ @@ -24,29 +22,10 @@ extern void *__memset(void *,int,__kernel_size_t); extern void *memmove(void *, const void *, __kernel_size_t); #define __HAVE_ARCH_MEMCPY -extern void *memcpy(void *, const void *, __kernel_size_t); +#define memcpy(t, f, n) __builtin_memcpy(t, f, n) #define __HAVE_ARCH_MEMSET -extern void *__builtin_memset(void *,int,__kernel_size_t); - -static inline void *__constant_memset(void *s, int c, __kernel_size_t count) -{ - extern __kernel_size_t __bzero(void *, __kernel_size_t); - - if (!c) { - __bzero(s, count); - return s; - } else - return __memset(s, c, count); -} - -#undef memset -#define memset(s, c, count) \ -((__builtin_constant_p(count) && (count) <= 32) ? \ - __builtin_memset((s), (c), (count)) : \ - (__builtin_constant_p(c) ? \ - __constant_memset((s), (c), (count)) : \ - __memset((s), (c), (count)))) +#define memset(s, c, count) __builtin_memset(s, c, count) #define __HAVE_ARCH_MEMSCAN diff --git a/arch/sparc/include/asm/system_64.h b/arch/sparc/include/asm/system_64.h index 25e848f..d47a98e 100644 --- a/arch/sparc/include/asm/system_64.h +++ b/arch/sparc/include/asm/system_64.h @@ -63,6 +63,10 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \ : : : "memory"); \ } while (0) +/* The kernel always executes in TSO memory model these days, + * and furthermore most sparc64 chips implement more stringent + * memory ordering than required by the specifications. 
+ */ #define mb() membar_safe("#StoreLoad") #define rmb() __asm__ __volatile__("":::"memory") #define wmb() __asm__ __volatile__("":::"memory") diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h index 1b45a7b..7257ebb 100644 --- a/arch/sparc/include/asm/thread_info_64.h +++ b/arch/sparc/include/asm/thread_info_64.h @@ -227,6 +227,7 @@ register struct thread_info *current_thread_info_reg asm("g6"); /* flag bit 8 is available */ #define TIF_SECCOMP 9 /* secure computing */ #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */ +#define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */ /* flag bit 11 is available */ /* NOTE: Thread flags >= 12 should be ones we have no interest * in using in assembly, else we can't use the mask as @@ -246,6 +247,7 @@ register struct thread_info *current_thread_info_reg asm("g6"); #define _TIF_32BIT (1<<TIF_32BIT) #define _TIF_SECCOMP (1<<TIF_SECCOMP) #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) +#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) #define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING) #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) #define _TIF_FREEZE (1<<TIF_FREEZE) diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h index 8303ac4..489d2ba 100644 --- a/arch/sparc/include/asm/uaccess_32.h +++ b/arch/sparc/include/asm/uaccess_32.h @@ -260,8 +260,23 @@ static inline unsigned long __copy_to_user(void __user *to, const void *from, un return __copy_user(to, (__force void __user *) from, n); } +extern void copy_from_user_overflow(void) +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS + __compiletime_error("copy_from_user() buffer size is not provably correct") +#else + __compiletime_warning("copy_from_user() buffer size is not provably correct") +#endif +; + static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) { + int sz = __compiletime_object_size(to); + + if (unlikely(sz != -1 && sz < n)) { + copy_from_user_overflow(); + return -EFAULT; + } + if (n && __access_ok((unsigned long) from, n)) return __copy_user((__force void __user *) to, from, n); else diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index 9ea271e..dbc1416 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -6,6 +6,7 @@ */ #ifdef __KERNEL__ +#include <linux/errno.h> #include <linux/compiler.h> #include <linux/string.h> #include <linux/thread_info.h> @@ -204,6 +205,14 @@ __asm__ __volatile__( \ extern int __get_user_bad(void); +extern void copy_from_user_overflow(void) +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS + __compiletime_error("copy_from_user() buffer size is not provably correct") +#else + __compiletime_warning("copy_from_user() buffer size is not provably correct") +#endif +; + extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long size); @@ -212,10 +221,16 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from, static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long size) { - unsigned long ret = ___copy_from_user(to, from, size); - - if (unlikely(ret)) - ret = copy_from_user_fixup(to, from, size); + unsigned long ret = (unsigned long) -EFAULT; + int sz = __compiletime_object_size(to); + + if (likely(sz == -1 || sz >= size)) { + ret = ___copy_from_user(to, from, size); + if (unlikely(ret)) + ret = copy_from_user_fixup(to, from, size); + 
} else { + copy_from_user_overflow(); + } return ret; } #define __copy_from_user copy_from_user diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h index 42f2316..cb4b9bf 100644 --- a/arch/sparc/include/asm/unistd.h +++ b/arch/sparc/include/asm/unistd.h @@ -396,8 +396,9 @@ #define __NR_pwritev 325 #define __NR_rt_tgsigqueueinfo 326 #define __NR_perf_event_open 327 +#define __NR_recvmmsg 328 -#define NR_SYSCALLS 328 +#define NR_syscalls 329 #ifdef __32bit_syscall_numbers__ /* Sparc 32-bit only has the "setresuid32", "getresuid32" variants, diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index 5b47fab..c631614 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile @@ -72,7 +72,7 @@ obj-y += dma.o obj-$(CONFIG_SPARC32_PCI) += pcic.o obj-$(CONFIG_SMP) += trampoline_$(BITS).o smp_$(BITS).o -obj-$(CONFIG_SPARC32_SMP) += sun4m_smp.o sun4d_smp.o +obj-$(CONFIG_SPARC32_SMP) += sun4m_smp.o sun4d_smp.o leon_smp.o obj-$(CONFIG_SPARC64_SMP) += hvtramp.o obj-y += auxio_$(BITS).o @@ -87,6 +87,7 @@ obj-$(CONFIG_KGDB) += kgdb_$(BITS).o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o CFLAGS_REMOVE_ftrace.o := -pg +obj-$(CONFIG_EARLYFB) += btext.o obj-$(CONFIG_STACKTRACE) += stacktrace.o # sparc64 PCI obj-$(CONFIG_SPARC64_PCI) += pci.o pci_common.o psycho_common.o diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c index 9c11582..71ec90b 100644 --- a/arch/sparc/kernel/apc.c +++ b/arch/sparc/kernel/apc.c @@ -10,7 +10,6 @@ #include <linux/errno.h> #include <linux/init.h> #include <linux/miscdevice.h> -#include <linux/smp_lock.h> #include <linux/pm.h> #include <linux/of.h> #include <linux/of_device.h> @@ -76,7 +75,6 @@ static inline void apc_free(struct of_device *op) static int apc_open(struct inode *inode, struct file *f) { - cycle_kernel_lock(); return 0; } @@ -87,61 +85,46 @@ static int apc_release(struct inode *inode, struct file *f) static long apc_ioctl(struct file *f, unsigned int cmd, unsigned long __arg) { - __u8 inarg, __user *arg; - - arg = (__u8 __user *) __arg; - - lock_kernel(); + __u8 inarg, __user *arg = (__u8 __user *) __arg; switch (cmd) { case APCIOCGFANCTL: - if (put_user(apc_readb(APC_FANCTL_REG) & APC_REGMASK, arg)) { - unlock_kernel(); + if (put_user(apc_readb(APC_FANCTL_REG) & APC_REGMASK, arg)) return -EFAULT; - } break; case APCIOCGCPWR: - if (put_user(apc_readb(APC_CPOWER_REG) & APC_REGMASK, arg)) { - unlock_kernel(); + if (put_user(apc_readb(APC_CPOWER_REG) & APC_REGMASK, arg)) return -EFAULT; - } break; case APCIOCGBPORT: - if (put_user(apc_readb(APC_BPORT_REG) & APC_BPMASK, arg)) { - unlock_kernel(); + if (put_user(apc_readb(APC_BPORT_REG) & APC_BPMASK, arg)) return -EFAULT; - } break; case APCIOCSFANCTL: - if (get_user(inarg, arg)) { - unlock_kernel(); + if (get_user(inarg, arg)) return -EFAULT; - } apc_writeb(inarg & APC_REGMASK, APC_FANCTL_REG); break; + case APCIOCSCPWR: - if (get_user(inarg, arg)) { - unlock_kernel(); + if (get_user(inarg, arg)) return -EFAULT; - } apc_writeb(inarg & APC_REGMASK, APC_CPOWER_REG); break; + case APCIOCSBPORT: - if (get_user(inarg, arg)) { - unlock_kernel(); + if (get_user(inarg, arg)) return -EFAULT; - } apc_writeb(inarg & APC_BPMASK, APC_BPORT_REG); break; + default: - unlock_kernel(); return -EINVAL; }; - unlock_kernel(); return 0; } diff --git a/arch/sparc/kernel/auxio_32.c b/arch/sparc/kernel/auxio_32.c index 45c4123..ee8d214 100644 --- a/arch/sparc/kernel/auxio_32.c +++ b/arch/sparc/kernel/auxio_32.c @@ -28,6 +28,7 @@ void __init auxio_probe(void) struct resource r; 
switch (sparc_cpu_model) { + case sparc_leon: case sun4d: case sun4: return; diff --git a/arch/sparc/kernel/btext.c b/arch/sparc/kernel/btext.c new file mode 100644 index 0000000..8cc2d56 --- /dev/null +++ b/arch/sparc/kernel/btext.c @@ -0,0 +1,673 @@ +/* + * Procedures for drawing on the screen early on in the boot process. + * + * Benjamin Herrenschmidt <benh@kernel.crashing.org> + */ +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/console.h> + +#include <asm/btext.h> +#include <asm/oplib.h> +#include <asm/io.h> + +#define NO_SCROLL + +#ifndef NO_SCROLL +static void scrollscreen(void); +#endif + +static void draw_byte(unsigned char c, long locX, long locY); +static void draw_byte_32(unsigned char *bits, unsigned int *base, int rb); +static void draw_byte_16(unsigned char *bits, unsigned int *base, int rb); +static void draw_byte_8(unsigned char *bits, unsigned int *base, int rb); + +#define __force_data __attribute__((__section__(".data"))) + +static int g_loc_X __force_data; +static int g_loc_Y __force_data; +static int g_max_loc_X __force_data; +static int g_max_loc_Y __force_data; + +static int dispDeviceRowBytes __force_data; +static int dispDeviceDepth __force_data; +static int dispDeviceRect[4] __force_data; +static unsigned char *dispDeviceBase __force_data; + +#define cmapsz (16*256) + +static unsigned char vga_font[cmapsz]; + +static int __init btext_initialize(unsigned int node) +{ + unsigned int width, height, depth, pitch; + unsigned long address = 0; + u32 prop; + + if (prom_getproperty(node, "width", (char *)&width, 4) < 0) + return -EINVAL; + if (prom_getproperty(node, "height", (char *)&height, 4) < 0) + return -EINVAL; + if (prom_getproperty(node, "depth", (char *)&depth, 4) < 0) + return -EINVAL; + pitch = width * ((depth + 7) / 8); + + if (prom_getproperty(node, "linebytes", (char *)&prop, 4) >= 0 && + prop != 0xffffffffu) + pitch = prop; + + if (pitch == 1) + pitch = 0x1000; + + if (prom_getproperty(node, "address", (char *)&prop, 4) >= 0) + address = prop; + + /* FIXME: Add support for PCI reg properties. Right now, only + * reliable on macs + */ + if (address == 0) + return -EINVAL; + + g_loc_X = 0; + g_loc_Y = 0; + g_max_loc_X = width / 8; + g_max_loc_Y = height / 16; + dispDeviceBase = (unsigned char *)address; + dispDeviceRowBytes = pitch; + dispDeviceDepth = depth == 15 ? 
16 : depth; + dispDeviceRect[0] = dispDeviceRect[1] = 0; + dispDeviceRect[2] = width; + dispDeviceRect[3] = height; + + return 0; +} + +/* Calc the base address of a given point (x,y) */ +static unsigned char * calc_base(int x, int y) +{ + unsigned char *base = dispDeviceBase; + + base += (x + dispDeviceRect[0]) * (dispDeviceDepth >> 3); + base += (y + dispDeviceRect[1]) * dispDeviceRowBytes; + return base; +} + +static void btext_clearscreen(void) +{ + unsigned int *base = (unsigned int *)calc_base(0, 0); + unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) * + (dispDeviceDepth >> 3)) >> 2; + int i,j; + + for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1]); i++) + { + unsigned int *ptr = base; + for(j=width; j; --j) + *(ptr++) = 0; + base += (dispDeviceRowBytes >> 2); + } +} + +#ifndef NO_SCROLL +static void scrollscreen(void) +{ + unsigned int *src = (unsigned int *)calc_base(0,16); + unsigned int *dst = (unsigned int *)calc_base(0,0); + unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) * + (dispDeviceDepth >> 3)) >> 2; + int i,j; + + for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1] - 16); i++) + { + unsigned int *src_ptr = src; + unsigned int *dst_ptr = dst; + for(j=width; j; --j) + *(dst_ptr++) = *(src_ptr++); + src += (dispDeviceRowBytes >> 2); + dst += (dispDeviceRowBytes >> 2); + } + for (i=0; i<16; i++) + { + unsigned int *dst_ptr = dst; + for(j=width; j; --j) + *(dst_ptr++) = 0; + dst += (dispDeviceRowBytes >> 2); + } +} +#endif /* ndef NO_SCROLL */ + +void btext_drawchar(char c) +{ + int cline = 0; +#ifdef NO_SCROLL + int x; +#endif + switch (c) { + case '\b': + if (g_loc_X > 0) + --g_loc_X; + break; + case '\t': + g_loc_X = (g_loc_X & -8) + 8; + break; + case '\r': + g_loc_X = 0; + break; + case '\n': + g_loc_X = 0; + g_loc_Y++; + cline = 1; + break; + default: + draw_byte(c, g_loc_X++, g_loc_Y); + } + if (g_loc_X >= g_max_loc_X) { + g_loc_X = 0; + g_loc_Y++; + cline = 1; + } +#ifndef NO_SCROLL + while (g_loc_Y >= g_max_loc_Y) { + scrollscreen(); + g_loc_Y--; + } +#else + /* wrap around from bottom to top of screen so we don't + waste time scrolling each line. -- paulus. 
*/ + if (g_loc_Y >= g_max_loc_Y) + g_loc_Y = 0; + if (cline) { + for (x = 0; x < g_max_loc_X; ++x) + draw_byte(' ', x, g_loc_Y); + } +#endif +} + +static void btext_drawtext(const char *c, unsigned int len) +{ + while (len--) + btext_drawchar(*c++); +} + +static void draw_byte(unsigned char c, long locX, long locY) +{ + unsigned char *base = calc_base(locX << 3, locY << 4); + unsigned char *font = &vga_font[((unsigned int)c) * 16]; + int rb = dispDeviceRowBytes; + + switch(dispDeviceDepth) { + case 24: + case 32: + draw_byte_32(font, (unsigned int *)base, rb); + break; + case 15: + case 16: + draw_byte_16(font, (unsigned int *)base, rb); + break; + case 8: + draw_byte_8(font, (unsigned int *)base, rb); + break; + } +} + +static unsigned int expand_bits_8[16] = { + 0x00000000, + 0x000000ff, + 0x0000ff00, + 0x0000ffff, + 0x00ff0000, + 0x00ff00ff, + 0x00ffff00, + 0x00ffffff, + 0xff000000, + 0xff0000ff, + 0xff00ff00, + 0xff00ffff, + 0xffff0000, + 0xffff00ff, + 0xffffff00, + 0xffffffff +}; + +static unsigned int expand_bits_16[4] = { + 0x00000000, + 0x0000ffff, + 0xffff0000, + 0xffffffff +}; + + +static void draw_byte_32(unsigned char *font, unsigned int *base, int rb) +{ + int l, bits; + int fg = 0xFFFFFFFFUL; + int bg = 0x00000000UL; + + for (l = 0; l < 16; ++l) + { + bits = *font++; + base[0] = (-(bits >> 7) & fg) ^ bg; + base[1] = (-((bits >> 6) & 1) & fg) ^ bg; + base[2] = (-((bits >> 5) & 1) & fg) ^ bg; + base[3] = (-((bits >> 4) & 1) & fg) ^ bg; + base[4] = (-((bits >> 3) & 1) & fg) ^ bg; + base[5] = (-((bits >> 2) & 1) & fg) ^ bg; + base[6] = (-((bits >> 1) & 1) & fg) ^ bg; + base[7] = (-(bits & 1) & fg) ^ bg; + base = (unsigned int *) ((char *)base + rb); + } +} + +static void draw_byte_16(unsigned char *font, unsigned int *base, int rb) +{ + int l, bits; + int fg = 0xFFFFFFFFUL; + int bg = 0x00000000UL; + unsigned int *eb = (int *)expand_bits_16; + + for (l = 0; l < 16; ++l) + { + bits = *font++; + base[0] = (eb[bits >> 6] & fg) ^ bg; + base[1] = (eb[(bits >> 4) & 3] & fg) ^ bg; + base[2] = (eb[(bits >> 2) & 3] & fg) ^ bg; + base[3] = (eb[bits & 3] & fg) ^ bg; + base = (unsigned int *) ((char *)base + rb); + } +} + +static void draw_byte_8(unsigned char *font, unsigned int *base, int rb) +{ + int l, bits; + int fg = 0x0F0F0F0FUL; + int bg = 0x00000000UL; + unsigned int *eb = (int *)expand_bits_8; + + for (l = 0; l < 16; ++l) + { + bits = *font++; + base[0] = (eb[bits >> 4] & fg) ^ bg; + base[1] = (eb[bits & 0xf] & fg) ^ bg; + base = (unsigned int *) ((char *)base + rb); + } +} + +static void btext_console_write(struct console *con, const char *s, + unsigned int n) +{ + btext_drawtext(s, n); +} + +static struct console btext_console = { + .name = "btext", + .write = btext_console_write, + .flags = CON_PRINTBUFFER | CON_ENABLED | CON_BOOT | CON_ANYTIME, + .index = 0, +}; + +int __init btext_find_display(void) +{ + unsigned int node; + char type[32]; + int ret; + + node = prom_inst2pkg(prom_stdout); + if (prom_getproperty(node, "device_type", type, 32) < 0) + return -ENODEV; + if (strcmp(type, "display")) + return -ENODEV; + + ret = btext_initialize(node); + if (!ret) { + btext_clearscreen(); + register_console(&btext_console); + } + return ret; +} + +static unsigned char vga_font[cmapsz] = { +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x81, 0xa5, 0x81, 0x81, 0xbd, +0x99, 0x81, 0x81, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xff, +0xdb, 0xff, 0xff, 0xc3, 0xe7, 0xff, 0xff, 0x7e, 0x00, 0x00, 0x00, 0x00, +0x00, 
0x00, 0x00, 0x00, 0x6c, 0xfe, 0xfe, 0xfe, 0xfe, 0x7c, 0x38, 0x10, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x7c, 0xfe, +0x7c, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, +0x3c, 0x3c, 0xe7, 0xe7, 0xe7, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x18, 0x3c, 0x7e, 0xff, 0xff, 0x7e, 0x18, 0x18, 0x3c, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, +0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0xe7, 0xc3, 0xc3, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x42, 0x42, 0x66, 0x3c, 0x00, +0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc3, 0x99, 0xbd, +0xbd, 0x99, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x1e, 0x0e, +0x1a, 0x32, 0x78, 0xcc, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x3c, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x18, 0x7e, 0x18, 0x18, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x33, 0x3f, 0x30, 0x30, 0x30, +0x30, 0x70, 0xf0, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x63, +0x7f, 0x63, 0x63, 0x63, 0x63, 0x67, 0xe7, 0xe6, 0xc0, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x18, 0x18, 0xdb, 0x3c, 0xe7, 0x3c, 0xdb, 0x18, 0x18, +0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfe, 0xf8, +0xf0, 0xe0, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x06, 0x0e, +0x1e, 0x3e, 0xfe, 0x3e, 0x1e, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, +0x66, 0x00, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xdb, +0xdb, 0xdb, 0x7b, 0x1b, 0x1b, 0x1b, 0x1b, 0x1b, 0x00, 0x00, 0x00, 0x00, +0x00, 0x7c, 0xc6, 0x60, 0x38, 0x6c, 0xc6, 0xc6, 0x6c, 0x38, 0x0c, 0xc6, +0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0xfe, 0xfe, 0xfe, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, +0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, +0x18, 0x7e, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x18, 0x0c, 0xfe, 0x0c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x60, 0xfe, 0x60, 0x30, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xc0, +0xc0, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x24, 0x66, 0xff, 0x66, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x38, 0x7c, 0x7c, 0xfe, 0xfe, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xfe, 0x7c, 0x7c, +0x38, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18, +0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x24, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c, +0x6c, 0xfe, 0x6c, 0x6c, 0x6c, 0xfe, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00, +0x18, 0x18, 0x7c, 0xc6, 0xc2, 0xc0, 0x7c, 0x06, 0x06, 0x86, 0xc6, 0x7c, +0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2, 0xc6, 0x0c, 0x18, +0x30, 0x60, 0xc6, 0x86, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, +0x6c, 0x38, 0x76, 0xdc, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, +0x00, 0x30, 0x30, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x30, 0x30, 0x30, +0x30, 0x30, 0x18, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x18, +0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x3c, 0xff, 0x3c, 0x66, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, +0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x02, 0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xce, 0xde, 0xf6, 0xe6, 0xc6, 0xc6, 0x7c, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x38, 0x78, 0x18, 0x18, 0x18, +0x18, 0x18, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, +0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x7c, 0xc6, 0x06, 0x06, 0x3c, 0x06, 0x06, 0x06, 0xc6, 0x7c, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x1c, 0x3c, 0x6c, 0xcc, 0xfe, +0x0c, 0x0c, 0x0c, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0, +0xc0, 0xc0, 0xfc, 0x06, 0x06, 0x06, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x38, 0x60, 0xc0, 0xc0, 0xfc, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0x06, 0x06, 0x0c, 0x18, +0x30, 0x30, 0x30, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, +0xc6, 0xc6, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x06, 0x06, 0x0c, 0x78, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, +0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x18, 0x18, 0x00, 0x00, 0x00, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x06, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00, +0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, +0x30, 0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x7c, 0xc6, 0xc6, 0x0c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xde, 0xde, +0xde, 0xdc, 0xc0, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, +0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x66, 0x66, 0x66, 0x66, 0xfc, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0xc2, 0xc0, 0xc0, 0xc0, +0xc0, 0xc2, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x6c, +0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x6c, 0xf8, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68, 0x60, 0x62, 0x66, 0xfe, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68, +0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, +0xc2, 0xc0, 0xc0, 0xde, 0xc6, 0xc6, 0x66, 0x3a, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x18, +0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x0c, +0x0c, 0x0c, 0x0c, 0x0c, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xe6, 0x66, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0x66, 0xe6, +0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0xf0, 0x60, 0x60, 0x60, 0x60, 0x60, +0x60, 0x62, 0x66, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xe7, +0xff, 0xff, 0xdb, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6, 0xc6, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, +0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, +0x66, 0x66, 0x7c, 0x60, 0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xd6, 0xde, 0x7c, +0x0c, 0x0e, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x6c, +0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, +0xc6, 0x60, 0x38, 0x0c, 0x06, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xdb, 0x99, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, +0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, +0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x66, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x18, +0x3c, 0x66, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, +0xc3, 0x66, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xff, 0xc3, 0x86, 0x0c, 0x18, 0x30, 0x60, 0xc1, 0xc3, 0xff, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x30, 0x30, 0x30, 0x30, 0x30, +0x30, 0x30, 0x30, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, +0xc0, 0xe0, 0x70, 0x38, 0x1c, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x3c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x3c, +0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, +0x30, 0x30, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x0c, 0x7c, +0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x60, +0x60, 0x78, 0x6c, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc0, 0xc0, 0xc0, 0xc6, 0x7c, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x0c, 0x0c, 0x3c, 0x6c, 0xcc, +0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xf0, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xcc, 0xcc, +0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0xcc, 0x78, 0x00, 0x00, 0x00, 0xe0, 0x60, +0x60, 0x6c, 0x76, 0x66, 0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x18, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x06, 0x00, 0x0e, 0x06, 0x06, +0x06, 0x06, 0x06, 0x06, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0xe0, 0x60, +0x60, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe6, 0xff, 0xdb, +0xdb, 0xdb, 0xdb, 0xdb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x66, 0x66, +0x66, 
0x66, 0x66, 0x7c, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x76, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0x0c, 0x1e, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x76, 0x66, 0x60, 0x60, 0x60, 0xf0, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0x60, +0x38, 0x0c, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x30, +0x30, 0xfc, 0x30, 0x30, 0x30, 0x30, 0x36, 0x1c, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0xc3, +0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0x3c, 0x66, 0xc3, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6, +0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0xfe, 0xcc, 0x18, 0x30, 0x60, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x0e, 0x18, 0x18, 0x18, 0x70, 0x18, 0x18, 0x18, 0x18, 0x0e, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x00, 0x18, +0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x18, +0x18, 0x18, 0x0e, 0x18, 0x18, 0x18, 0x18, 0x70, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6, +0xc6, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, +0xc2, 0xc0, 0xc0, 0xc0, 0xc2, 0x66, 0x3c, 0x0c, 0x06, 0x7c, 0x00, 0x00, +0x00, 0x00, 0xcc, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76, +0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x00, 0x7c, 0xc6, 0xfe, +0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, +0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xcc, 0x00, 0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, +0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0x78, 0x0c, 0x7c, +0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38, +0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x60, 0x60, 0x66, 0x3c, 0x0c, 0x06, +0x3c, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xfe, +0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, +0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, +0x00, 0x60, 0x30, 0x18, 0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x00, 0x00, 0x38, 0x18, 0x18, +0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, 0x66, +0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, +0x00, 0x60, 0x30, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, +0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0xc6, +0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38, 0x00, +0x38, 0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, +0x18, 0x30, 0x60, 0x00, 0xfe, 0x66, 0x60, 0x7c, 0x60, 0x60, 0x66, 0xfe, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x3b, 0x1b, +0x7e, 0xd8, 0xdc, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x6c, +0xcc, 0xcc, 0xfe, 0xcc, 0xcc, 0xcc, 0xcc, 0xce, 0x00, 0x00, 0x00, 0x00, +0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x00, 0x7c, 0xc6, 0xc6, +0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x60, 0x30, 0x18, +0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, +0x00, 0x30, 0x78, 0xcc, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76, +0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0xcc, 0xcc, 0xcc, +0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, +0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0x78, 0x00, +0x00, 0xc6, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, +0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, +0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, +0xc3, 0xc0, 0xc0, 0xc0, 0xc3, 0x7e, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, +0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xe6, 0xfc, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0xff, 0x18, +0xff, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66, +0x7c, 0x62, 0x66, 0x6f, 0x66, 0x66, 0x66, 0xf3, 0x00, 0x00, 0x00, 0x00, +0x00, 0x0e, 0x1b, 0x18, 0x18, 0x18, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, +0xd8, 0x70, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0x78, 0x0c, 0x7c, +0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, +0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, +0x00, 0x18, 0x30, 0x60, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, +0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0xcc, 0xcc, 0xcc, +0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, +0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00, +0x76, 0xdc, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6, +0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x6c, 0x6c, 0x3e, 0x00, 0x7e, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c, +0x38, 0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x30, 0x30, 0x00, 0x30, 0x30, 0x60, 0xc0, 0xc6, 0xc6, 0x7c, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0, +0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0xfe, 0x06, 0x06, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30, 0x60, 0xce, 0x9b, 0x06, +0x0c, 0x1f, 0x00, 0x00, 0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30, +0x66, 0xce, 0x96, 0x3e, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, +0x00, 0x18, 0x18, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x6c, 0xd8, 0x6c, 0x36, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x6c, 0x36, +0x6c, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x44, 0x11, 0x44, +0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, +0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, +0x55, 0xaa, 0x55, 0xaa, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, +0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0x18, 0x18, 0x18, 0x18, +0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, +0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0x18, 0x18, 0x18, +0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8, +0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36, +0x36, 0x36, 0x36, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x36, 0x36, 0x36, 0x36, +0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x18, 0xf8, +0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36, +0x36, 0xf6, 0x06, 0xf6, 0x36, 
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, +0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, +0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x06, 0xf6, +0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, +0x36, 0xf6, 0x06, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xfe, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0xf8, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, +0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xff, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, +0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18, +0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, +0x18, 0x18, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, +0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18, +0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x37, +0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, +0x36, 0x37, 0x30, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36, +0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xf7, 0x00, 0xff, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0xff, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, +0x36, 0x36, 0x36, 0x36, 0x36, 0x37, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36, +0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36, +0x36, 0xf7, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, +0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xff, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0xff, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x36, 0x36, 0x36, 0x36, +0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x3f, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, +0x18, 0x1f, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18, +0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, +0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, +0x36, 0x36, 0x36, 0xff, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, +0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18, +0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x1f, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xf0, 0xf0, 0xf0, +0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, +0x0f, 
0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, +0x0f, 0x0f, 0x0f, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x76, 0xdc, 0xd8, 0xd8, 0xd8, 0xdc, 0x76, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x78, 0xcc, 0xcc, 0xcc, 0xd8, 0xcc, 0xc6, 0xc6, 0xc6, 0xcc, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0xc6, 0xc0, 0xc0, 0xc0, +0xc0, 0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0xfe, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0xfe, 0xc6, 0x60, 0x30, 0x18, 0x30, 0x60, 0xc6, 0xfe, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xd8, 0xd8, +0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x66, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xc0, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x18, 0x3c, 0x66, 0x66, +0x66, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, +0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0x6c, 0x38, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x38, 0x6c, 0xc6, 0xc6, 0xc6, 0x6c, 0x6c, 0x6c, 0x6c, 0xee, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x30, 0x18, 0x0c, 0x3e, 0x66, +0x66, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x7e, 0xdb, 0xdb, 0xdb, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x03, 0x06, 0x7e, 0xdb, 0xdb, 0xf3, 0x7e, 0x60, 0xc0, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x30, 0x60, 0x60, 0x7c, 0x60, +0x60, 0x60, 0x30, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, +0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, 0x18, +0x18, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, +0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x00, 0x7e, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x1b, 0x1b, 0x1b, 0x18, 0x18, +0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, +0x18, 0x18, 0x18, 0x18, 0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x7e, 0x00, 0x18, 0x18, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x00, +0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c, +0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x0c, 0x0c, +0x0c, 0x0c, 0x0c, 0xec, 0x6c, 0x6c, 0x3c, 0x1c, 0x00, 0x00, 0x00, 0x00, +0x00, 0xd8, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0xd8, 0x30, 0x60, 0xc8, 0xf8, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, +}; diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c index 1446df9..e447938 100644 --- a/arch/sparc/kernel/cpu.c +++ b/arch/sparc/kernel/cpu.c @@ -185,6 +185,17 @@ static const struct manufacturer_info __initconst manufacturer_info[] = { 
FPU(-1, NULL) } },{ + 0xF, /* Aeroflex Gaisler */ + .cpu_info = { + CPU(3, "LEON"), + CPU(-1, NULL) + }, + .fpu_info = { + FPU(2, "GRFPU"), + FPU(3, "GRFPU-Lite"), + FPU(-1, NULL) + } +},{ 0x17, .cpu_info = { CPU_PMU(0x10, "TI UltraSparc I (SpitFire)", "ultra12"), diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S index f41ecc5..1504df8 100644 --- a/arch/sparc/kernel/entry.S +++ b/arch/sparc/kernel/entry.S @@ -400,6 +400,39 @@ linux_trap_ipi15_sun4d: /* FIXME */ 1: b,a 1b +#ifdef CONFIG_SPARC_LEON + + .globl smpleon_ticker + /* SMP per-cpu ticker interrupts are handled specially. */ +smpleon_ticker: + SAVE_ALL + or %l0, PSR_PIL, %g2 + wr %g2, 0x0, %psr + WRITE_PAUSE + wr %g2, PSR_ET, %psr + WRITE_PAUSE + call leon_percpu_timer_interrupt + add %sp, STACKFRAME_SZ, %o0 + wr %l0, PSR_ET, %psr + WRITE_PAUSE + RESTORE_ALL + + .align 4 + .globl linux_trap_ipi15_leon +linux_trap_ipi15_leon: + SAVE_ALL + or %l0, PSR_PIL, %l4 + wr %l4, 0x0, %psr + WRITE_PAUSE + wr %l4, PSR_ET, %psr + WRITE_PAUSE + call leon_cross_call_irq + nop + b ret_trap_lockless_ipi + clr %l6 + +#endif /* CONFIG_SPARC_LEON */ + #endif /* CONFIG_SMP */ /* This routine handles illegal instructions and privileged @@ -1261,7 +1294,7 @@ linux_sparc_syscall: sethi %hi(PSR_SYSCALL), %l4 or %l0, %l4, %l0 /* Direct access to user regs, must faster. */ - cmp %g1, NR_SYSCALLS + cmp %g1, NR_syscalls bgeu linux_sparc_ni_syscall sll %g1, 2, %l4 ld [%l7 + %l4], %l7 diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c index d3b1a30..29973da 100644 --- a/arch/sparc/kernel/ftrace.c +++ b/arch/sparc/kernel/ftrace.c @@ -4,6 +4,7 @@ #include <linux/percpu.h> #include <linux/init.h> #include <linux/list.h> +#include <trace/syscall.h> #include <asm/ftrace.h> @@ -91,3 +92,13 @@ int __init ftrace_dyn_arch_init(void *data) } #endif +#ifdef CONFIG_FTRACE_SYSCALLS + +extern unsigned int sys_call_table[]; + +unsigned long __init arch_syscall_addr(int nr) +{ + return (unsigned long)sys_call_table[nr]; +} + +#endif diff --git a/arch/sparc/kernel/head_32.S b/arch/sparc/kernel/head_32.S index 439d82a..21bb259 100644 --- a/arch/sparc/kernel/head_32.S +++ b/arch/sparc/kernel/head_32.S @@ -811,9 +811,31 @@ found_version: got_prop: #ifdef CONFIG_SPARC_LEON /* no cpu-type check is needed, it is a SPARC-LEON */ +#ifdef CONFIG_SMP + ba leon_smp_init + nop + + .global leon_smp_init +leon_smp_init: + sethi %hi(boot_cpu_id), %g1 ! master always 0 + stb %g0, [%g1 + %lo(boot_cpu_id)] + sethi %hi(boot_cpu_id4), %g1 ! 
master always 0 + stb %g0, [%g1 + %lo(boot_cpu_id4)] + + rd %asr17,%g1 + srl %g1,28,%g1 + + cmp %g0,%g1 + beq sun4c_continue_boot !continue with master + nop + + ba leon_smp_cpu_startup + nop +#else ba sun4c_continue_boot nop #endif +#endif set cputypval, %o2 ldub [%o2 + 0x4], %l1 diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 7690cc2..5fad949 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c @@ -11,6 +11,7 @@ #include <linux/dma-mapping.h> #include <linux/errno.h> #include <linux/iommu-helper.h> +#include <linux/bitmap.h> #ifdef CONFIG_PCI #include <linux/pci.h> @@ -169,7 +170,7 @@ void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long np entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT; - iommu_area_free(arena->map, entry, npages); + bitmap_clear(arena->map, entry, npages); } int iommu_table_init(struct iommu *iommu, int tsbsize, diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index 9f61fd8..3c8c44f 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c @@ -48,8 +48,13 @@ #include <asm/dma.h> #include <asm/iommu.h> #include <asm/io-unit.h> +#include <asm/leon.h> +#ifdef CONFIG_SPARC_LEON +#define mmu_inval_dma_area(p, l) leon_flush_dcache_all() +#else #define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */ +#endif static struct resource *_sparc_find_resource(struct resource *r, unsigned long); diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index 8ab1d47..8d6882b 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c @@ -176,7 +176,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -187,7 +187,7 @@ int show_interrupts(struct seq_file *p, void *v) for_each_online_cpu(j) seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); #endif - seq_printf(p, " %9s", irq_desc[i].chip->typename); + seq_printf(p, " %9s", irq_desc[i].chip->name); seq_printf(p, " %s", action->name); for (action=action->next; action; action = action->next) @@ -195,7 +195,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) { seq_printf(p, "NMI: "); for_each_online_cpu(j) @@ -484,7 +484,7 @@ static void sun4v_virq_eoi(unsigned int virt_irq) } static struct irq_chip sun4u_irq = { - .typename = "sun4u", + .name = "sun4u", .enable = sun4u_irq_enable, .disable = sun4u_irq_disable, .eoi = sun4u_irq_eoi, @@ -492,7 +492,7 @@ static struct irq_chip sun4u_irq = { }; static struct irq_chip sun4v_irq = { - .typename = "sun4v", + .name = "sun4v", .enable = sun4v_irq_enable, .disable = sun4v_irq_disable, .eoi = sun4v_irq_eoi, @@ -500,7 +500,7 @@ static struct irq_chip sun4v_irq = { }; static struct irq_chip sun4v_virq = { - .typename = "vsun4v", + .name = "vsun4v", .enable = sun4v_virq_enable, .disable = sun4v_virq_disable, .eoi = sun4v_virq_eoi, @@ -785,14 +785,14 @@ void fixup_irqs(void) for (irq = 0; irq < NR_IRQS; irq++) { unsigned long flags; - spin_lock_irqsave(&irq_desc[irq].lock, flags); + raw_spin_lock_irqsave(&irq_desc[irq].lock, flags); if (irq_desc[irq].action && !(irq_desc[irq].status & IRQ_PER_CPU)) { if (irq_desc[irq].chip->set_affinity) irq_desc[irq].chip->set_affinity(irq, irq_desc[irq].affinity); } - 
spin_unlock_irqrestore(&irq_desc[irq].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags); } tick_ops->disable_irq(); diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c index 3bc6527..6716584 100644 --- a/arch/sparc/kernel/kprobes.c +++ b/arch/sparc/kernel/kprobes.c @@ -46,6 +46,9 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}}; int __kprobes arch_prepare_kprobe(struct kprobe *p) { + if ((unsigned long) p->addr & 0x3UL) + return -EILSEQ; + p->ainsn.insn[0] = *p->addr; flushi(&p->ainsn.insn[0]); diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S index 3ea6e8c..1d36147 100644 --- a/arch/sparc/kernel/ktlb.S +++ b/arch/sparc/kernel/ktlb.S @@ -280,8 +280,8 @@ kvmap_dtlb_nonlinear: #ifdef CONFIG_SPARSEMEM_VMEMMAP /* Do not use the TSB for vmemmap. */ - mov (VMEMMAP_BASE >> 24), %g5 - sllx %g5, 24, %g5 + mov (VMEMMAP_BASE >> 40), %g5 + sllx %g5, 40, %g5 cmp %g4,%g5 bgeu,pn %xcc, kvmap_vmemmap nop @@ -293,8 +293,8 @@ kvmap_dtlb_tsbmiss: sethi %hi(MODULES_VADDR), %g5 cmp %g4, %g5 blu,pn %xcc, kvmap_dtlb_longpath - mov (VMALLOC_END >> 24), %g5 - sllx %g5, 24, %g5 + mov (VMALLOC_END >> 40), %g5 + sllx %g5, 40, %g5 cmp %g4, %g5 bgeu,pn %xcc, kvmap_dtlb_longpath nop diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c index adf5f27..df39a0f 100644 --- a/arch/sparc/kernel/ldc.c +++ b/arch/sparc/kernel/ldc.c @@ -14,6 +14,7 @@ #include <linux/interrupt.h> #include <linux/list.h> #include <linux/init.h> +#include <linux/bitmap.h> #include <asm/hypervisor.h> #include <asm/iommu.h> @@ -1242,13 +1243,13 @@ int ldc_bind(struct ldc_channel *lp, const char *name) snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name); err = request_irq(lp->cfg.rx_irq, ldc_rx, - IRQF_SAMPLE_RANDOM | IRQF_SHARED, + IRQF_SAMPLE_RANDOM | IRQF_DISABLED, lp->rx_irq_name, lp); if (err) return err; err = request_irq(lp->cfg.tx_irq, ldc_tx, - IRQF_SAMPLE_RANDOM | IRQF_SHARED, + IRQF_SAMPLE_RANDOM | IRQF_DISABLED, lp->tx_irq_name, lp); if (err) { free_irq(lp->cfg.rx_irq, lp); @@ -1875,7 +1876,7 @@ EXPORT_SYMBOL(ldc_read); static long arena_alloc(struct ldc_iommu *iommu, unsigned long npages) { struct iommu_arena *arena = &iommu->arena; - unsigned long n, i, start, end, limit; + unsigned long n, start, end, limit; int pass; limit = arena->limit; @@ -1883,7 +1884,7 @@ static long arena_alloc(struct ldc_iommu *iommu, unsigned long npages) pass = 0; again: - n = find_next_zero_bit(arena->map, limit, start); + n = bitmap_find_next_zero_area(arena->map, limit, start, npages, 0); end = n + npages; if (unlikely(end >= limit)) { if (likely(pass < 1)) { @@ -1896,16 +1897,7 @@ again: return -1; } } - - for (i = n; i < end; i++) { - if (test_bit(i, arena->map)) { - start = i + 1; - goto again; - } - } - - for (i = n; i < end; i++) - __set_bit(i, arena->map); + bitmap_set(arena->map, n, npages); arena->hint = end; diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c index 54d8a5b..87f1760 100644 --- a/arch/sparc/kernel/leon_kernel.c +++ b/arch/sparc/kernel/leon_kernel.c @@ -12,11 +12,14 @@ #include <linux/of_platform.h> #include <linux/interrupt.h> #include <linux/of_device.h> + #include <asm/oplib.h> #include <asm/timer.h> #include <asm/prom.h> #include <asm/leon.h> #include <asm/leon_amba.h> +#include <asm/traps.h> +#include <asm/cacheflush.h> #include "prom.h" #include "irq.h" @@ -115,6 +118,21 @@ void __init leon_init_timers(irq_handler_t counter_fn) (((1000000 / 100) - 1))); LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].ctrl, 0); 
+#ifdef CONFIG_SMP + leon_percpu_timer_dev[0].start = (int)leon3_gptimer_regs; + leon_percpu_timer_dev[0].irq = leon3_gptimer_irq+1; + + if (!(LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config) & + (1<<LEON3_GPTIMER_SEPIRQ))) { + prom_printf("irq timer not configured with seperate irqs \n"); + BUG(); + } + + LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].val, 0); + LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].rld, (((1000000/100) - 1))); + LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].ctrl, 0); +# endif + } else { printk(KERN_ERR "No Timer/irqctrl found\n"); BUG(); @@ -130,11 +148,41 @@ void __init leon_init_timers(irq_handler_t counter_fn) prom_halt(); } +# ifdef CONFIG_SMP + { + unsigned long flags; + struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (leon_percpu_timer_dev[0].irq - 1)]; + + /* For SMP we use the level 14 ticker, however the bootup code + * has copied the firmwares level 14 vector into boot cpu's + * trap table, we must fix this now or we get squashed. + */ + local_irq_save(flags); + + patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */ + + /* Adjust so that we jump directly to smpleon_ticker */ + trap_table->inst_three += smpleon_ticker - real_irq_entry; + + local_flush_cache_all(); + local_irq_restore(flags); + } +# endif + if (leon3_gptimer_regs) { LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].ctrl, LEON3_GPTIMER_EN | LEON3_GPTIMER_RL | LEON3_GPTIMER_LD | LEON3_GPTIMER_IRQEN); + +#ifdef CONFIG_SMP + LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].ctrl, + LEON3_GPTIMER_EN | + LEON3_GPTIMER_RL | + LEON3_GPTIMER_LD | + LEON3_GPTIMER_IRQEN); +#endif + } } @@ -175,6 +223,42 @@ void __init leon_node_init(struct device_node *dp, struct device_node ***nextp) } } +#ifdef CONFIG_SMP + +void leon_set_cpu_int(int cpu, int level) +{ + unsigned long mask; + mask = get_irqmask(level); + LEON3_BYPASS_STORE_PA(&leon3_irqctrl_regs->force[cpu], mask); +} + +static void leon_clear_ipi(int cpu, int level) +{ + unsigned long mask; + mask = get_irqmask(level); + LEON3_BYPASS_STORE_PA(&leon3_irqctrl_regs->force[cpu], mask<<16); +} + +static void leon_set_udt(int cpu) +{ +} + +void leon_clear_profile_irq(int cpu) +{ +} + +void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu) +{ + unsigned long mask, flags, *addr; + mask = get_irqmask(irq_nr); + local_irq_save(flags); + addr = (unsigned long *)&(leon3_irqctrl_regs->mask[cpu]); + LEON3_BYPASS_STORE_PA(addr, (LEON3_BYPASS_LOAD_PA(addr) | (mask))); + local_irq_restore(flags); +} + +#endif + void __init leon_init_IRQ(void) { sparc_init_timers = leon_init_timers; diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c new file mode 100644 index 0000000..05c0dad --- /dev/null +++ b/arch/sparc/kernel/leon_smp.c @@ -0,0 +1,468 @@ +/* leon_smp.c: Sparc-Leon SMP support. + * + * based on sun4m_smp.c + * Copyright (C) 1996 David S. 
Miller (davem@caip.rutgers.edu) + * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB + * Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB + */ + +#include <asm/head.h> + +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/threads.h> +#include <linux/smp.h> +#include <linux/smp_lock.h> +#include <linux/interrupt.h> +#include <linux/kernel_stat.h> +#include <linux/init.h> +#include <linux/spinlock.h> +#include <linux/mm.h> +#include <linux/swap.h> +#include <linux/profile.h> +#include <linux/pm.h> +#include <linux/delay.h> + +#include <asm/cacheflush.h> +#include <asm/tlbflush.h> + +#include <asm/ptrace.h> +#include <asm/atomic.h> +#include <asm/irq_regs.h> + +#include <asm/delay.h> +#include <asm/irq.h> +#include <asm/page.h> +#include <asm/pgalloc.h> +#include <asm/pgtable.h> +#include <asm/oplib.h> +#include <asm/cpudata.h> +#include <asm/asi.h> +#include <asm/leon.h> +#include <asm/leon_amba.h> + +#ifdef CONFIG_SPARC_LEON + +#include "irq.h" + +extern ctxd_t *srmmu_ctx_table_phys; +static int smp_processors_ready; +extern volatile unsigned long cpu_callin_map[NR_CPUS]; +extern unsigned char boot_cpu_id; +extern cpumask_t smp_commenced_mask; +void __init leon_configure_cache_smp(void); + +static inline unsigned long do_swap(volatile unsigned long *ptr, + unsigned long val) +{ + __asm__ __volatile__("swapa [%1] %2, %0\n\t" : "=&r"(val) + : "r"(ptr), "i"(ASI_LEON_DCACHE_MISS) + : "memory"); + return val; +} + +static void smp_setup_percpu_timer(void); + +void __cpuinit leon_callin(void) +{ + int cpuid = hard_smpleon_processor_id(); + + local_flush_cache_all(); + local_flush_tlb_all(); + leon_configure_cache_smp(); + + /* Get our local ticker going. */ + smp_setup_percpu_timer(); + + calibrate_delay(); + smp_store_cpu_info(cpuid); + + local_flush_cache_all(); + local_flush_tlb_all(); + + /* + * Unblock the master CPU _only_ when the scheduler state + * of all secondary CPUs will be up-to-date, so after + * the SMP initialization the master will be just allowed + * to call the scheduler code. + * Allow master to continue. + */ + do_swap(&cpu_callin_map[cpuid], 1); + + local_flush_cache_all(); + local_flush_tlb_all(); + + cpu_probe(); + + /* Fix idle thread fields. */ + __asm__ __volatile__("ld [%0], %%g6\n\t" : : "r"(&current_set[cpuid]) + : "memory" /* paranoid */); + + /* Attach to the address space of init_task. */ + atomic_inc(&init_mm.mm_count); + current->active_mm = &init_mm; + + while (!cpu_isset(cpuid, smp_commenced_mask)) + mb(); + + local_irq_enable(); + cpu_set(cpuid, cpu_online_map); +} + +/* + * Cycle through the processors asking the PROM to start each one.
+ */ + +extern struct linux_prom_registers smp_penguin_ctable; + +void __init leon_configure_cache_smp(void) +{ + unsigned long cfg = sparc_leon3_get_dcachecfg(); + int me = smp_processor_id(); + + if (ASI_LEON3_SYSCTRL_CFG_SSIZE(cfg) > 4) { + printk(KERN_INFO "Note: SMP with snooping only works on 4k cache, found %dk(0x%x) on cpu %d, disabling caches\n", + (unsigned int)ASI_LEON3_SYSCTRL_CFG_SSIZE(cfg), + (unsigned int)cfg, (unsigned int)me); + sparc_leon3_disable_cache(); + } else { + if (cfg & ASI_LEON3_SYSCTRL_CFG_SNOOPING) { + sparc_leon3_enable_snooping(); + } else { + printk(KERN_INFO "Note: You have to enable snooping in the vhdl model cpu %d, disabling caches\n", + me); + sparc_leon3_disable_cache(); + } + } + + local_flush_cache_all(); + local_flush_tlb_all(); +} + +void leon_smp_setbroadcast(unsigned int mask) +{ + int broadcast = + ((LEON3_BYPASS_LOAD_PA(&(leon3_irqctrl_regs->mpstatus)) >> + LEON3_IRQMPSTATUS_BROADCAST) & 1); + if (!broadcast) { + prom_printf("######## !!!! The irqmp-ctrl must have broadcast enabled, smp wont work !!!!! ####### nr cpus: %d\n", + leon_smp_nrcpus()); + if (leon_smp_nrcpus() > 1) { + BUG(); + } else { + prom_printf("continue anyway\n"); + return; + } + } + LEON_BYPASS_STORE_PA(&(leon3_irqctrl_regs->mpbroadcast), mask); +} + +unsigned int leon_smp_getbroadcast(void) +{ + unsigned int mask; + mask = LEON_BYPASS_LOAD_PA(&(leon3_irqctrl_regs->mpbroadcast)); + return mask; +} + +int leon_smp_nrcpus(void) +{ + int nrcpu = + ((LEON3_BYPASS_LOAD_PA(&(leon3_irqctrl_regs->mpstatus)) >> + LEON3_IRQMPSTATUS_CPUNR) & 0xf) + 1; + return nrcpu; +} + +void __init leon_boot_cpus(void) +{ + int nrcpu = leon_smp_nrcpus(); + int me = smp_processor_id(); + + printk(KERN_INFO "%d:(%d:%d) cpus mpirq at 0x%x \n", (unsigned int)me, + (unsigned int)nrcpu, (unsigned int)NR_CPUS, + (unsigned int)&(leon3_irqctrl_regs->mpstatus)); + + leon_enable_irq_cpu(LEON3_IRQ_CROSS_CALL, me); + leon_enable_irq_cpu(LEON3_IRQ_TICKER, me); + leon_enable_irq_cpu(LEON3_IRQ_RESCHEDULE, me); + + leon_smp_setbroadcast(1 << LEON3_IRQ_TICKER); + + leon_configure_cache_smp(); + smp_setup_percpu_timer(); + local_flush_cache_all(); + +} + +int __cpuinit leon_boot_one_cpu(int i) +{ + + struct task_struct *p; + int timeout; + + /* Cook up an idler for this guy. */ + p = fork_idle(i); + + current_set[i] = task_thread_info(p); + + /* See trampoline.S:leon_smp_cpu_startup for details... + * Initialize the contexts table + * Since the call to prom_startcpu() trashes the structure, + * we need to re-initialize it for each cpu + */ + smp_penguin_ctable.which_io = 0; + smp_penguin_ctable.phys_addr = (unsigned int)srmmu_ctx_table_phys; + smp_penguin_ctable.reg_size = 0; + + /* whirrr, whirrr, whirrrrrrrrr... */ + printk(KERN_INFO "Starting CPU %d : (irqmp: 0x%x)\n", (unsigned int)i, + (unsigned int)&leon3_irqctrl_regs->mpstatus); + local_flush_cache_all(); + + LEON_BYPASS_STORE_PA(&(leon3_irqctrl_regs->mpstatus), 1 << i); + + /* wheee... it's going... 
*/ + for (timeout = 0; timeout < 10000; timeout++) { + if (cpu_callin_map[i]) + break; + udelay(200); + } + printk(KERN_INFO "Started CPU %d \n", (unsigned int)i); + + if (!(cpu_callin_map[i])) { + printk(KERN_ERR "Processor %d is stuck.\n", i); + return -ENODEV; + } else { + leon_enable_irq_cpu(LEON3_IRQ_CROSS_CALL, i); + leon_enable_irq_cpu(LEON3_IRQ_TICKER, i); + leon_enable_irq_cpu(LEON3_IRQ_RESCHEDULE, i); + } + + local_flush_cache_all(); + return 0; +} + +void __init leon_smp_done(void) +{ + + int i, first; + int *prev; + + /* setup cpu list for irq rotation */ + first = 0; + prev = &first; + for (i = 0; i < NR_CPUS; i++) { + if (cpu_online(i)) { + *prev = i; + prev = &cpu_data(i).next; + } + } + *prev = first; + local_flush_cache_all(); + + /* Free unneeded trap tables */ + if (!cpu_isset(1, cpu_present_map)) { + ClearPageReserved(virt_to_page(trapbase_cpu1)); + init_page_count(virt_to_page(trapbase_cpu1)); + free_page((unsigned long)trapbase_cpu1); + totalram_pages++; + num_physpages++; + } + if (!cpu_isset(2, cpu_present_map)) { + ClearPageReserved(virt_to_page(trapbase_cpu2)); + init_page_count(virt_to_page(trapbase_cpu2)); + free_page((unsigned long)trapbase_cpu2); + totalram_pages++; + num_physpages++; + } + if (!cpu_isset(3, cpu_present_map)) { + ClearPageReserved(virt_to_page(trapbase_cpu3)); + init_page_count(virt_to_page(trapbase_cpu3)); + free_page((unsigned long)trapbase_cpu3); + totalram_pages++; + num_physpages++; + } + /* Ok, they are spinning and ready to go. */ + smp_processors_ready = 1; + +} + +void leon_irq_rotate(int cpu) +{ +} + +static struct smp_funcall { + smpfunc_t func; + unsigned long arg1; + unsigned long arg2; + unsigned long arg3; + unsigned long arg4; + unsigned long arg5; + unsigned long processors_in[NR_CPUS]; /* Set when ipi entered. */ + unsigned long processors_out[NR_CPUS]; /* Set when ipi exited. */ +} ccall_info; + +static DEFINE_SPINLOCK(cross_call_lock); + +/* Cross calls must be serialized, at least currently. */ +static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1, + unsigned long arg2, unsigned long arg3, + unsigned long arg4) +{ + if (smp_processors_ready) { + register int high = NR_CPUS - 1; + unsigned long flags; + + spin_lock_irqsave(&cross_call_lock, flags); + + { + /* If you make changes here, make sure gcc generates proper code... */ + register smpfunc_t f asm("i0") = func; + register unsigned long a1 asm("i1") = arg1; + register unsigned long a2 asm("i2") = arg2; + register unsigned long a3 asm("i3") = arg3; + register unsigned long a4 asm("i4") = arg4; + register unsigned long a5 asm("i5") = 0; + + __asm__ __volatile__("std %0, [%6]\n\t" + "std %2, [%6 + 8]\n\t" + "std %4, [%6 + 16]\n\t" : : + "r"(f), "r"(a1), "r"(a2), "r"(a3), + "r"(a4), "r"(a5), + "r"(&ccall_info.func)); + } + + /* Init receive/complete mapping, plus fire the IPI's off. */ + { + register int i; + + cpu_clear(smp_processor_id(), mask); + cpus_and(mask, cpu_online_map, mask); + for (i = 0; i <= high; i++) { + if (cpu_isset(i, mask)) { + ccall_info.processors_in[i] = 0; + ccall_info.processors_out[i] = 0; + set_cpu_int(i, LEON3_IRQ_CROSS_CALL); + + } + } + } + + { + register int i; + + i = 0; + do { + if (!cpu_isset(i, mask)) + continue; + + while (!ccall_info.processors_in[i]) + barrier(); + } while (++i <= high); + + i = 0; + do { + if (!cpu_isset(i, mask)) + continue; + + while (!ccall_info.processors_out[i]) + barrier(); + } while (++i <= high); + } + + spin_unlock_irqrestore(&cross_call_lock, flags); + } +} + +/* Running cross calls. 
*/ +void leon_cross_call_irq(void) +{ + int i = smp_processor_id(); + + ccall_info.processors_in[i] = 1; + ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3, + ccall_info.arg4, ccall_info.arg5); + ccall_info.processors_out[i] = 1; +} + +void leon_percpu_timer_interrupt(struct pt_regs *regs) +{ + struct pt_regs *old_regs; + int cpu = smp_processor_id(); + + old_regs = set_irq_regs(regs); + + leon_clear_profile_irq(cpu); + + profile_tick(CPU_PROFILING); + + if (!--prof_counter(cpu)) { + int user = user_mode(regs); + + irq_enter(); + update_process_times(user); + irq_exit(); + + prof_counter(cpu) = prof_multiplier(cpu); + } + set_irq_regs(old_regs); +} + +static void __init smp_setup_percpu_timer(void) +{ + int cpu = smp_processor_id(); + + prof_counter(cpu) = prof_multiplier(cpu) = 1; +} + +void __init leon_blackbox_id(unsigned *addr) +{ + int rd = *addr & 0x3e000000; + int rs1 = rd >> 11; + + /* patch places where ___b_hard_smp_processor_id appears */ + addr[0] = 0x81444000 | rd; /* rd %asr17, reg */ + addr[1] = 0x8130201c | rd | rs1; /* srl reg, 0x1c, reg */ + addr[2] = 0x01000000; /* nop */ +} + +void __init leon_blackbox_current(unsigned *addr) +{ + int rd = *addr & 0x3e000000; + int rs1 = rd >> 11; + + /* patch LOAD_CURRENT macro where ___b_load_current appears */ + addr[0] = 0x81444000 | rd; /* rd %asr17, reg */ + addr[2] = 0x8130201c | rd | rs1; /* srl reg, 0x1c, reg */ + addr[4] = 0x81282002 | rd | rs1; /* sll reg, 0x2, reg */ + +} + +/* + * CPU idle callback function + * See .../arch/sparc/kernel/process.c + */ +void pmc_leon_idle(void) +{ + __asm__ volatile ("mov %g0, %asr19"); +} + +void __init leon_init_smp(void) +{ + /* Patch ipi15 trap table */ + t_nmi[1] = t_nmi[1] + (linux_trap_ipi15_leon - linux_trap_ipi15_sun4m); + + BTFIXUPSET_BLACKBOX(hard_smp_processor_id, leon_blackbox_id); + BTFIXUPSET_BLACKBOX(load_current, leon_blackbox_current); + BTFIXUPSET_CALL(smp_cross_call, leon_cross_call, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(__hard_smp_processor_id, __leon_processor_id, + BTFIXUPCALL_NORM); + +#ifndef PMC_NO_IDLE + /* Assign power management IDLE handler */ + pm_idle = pmc_leon_idle; + printk(KERN_INFO "leon: power management initialized\n"); +#endif + +} + +#endif /* CONFIG_SPARC_LEON */ diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c index 938da19..cdc91d9 100644 --- a/arch/sparc/kernel/mdesc.c +++ b/arch/sparc/kernel/mdesc.c @@ -10,6 +10,7 @@ #include <linux/slab.h> #include <linux/mm.h> #include <linux/miscdevice.h> +#include <linux/bootmem.h> #include <asm/cpudata.h> #include <asm/hypervisor.h> @@ -108,25 +109,15 @@ static struct mdesc_handle * __init mdesc_lmb_alloc(unsigned int mdesc_size) static void mdesc_lmb_free(struct mdesc_handle *hp) { - unsigned int alloc_size, handle_size = hp->handle_size; - unsigned long start, end; + unsigned int alloc_size; + unsigned long start; BUG_ON(atomic_read(&hp->refcnt) != 0); BUG_ON(!list_empty(&hp->list)); - alloc_size = PAGE_ALIGN(handle_size); - - start = (unsigned long) hp; - end = start + alloc_size; - - while (start < end) { - struct page *p; - - p = virt_to_page(start); - ClearPageReserved(p); - __free_page(p); - start += PAGE_SIZE; - } + alloc_size = PAGE_ALIGN(hp->handle_size); + start = __pa(hp); + free_bootmem_late(start, alloc_size); } static struct mdesc_mem_ops lmb_mdesc_ops = { diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c index 881947e..0a6f2d1 100644 --- a/arch/sparc/kernel/of_device_64.c +++ b/arch/sparc/kernel/of_device_64.c @@ -104,9 +104,19 
@@ static int of_bus_pci_map(u32 *addr, const u32 *range, int i; /* Check address type match */ - if ((addr[0] ^ range[0]) & 0x03000000) - return -EINVAL; + if (!((addr[0] ^ range[0]) & 0x03000000)) + goto type_match; + + /* Special exception, we can map a 64-bit address into + * a 32-bit range. + */ + if ((addr[0] & 0x03000000) == 0x03000000 && + (range[0] & 0x03000000) == 0x02000000) + goto type_match; + + return -EINVAL; +type_match: if (of_out_of_range(addr + 1, range + 1, range + na + pna, na - 1, ns)) return -EINVAL; diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index c686486..539e83f 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c @@ -1064,7 +1064,6 @@ int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask) return (device_mask & dma_addr_mask) == dma_addr_mask; } -EXPORT_SYMBOL(pci_dma_supported); void pci_resource_to_user(const struct pci_dev *pdev, int bar, const struct resource *rp, resource_size_t *start, @@ -1081,3 +1080,10 @@ void pci_resource_to_user(const struct pci_dev *pdev, int bar, *start = rp->start - offset; *end = rp->end - offset; } + +static int __init pcibios_init(void) +{ + pci_dfl_cache_line_size = 64 >> 2; + return 0; +} +subsys_initcall(pcibios_init); diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c index f1be37a..e1b0541 100644 --- a/arch/sparc/kernel/pci_msi.c +++ b/arch/sparc/kernel/pci_msi.c @@ -112,7 +112,7 @@ static void free_msi(struct pci_pbm_info *pbm, int msi_num) } static struct irq_chip msi_irq = { - .typename = "PCI-MSI", + .name = "PCI-MSI", .mask = mask_msi_irq, .unmask = unmask_msi_irq, .enable = unmask_msi_irq, diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 2d6a1b1..fa5936e 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -56,7 +56,8 @@ struct cpu_hw_events { struct perf_event *events[MAX_HWEVENTS]; unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; unsigned long active_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; - int enabled; + u64 pcr; + int enabled; }; DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; @@ -68,8 +69,30 @@ struct perf_event_map { #define PIC_LOWER 0x02 }; +static unsigned long perf_event_encode(const struct perf_event_map *pmap) +{ + return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask; +} + +static void perf_event_decode(unsigned long val, u16 *enc, u8 *msk) +{ + *msk = val & 0xff; + *enc = val >> 16; +} + +#define C(x) PERF_COUNT_HW_CACHE_##x + +#define CACHE_OP_UNSUPPORTED 0xfffe +#define CACHE_OP_NONSENSE 0xffff + +typedef struct perf_event_map cache_map_t + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX]; + struct sparc_pmu { const struct perf_event_map *(*event_map)(int); + const cache_map_t *cache_map; int max_events; int upper_shift; int lower_shift; @@ -80,21 +103,109 @@ struct sparc_pmu { int lower_nop; }; -static const struct perf_event_map ultra3i_perfmon_event_map[] = { +static const struct perf_event_map ultra3_perfmon_event_map[] = { [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER }, [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER }, [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER }, [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER }, }; -static const struct perf_event_map *ultra3i_event_map(int event_id) +static const struct perf_event_map *ultra3_event_map(int event_id) { - return &ultra3i_perfmon_event_map[event_id]; + return &ultra3_perfmon_event_map[event_id]; } 
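/*
 * Illustrative sketch, not part of the patch: the perf_event_encode()/
 * perf_event_decode() helpers added above pack an event's encoding and its
 * PIC placement mask into one unsigned long so the group-validation code can
 * pass events around as plain scalars.  A stand-alone round-trip of the same
 * packing, using local stand-ins for the kernel types:
 */
#include <assert.h>

#define PIC_UPPER 0x01	/* mask values as used in the patch */
#define PIC_LOWER 0x02

struct event_map_sketch {
	unsigned short encoding;
	unsigned char pic_mask;
};

static unsigned long sketch_encode(const struct event_map_sketch *pmap)
{
	return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
}

static void sketch_decode(unsigned long val, unsigned short *enc, unsigned char *msk)
{
	*msk = val & 0xff;
	*enc = val >> 16;
}

int main(void)
{
	struct event_map_sketch map = { .encoding = 0x0009, .pic_mask = PIC_LOWER };
	unsigned short enc;
	unsigned char msk;

	/* encode then decode recovers the original (encoding, mask) pair */
	sketch_decode(sketch_encode(&map), &enc, &msk);
	assert(enc == map.encoding && msk == map.pic_mask);
	return 0;
}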
-static const struct sparc_pmu ultra3i_pmu = { - .event_map = ultra3i_event_map, - .max_events = ARRAY_SIZE(ultra3i_perfmon_event_map), +static const cache_map_t ultra3_cache_map = { +[C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, }, + [C(RESULT_MISS)] = { 0x09, PIC_UPPER, }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER }, + [C(RESULT_MISS)] = { 0x0a, PIC_UPPER }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, }, + [C(RESULT_MISS)] = { 0x09, PIC_UPPER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE }, + [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, }, + [C(RESULT_MISS)] = { 0x0c, PIC_UPPER, }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER }, + [C(RESULT_MISS)] = { 0x0c, PIC_UPPER }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x12, PIC_UPPER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x11, PIC_UPPER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +}; + +static const struct sparc_pmu ultra3_pmu = { + .event_map = ultra3_event_map, + .cache_map = &ultra3_cache_map, + .max_events = ARRAY_SIZE(ultra3_perfmon_event_map), .upper_shift = 11, .lower_shift = 4, .event_mask = 0x3f, @@ -102,6 +213,121 @@ static const struct sparc_pmu ultra3i_pmu = { .lower_nop = 0x14, }; +/* Niagara1 is very limited. The upper PIC is hard-locked to count + * only instructions, so it is free running which creates all kinds of + * problems. Some hardware designs make one wonder if the creator + * even looked at how this stuff gets used by software. 
+ */ +static const struct perf_event_map niagara1_perfmon_event_map[] = { + [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER }, + [PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER }, + [PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE }, + [PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER }, +}; + +static const struct perf_event_map *niagara1_event_map(int event_id) +{ + return &niagara1_perfmon_event_map[event_id]; +} + +static const cache_map_t niagara1_cache_map = { +[C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x03, PIC_LOWER, }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x03, PIC_LOWER, }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x00, PIC_UPPER }, + [C(RESULT_MISS)] = { 0x02, PIC_LOWER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE }, + [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x07, PIC_LOWER, }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x07, PIC_LOWER, }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x05, PIC_LOWER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x04, PIC_LOWER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +}; + +static const struct sparc_pmu niagara1_pmu = { + .event_map = niagara1_event_map, + .cache_map = &niagara1_cache_map, + .max_events = ARRAY_SIZE(niagara1_perfmon_event_map), + .upper_shift = 0, + .lower_shift = 4, + .event_mask = 0x7, + .upper_nop = 0x0, + .lower_nop = 0x0, +}; + static const struct perf_event_map niagara2_perfmon_event_map[] = { [PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER }, [PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER }, @@ -116,14 +342,102 @@ static const struct perf_event_map *niagara2_event_map(int event_id) return &niagara2_perfmon_event_map[event_id]; } +static const cache_map_t niagara2_cache_map = { +[C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, }, + 
[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, }, + [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, }, + [C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE }, + [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, }, + [C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, }, + [C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +}; + static const struct sparc_pmu niagara2_pmu = { .event_map = niagara2_event_map, + .cache_map = &niagara2_cache_map, .max_events = ARRAY_SIZE(niagara2_perfmon_event_map), .upper_shift = 19, .lower_shift = 6, .event_mask = 0xfff, .hv_bit = 0x8, - .irq_bit = 0x03, + .irq_bit = 0x30, .upper_nop = 0x220, .lower_nop = 0x220, }; @@ -151,23 +465,30 @@ static u64 nop_for_index(int idx) sparc_pmu->lower_nop, idx); } -static inline void sparc_pmu_enable_event(struct hw_perf_event *hwc, - int idx) +static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx) { u64 val, mask = mask_for_index(idx); - val = pcr_ops->read(); - pcr_ops->write((val & ~mask) | hwc->config); + val = cpuc->pcr; + val &= ~mask; + val |= hwc->config; + cpuc->pcr = val; + + pcr_ops->write(cpuc->pcr); } -static inline void sparc_pmu_disable_event(struct hw_perf_event *hwc, - int idx) +static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx) { u64 mask = mask_for_index(idx); u64 nop = nop_for_index(idx); - u64 val = pcr_ops->read(); + u64 val; - pcr_ops->write((val & ~mask) | nop); + val = cpuc->pcr; + val &= 
~mask; + val |= nop; + cpuc->pcr = val; + + pcr_ops->write(cpuc->pcr); } void hw_perf_enable(void) @@ -182,7 +503,7 @@ void hw_perf_enable(void) cpuc->enabled = 1; barrier(); - val = pcr_ops->read(); + val = cpuc->pcr; for (i = 0; i < MAX_HWEVENTS; i++) { struct perf_event *cp = cpuc->events[i]; @@ -194,7 +515,9 @@ void hw_perf_enable(void) val |= hwc->config_base; } - pcr_ops->write(val); + cpuc->pcr = val; + + pcr_ops->write(cpuc->pcr); } void hw_perf_disable(void) @@ -207,10 +530,12 @@ void hw_perf_disable(void) cpuc->enabled = 0; - val = pcr_ops->read(); + val = cpuc->pcr; val &= ~(PCR_UTRACE | PCR_STRACE | sparc_pmu->hv_bit | sparc_pmu->irq_bit); - pcr_ops->write(val); + cpuc->pcr = val; + + pcr_ops->write(cpuc->pcr); } static u32 read_pmc(int idx) @@ -242,7 +567,7 @@ static void write_pmc(int idx, u64 val) } static int sparc_perf_event_set_period(struct perf_event *event, - struct hw_perf_event *hwc, int idx) + struct hw_perf_event *hwc, int idx) { s64 left = atomic64_read(&hwc->period_left); s64 period = hwc->sample_period; @@ -282,19 +607,19 @@ static int sparc_pmu_enable(struct perf_event *event) if (test_and_set_bit(idx, cpuc->used_mask)) return -EAGAIN; - sparc_pmu_disable_event(hwc, idx); + sparc_pmu_disable_event(cpuc, hwc, idx); cpuc->events[idx] = event; set_bit(idx, cpuc->active_mask); sparc_perf_event_set_period(event, hwc, idx); - sparc_pmu_enable_event(hwc, idx); + sparc_pmu_enable_event(cpuc, hwc, idx); perf_event_update_userpage(event); return 0; } static u64 sparc_perf_event_update(struct perf_event *event, - struct hw_perf_event *hwc, int idx) + struct hw_perf_event *hwc, int idx) { int shift = 64 - 32; u64 prev_raw_count, new_raw_count; @@ -324,7 +649,7 @@ static void sparc_pmu_disable(struct perf_event *event) int idx = hwc->idx; clear_bit(idx, cpuc->active_mask); - sparc_pmu_disable_event(hwc, idx); + sparc_pmu_disable_event(cpuc, hwc, idx); barrier(); @@ -338,18 +663,29 @@ static void sparc_pmu_disable(struct perf_event *event) static void sparc_pmu_read(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; + sparc_perf_event_update(event, hwc, hwc->idx); } static void sparc_pmu_unthrottle(struct perf_event *event) { + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc = &event->hw; - sparc_pmu_enable_event(hwc, hwc->idx); + + sparc_pmu_enable_event(cpuc, hwc, hwc->idx); } static atomic_t active_events = ATOMIC_INIT(0); static DEFINE_MUTEX(pmc_grab_mutex); +static void perf_stop_nmi_watchdog(void *unused) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + + stop_nmi_watchdog(NULL); + cpuc->pcr = pcr_ops->read(); +} + void perf_event_grab_pmc(void) { if (atomic_inc_not_zero(&active_events)) @@ -358,7 +694,7 @@ void perf_event_grab_pmc(void) mutex_lock(&pmc_grab_mutex); if (atomic_read(&active_events) == 0) { if (atomic_read(&nmi_active) > 0) { - on_each_cpu(stop_nmi_watchdog, NULL, 1); + on_each_cpu(perf_stop_nmi_watchdog, NULL, 1); BUG_ON(atomic_read(&nmi_active) != 0); } atomic_inc(&active_events); @@ -375,30 +711,160 @@ void perf_event_release_pmc(void) } } +static const struct perf_event_map *sparc_map_cache_event(u64 config) +{ + unsigned int cache_type, cache_op, cache_result; + const struct perf_event_map *pmap; + + if (!sparc_pmu->cache_map) + return ERR_PTR(-ENOENT); + + cache_type = (config >> 0) & 0xff; + if (cache_type >= PERF_COUNT_HW_CACHE_MAX) + return ERR_PTR(-EINVAL); + + cache_op = (config >> 8) & 0xff; + if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) + return ERR_PTR(-EINVAL); + + 
cache_result = (config >> 16) & 0xff; + if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) + return ERR_PTR(-EINVAL); + + pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]); + + if (pmap->encoding == CACHE_OP_UNSUPPORTED) + return ERR_PTR(-ENOENT); + + if (pmap->encoding == CACHE_OP_NONSENSE) + return ERR_PTR(-EINVAL); + + return pmap; +} + static void hw_perf_event_destroy(struct perf_event *event) { perf_event_release_pmc(); } +/* Make sure all events can be scheduled into the hardware at + * the same time. This is simplified by the fact that we only + * need to support 2 simultaneous HW events. + */ +static int sparc_check_constraints(unsigned long *events, int n_ev) +{ + if (n_ev <= perf_max_events) { + u8 msk1, msk2; + u16 dummy; + + if (n_ev == 1) + return 0; + BUG_ON(n_ev != 2); + perf_event_decode(events[0], &dummy, &msk1); + perf_event_decode(events[1], &dummy, &msk2); + + /* If both events can go on any counter, OK. */ + if (msk1 == (PIC_UPPER | PIC_LOWER) && + msk2 == (PIC_UPPER | PIC_LOWER)) + return 0; + + /* If one event is limited to a specific counter, + * and the other can go on both, OK. + */ + if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) && + msk2 == (PIC_UPPER | PIC_LOWER)) + return 0; + if ((msk2 == PIC_UPPER || msk2 == PIC_LOWER) && + msk1 == (PIC_UPPER | PIC_LOWER)) + return 0; + + /* If the events are fixed to different counters, OK. */ + if ((msk1 == PIC_UPPER && msk2 == PIC_LOWER) || + (msk1 == PIC_LOWER && msk2 == PIC_UPPER)) + return 0; + + /* Otherwise, there is a conflict. */ + } + + return -1; +} + +static int check_excludes(struct perf_event **evts, int n_prev, int n_new) +{ + int eu = 0, ek = 0, eh = 0; + struct perf_event *event; + int i, n, first; + + n = n_prev + n_new; + if (n <= 1) + return 0; + + first = 1; + for (i = 0; i < n; i++) { + event = evts[i]; + if (first) { + eu = event->attr.exclude_user; + ek = event->attr.exclude_kernel; + eh = event->attr.exclude_hv; + first = 0; + } else if (event->attr.exclude_user != eu || + event->attr.exclude_kernel != ek || + event->attr.exclude_hv != eh) { + return -EAGAIN; + } + } + + return 0; +} + +static int collect_events(struct perf_event *group, int max_count, + struct perf_event *evts[], unsigned long *events) +{ + struct perf_event *event; + int n = 0; + + if (!is_software_event(group)) { + if (n >= max_count) + return -1; + evts[n] = group; + events[n++] = group->hw.event_base; + } + list_for_each_entry(event, &group->sibling_list, group_entry) { + if (!is_software_event(event) && + event->state != PERF_EVENT_STATE_OFF) { + if (n >= max_count) + return -1; + evts[n] = event; + events[n++] = event->hw.event_base; + } + } + return n; +} + static int __hw_perf_event_init(struct perf_event *event) { struct perf_event_attr *attr = &event->attr; + struct perf_event *evts[MAX_HWEVENTS]; struct hw_perf_event *hwc = &event->hw; + unsigned long events[MAX_HWEVENTS]; const struct perf_event_map *pmap; u64 enc; + int n; if (atomic_read(&nmi_active) < 0) return -ENODEV; - if (attr->type != PERF_TYPE_HARDWARE) + if (attr->type == PERF_TYPE_HARDWARE) { + if (attr->config >= sparc_pmu->max_events) + return -EINVAL; + pmap = sparc_pmu->event_map(attr->config); + } else if (attr->type == PERF_TYPE_HW_CACHE) { + pmap = sparc_map_cache_event(attr->config); + if (IS_ERR(pmap)) + return PTR_ERR(pmap); + } else return -EOPNOTSUPP; - if (attr->config >= sparc_pmu->max_events) - return -EINVAL; - - perf_event_grab_pmc(); - event->destroy = hw_perf_event_destroy; - /* We save the enable bits in the 
config_base. So to * turn off sampling just write 'config', and to enable * things write 'config | config_base'. @@ -411,15 +877,39 @@ static int __hw_perf_event_init(struct perf_event *event) if (!attr->exclude_hv) hwc->config_base |= sparc_pmu->hv_bit; + hwc->event_base = perf_event_encode(pmap); + + enc = pmap->encoding; + + n = 0; + if (event->group_leader != event) { + n = collect_events(event->group_leader, + perf_max_events - 1, + evts, events); + if (n < 0) + return -EINVAL; + } + events[n] = hwc->event_base; + evts[n] = event; + + if (check_excludes(evts, n, 1)) + return -EINVAL; + + if (sparc_check_constraints(events, n + 1)) + return -EINVAL; + + /* Try to do all error checking before this point, as unwinding + * state after grabbing the PMC is difficult. + */ + perf_event_grab_pmc(); + event->destroy = hw_perf_event_destroy; + if (!hwc->sample_period) { hwc->sample_period = MAX_PERIOD; hwc->last_period = hwc->sample_period; atomic64_set(&hwc->period_left, hwc->sample_period); } - pmap = sparc_pmu->event_map(attr->config); - - enc = pmap->encoding; if (pmap->pic_mask & PIC_UPPER) { hwc->idx = PIC_UPPER_INDEX; enc <<= sparc_pmu->upper_shift; @@ -472,7 +962,7 @@ void perf_event_print_debug(void) } static int __kprobes perf_event_nmi_handler(struct notifier_block *self, - unsigned long cmd, void *__args) + unsigned long cmd, void *__args) { struct die_args *args = __args; struct perf_sample_data data; @@ -513,7 +1003,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self, continue; if (perf_event_overflow(event, 1, &data, regs)) - sparc_pmu_disable_event(hwc, idx); + sparc_pmu_disable_event(cpuc, hwc, idx); } return NOTIFY_STOP; @@ -525,8 +1015,15 @@ static __read_mostly struct notifier_block perf_event_nmi_notifier = { static bool __init supported_pmu(void) { - if (!strcmp(sparc_pmu_type, "ultra3i")) { - sparc_pmu = &ultra3i_pmu; + if (!strcmp(sparc_pmu_type, "ultra3") || + !strcmp(sparc_pmu_type, "ultra3+") || + !strcmp(sparc_pmu_type, "ultra3i") || + !strcmp(sparc_pmu_type, "ultra4+")) { + sparc_pmu = &ultra3_pmu; + return true; + } + if (!strcmp(sparc_pmu_type, "niagara")) { + sparc_pmu = &niagara1_pmu; return true; } if (!strcmp(sparc_pmu_type, "niagara2")) { diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c index 138910c..d80a65d 100644 --- a/arch/sparc/kernel/prom_common.c +++ b/arch/sparc/kernel/prom_common.c @@ -79,6 +79,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len err = -ENODEV; + mutex_lock(&of_set_property_mutex); write_lock(&devtree_lock); prevp = &dp->properties; while (*prevp) { @@ -88,9 +89,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len void *old_val = prop->value; int ret; - mutex_lock(&of_set_property_mutex); ret = prom_setprop(dp->node, name, val, len); - mutex_unlock(&of_set_property_mutex); err = -EINVAL; if (ret >= 0) { @@ -109,6 +108,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len prevp = &(*prevp)->next; } write_unlock(&devtree_lock); + mutex_unlock(&of_set_property_mutex); /* XXX Upate procfs if necessary... 
*/ diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c index 4ae91dc..2f6524d 100644 --- a/arch/sparc/kernel/ptrace_64.c +++ b/arch/sparc/kernel/ptrace_64.c @@ -23,6 +23,7 @@ #include <linux/signal.h> #include <linux/regset.h> #include <linux/tracehook.h> +#include <trace/syscall.h> #include <linux/compat.h> #include <linux/elf.h> @@ -37,6 +38,9 @@ #include <asm/cpudata.h> #include <asm/cacheflush.h> +#define CREATE_TRACE_POINTS +#include <trace/events/syscalls.h> + #include "entry.h" /* #define ALLOW_INIT_TRACING */ @@ -1059,6 +1063,9 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs) if (test_thread_flag(TIF_SYSCALL_TRACE)) ret = tracehook_report_syscall_entry(regs); + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) + trace_sys_enter(regs, regs->u_regs[UREG_G1]); + if (unlikely(current->audit_context) && !ret) audit_syscall_entry((test_thread_flag(TIF_32BIT) ? AUDIT_ARCH_SPARC : @@ -1084,6 +1091,9 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs) audit_syscall_exit(result, regs->u_regs[UREG_I0]); } + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) + trace_sys_exit(regs, regs->u_regs[UREG_G1]); + if (test_thread_flag(TIF_SYSCALL_TRACE)) tracehook_report_syscall_exit(regs, 0); } diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index 2118033..a2a79e7 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c @@ -46,6 +46,7 @@ #include <asm/setup.h> #include <asm/mmu.h> #include <asm/ns87303.h> +#include <asm/btext.h> #ifdef CONFIG_IP_PNP #include <net/ipconfig.h> @@ -286,7 +287,10 @@ void __init setup_arch(char **cmdline_p) parse_early_param(); boot_flags_init(*cmdline_p); - register_console(&prom_early_console); +#ifdef CONFIG_EARLYFB + if (btext_find_display()) +#endif + register_console(&prom_early_console); if (tlb_type == hypervisor) printk("ARCH: SUN4V\n"); diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c index 132d81f..91c10fb 100644 --- a/arch/sparc/kernel/smp_32.c +++ b/arch/sparc/kernel/smp_32.c @@ -32,6 +32,7 @@ #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <asm/cpudata.h> +#include <asm/leon.h> #include "irq.h" @@ -96,6 +97,9 @@ void __init smp_cpus_done(unsigned int max_cpus) case sun4d: smp4d_smp_done(); break; + case sparc_leon: + leon_smp_done(); + break; case sun4e: printk("SUN4E\n"); BUG(); @@ -306,6 +310,9 @@ void __init smp_prepare_cpus(unsigned int max_cpus) case sun4d: smp4d_boot_cpus(); break; + case sparc_leon: + leon_boot_cpus(); + break; case sun4e: printk("SUN4E\n"); BUG(); @@ -376,6 +383,9 @@ int __cpuinit __cpu_up(unsigned int cpu) case sun4d: ret = smp4d_boot_one_cpu(cpu); break; + case sparc_leon: + ret = leon_boot_one_cpu(cpu); + break; case sun4e: printk("SUN4E\n"); BUG(); diff --git a/arch/sparc/kernel/sparc_ksyms_64.c b/arch/sparc/kernel/sparc_ksyms_64.c index 0f26066..372ad59 100644 --- a/arch/sparc/kernel/sparc_ksyms_64.c +++ b/arch/sparc/kernel/sparc_ksyms_64.c @@ -38,17 +38,5 @@ EXPORT_SYMBOL(sun4v_niagara_setperf); EXPORT_SYMBOL(sun4v_niagara2_getperf); EXPORT_SYMBOL(sun4v_niagara2_setperf); -#ifdef CONFIG_PCI -/* inline functions in asm/pci_64.h */ -EXPORT_SYMBOL(pci_alloc_consistent); -EXPORT_SYMBOL(pci_free_consistent); -EXPORT_SYMBOL(pci_map_single); -EXPORT_SYMBOL(pci_unmap_single); -EXPORT_SYMBOL(pci_map_sg); -EXPORT_SYMBOL(pci_unmap_sg); -EXPORT_SYMBOL(pci_dma_sync_single_for_cpu); -EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu); -#endif - /* Exporting a symbol from /init/main.c */ EXPORT_SYMBOL(saved_command_line); 
diff --git a/arch/sparc/kernel/sys_sparc32.c b/arch/sparc/kernel/sys_sparc32.c
index 04e28b2..dc0ac19 100644
--- a/arch/sparc/kernel/sys_sparc32.c
+++ b/arch/sparc/kernel/sys_sparc32.c
@@ -26,11 +26,6 @@
#include <linux/nfs_fs.h>
#include <linux/quota.h>
#include <linux/module.h>
-#include <linux/sunrpc/svc.h>
-#include <linux/nfsd/nfsd.h>
-#include <linux/nfsd/cache.h>
-#include <linux/nfsd/xdr.h>
-#include <linux/nfsd/syscall.h>
#include <linux/poll.h>
#include <linux/personality.h>
#include <linux/stat.h>
@@ -569,85 +564,6 @@ asmlinkage long sparc32_open(const char __user *filename,
return do_sys_open(AT_FDCWD, filename, flags, mode);
}
-extern unsigned long do_mremap(unsigned long addr,
- unsigned long old_len, unsigned long new_len,
- unsigned long flags, unsigned long new_addr);
-
-asmlinkage unsigned long sys32_mremap(unsigned long addr,
- unsigned long old_len, unsigned long new_len,
- unsigned long flags, u32 __new_addr)
-{
- unsigned long ret = -EINVAL;
- unsigned long new_addr = __new_addr;
-
- if (unlikely(sparc_mmap_check(addr, old_len)))
- goto out;
- if (unlikely(sparc_mmap_check(new_addr, new_len)))
- goto out;
- down_write(&current->mm->mmap_sem);
- ret = do_mremap(addr, old_len, new_len, flags, new_addr);
- up_write(&current->mm->mmap_sem);
-out:
- return ret;
-}
-
-struct __sysctl_args32 {
- u32 name;
- int nlen;
- u32 oldval;
- u32 oldlenp;
- u32 newval;
- u32 newlen;
- u32 __unused[4];
-};
-
-asmlinkage long sys32_sysctl(struct __sysctl_args32 __user *args)
-{
-#ifndef CONFIG_SYSCTL_SYSCALL
- return -ENOSYS;
-#else
- struct __sysctl_args32 tmp;
- int error;
- size_t oldlen, __user *oldlenp = NULL;
- unsigned long addr = (((unsigned long)&args->__unused[0]) + 7UL) & ~7UL;
-
- if (copy_from_user(&tmp, args, sizeof(tmp)))
- return -EFAULT;
-
- if (tmp.oldval && tmp.oldlenp) {
- /* Duh, this is ugly and might not work if sysctl_args
- is in read-only memory, but do_sysctl does indirectly
- a lot of uaccess in both directions and we'd have to
- basically copy the whole sysctl.c here, and
- glibc's __sysctl uses rw memory for the structure
- anyway. */
- if (get_user(oldlen, (u32 __user *)(unsigned long)tmp.oldlenp) ||
- put_user(oldlen, (size_t __user *)addr))
- return -EFAULT;
- oldlenp = (size_t __user *)addr;
- }
-
- lock_kernel();
- error = do_sysctl((int __user *)(unsigned long) tmp.name,
- tmp.nlen,
- (void __user *)(unsigned long) tmp.oldval,
- oldlenp,
- (void __user *)(unsigned long) tmp.newval,
- tmp.newlen);
- unlock_kernel();
- if (oldlenp) {
- if (!error) {
- if (get_user(oldlen, (size_t __user *)addr) ||
- put_user(oldlen, (u32 __user *)(unsigned long) tmp.oldlenp))
- error = -EFAULT;
- }
- if (copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
- error = -EFAULT;
- }
- return error;
-#endif
-}
-
long sys32_lookup_dcookie(unsigned long cookie_high, unsigned long cookie_low, char __user *buf, size_t len)
diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
index 03035c8..3a82e65 100644
--- a/arch/sparc/kernel/sys_sparc_32.c
+++ b/arch/sparc/kernel/sys_sparc_32.c
@@ -45,7 +45,8 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
/* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
*/
- if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1)))
+ if ((flags & MAP_SHARED) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
return -EINVAL;
return addr;
}
@@ -79,15 +80,6 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
}
}
-asmlinkage unsigned long sparc_brk(unsigned long brk)
-{
- if(ARCH_SUN4C) {
- if ((brk & 0xe0000000) != (current->mm->brk & 0xe0000000))
- return current->mm->brk;
- }
- return sys_brk(brk);
-}
-
/*
* sys_pipe() is the normal C calling standard for creating
* a pipe. It's not the way unix traditionally does this, though.
@@ -234,31 +226,6 @@ int sparc_mmap_check(unsigned long addr, unsigned long len)
}
/* Linux version of mmap */
-static unsigned long do_mmap2(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags, unsigned long fd,
- unsigned long pgoff)
-{
- struct file * file = NULL;
- unsigned long retval = -EBADF;
-
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- goto out;
- }
-
- len = PAGE_ALIGN(len);
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
- down_write(&current->mm->mmap_sem);
- retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
-
- if (file)
- fput(file);
-out:
- return retval;
-}
asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags, unsigned long fd,
@@ -266,14 +233,16 @@ asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
{
/* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE we have. */
- return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT - 12));
+ return sys_mmap_pgoff(addr, len, prot, flags, fd,
+ pgoff >> (PAGE_SHIFT - 12));
}
asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags, unsigned long fd,
unsigned long off)
{
- return do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
+ /* no alignment check? */
+ return sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
}
long sparc_remap_file_pages(unsigned long start, unsigned long size,
@@ -287,27 +256,6 @@ long sparc_remap_file_pages(unsigned long start, unsigned long size,
(pgoff >> (PAGE_SHIFT - 12)), flags);
}
-extern unsigned long do_mremap(unsigned long addr,
- unsigned long old_len, unsigned long new_len,
- unsigned long flags, unsigned long new_addr);
-
-asmlinkage unsigned long sparc_mremap(unsigned long addr,
- unsigned long old_len, unsigned long new_len,
- unsigned long flags, unsigned long new_addr)
-{
- unsigned long ret = -EINVAL;
-
- if (unlikely(sparc_mmap_check(addr, old_len)))
- goto out;
- if (unlikely(sparc_mmap_check(new_addr, new_len)))
- goto out;
- down_write(&current->mm->mmap_sem);
- ret = do_mremap(addr, old_len, new_len, flags, new_addr);
- up_write(&current->mm->mmap_sem);
-out:
- return ret;
-}
-
/* we come to here via sys_nis_syscall so it can setup the regs argument */
asmlinkage unsigned long c_sys_nis_syscall (struct pt_regs *regs)
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index e2d1024..cfa0e19 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -317,10 +317,14 @@ bottomup:
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
unsigned long align_goal, addr = -ENOMEM;
+ unsigned long (*get_area)(struct file *, unsigned long,
+ unsigned long, unsigned long, unsigned long);
+
+ get_area = current->mm->get_unmapped_area;
if (flags & MAP_FIXED) {
/* Ok, don't mess with it. */
- return get_unmapped_area(NULL, orig_addr, len, pgoff, flags);
+ return get_area(NULL, orig_addr, len, pgoff, flags);
}
flags &= ~MAP_SHARED;
@@ -333,7 +337,7 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
align_goal = (64UL * 1024);
do {
- addr = get_unmapped_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
+ addr = get_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
if (!(addr & ~PAGE_MASK)) {
addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
break;
@@ -351,7 +355,7 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
* be obtained.
*/
if (addr & ~PAGE_MASK)
- addr = get_unmapped_area(NULL, orig_addr, len, pgoff, flags);
+ addr = get_area(NULL, orig_addr, len, pgoff, flags);
return addr;
}
@@ -399,18 +403,6 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
}
}
-SYSCALL_DEFINE1(sparc_brk, unsigned long, brk)
-{
- /* People could try to be nasty and use ta 0x6d in 32bit programs */
- if (test_thread_flag(TIF_32BIT) && brk >= STACK_TOP32)
- return current->mm->brk;
-
- if (unlikely(straddles_64bit_va_hole(current->mm->brk, brk)))
- return current->mm->brk;
-
- return sys_brk(brk);
-}
-
/*
* sys_pipe() is the normal C calling standard for creating
* a pipe. It's not the way unix traditionally does this, though.
@@ -568,23 +560,13 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags, unsigned long, fd,
unsigned long, off)
{
- struct file * file = NULL;
- unsigned long retval = -EBADF;
-
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- goto out;
- }
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- len = PAGE_ALIGN(len);
+ unsigned long retval = -EINVAL;
- down_write(&current->mm->mmap_sem);
- retval = do_mmap(file, addr, len, prot, flags, off);
- up_write(&current->mm->mmap_sem);
-
- if (file)
- fput(file);
+ if ((off + PAGE_ALIGN(len)) < off)
+ goto out;
+ if (off & ~PAGE_MASK)
+ goto out;
+ retval = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
return retval;
}
@@ -614,12 +596,6 @@ SYSCALL_DEFINE5(64_mremap, unsigned long, addr, unsigned long, old_len,
if (test_thread_flag(TIF_32BIT))
goto out;
- if (unlikely(new_len >= VA_EXCLUDE_START))
- goto out;
- if (unlikely(sparc_mmap_check(addr, old_len)))
- goto out;
- if (unlikely(sparc_mmap_check(new_addr, new_len)))
- goto out;
down_write(&current->mm->mmap_sem);
ret = do_mremap(addr, old_len, new_len, flags, new_addr);
diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
index d150c2a..dc4a458 100644
--- a/arch/sparc/kernel/syscalls.S
+++ b/arch/sparc/kernel/syscalls.S
@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
#endif
.align 32
1: ldx [%g6 + TI_FLAGS], %l5
- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
+ andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
be,pt %icc, rtrap
nop
call syscall_trace_leave
@@ -187,7 +187,7 @@ linux_syscall_trace:
.globl linux_sparc_syscall32
linux_sparc_syscall32: /* Direct access to user regs, much faster. */
- cmp %g1, NR_SYSCALLS ! IEU1 Group
+ cmp %g1, NR_syscalls ! IEU1 Group
bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI
srl %i0, 0, %o0 ! IEU0
sll %g1, 2, %l4 ! IEU0 Group
@@ -198,7 +198,7 @@ linux_sparc_syscall32:
srl %i5, 0, %o5 ! IEU1
srl %i2, 0, %o2 ! IEU0 Group
- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
+ andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
bne,pn %icc, linux_syscall_trace32 ! CTI
mov %i0, %l5 ! IEU1
call %l7 ! CTI Group brk forced
@@ -210,7 +210,7 @@ linux_sparc_syscall32:
.globl linux_sparc_syscall
linux_sparc_syscall: /* Direct access to user regs, much faster. */
- cmp %g1, NR_SYSCALLS ! IEU1 Group
+ cmp %g1, NR_syscalls ! IEU1 Group
bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI
mov %i0, %o0 ! IEU0
sll %g1, 2, %l4 ! IEU0 Group
@@ -221,7 +221,7 @@ linux_sparc_syscall:
mov %i3, %o3 ! IEU1
mov %i4, %o4 ! IEU0 Group
- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
+ andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
bne,pn %icc, linux_syscall_trace ! CTI Group
mov %i0, %l5 ! IEU0
2: call %l7 ! CTI Group brk forced
@@ -245,7 +245,7 @@ ret_sys_call:
cmp %o0, -ERESTART_RESTARTBLOCK
bgeu,pn %xcc, 1f
- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %l6
+ andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
80:
/* System call success, clear Carry condition code. */
andn %g3, %g2, %g3
@@ -260,7 +260,7 @@ ret_sys_call:
/* System call failure, set Carry condition code.
* Also, get abs(errno) to return to the process.
*/ - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %l6 + andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6 sub %g0, %o0, %o0 or %g3, %g2, %g3 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] diff --git a/arch/sparc/kernel/systbls.h b/arch/sparc/kernel/systbls.h index a63c5d2..d2f999a 100644 --- a/arch/sparc/kernel/systbls.h +++ b/arch/sparc/kernel/systbls.h @@ -9,7 +9,6 @@ struct new_utsname; extern asmlinkage unsigned long sys_getpagesize(void); -extern asmlinkage unsigned long sparc_brk(unsigned long brk); extern asmlinkage long sparc_pipe(struct pt_regs *regs); extern asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second, diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S index 0f1658d..801fc8e 100644 --- a/arch/sparc/kernel/systbls_32.S +++ b/arch/sparc/kernel/systbls_32.S @@ -19,7 +19,7 @@ sys_call_table: /*0*/ .long sys_restart_syscall, sys_exit, sys_fork, sys_read, sys_write /*5*/ .long sys_open, sys_close, sys_wait4, sys_creat, sys_link /*10*/ .long sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys_mknod -/*15*/ .long sys_chmod, sys_lchown16, sparc_brk, sys_nis_syscall, sys_lseek +/*15*/ .long sys_chmod, sys_lchown16, sys_brk, sys_nis_syscall, sys_lseek /*20*/ .long sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16 /*25*/ .long sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_pause /*30*/ .long sys_utime, sys_lchown, sys_fchown, sys_access, sys_nice @@ -67,7 +67,7 @@ sys_call_table: /*235*/ .long sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall /*240*/ .long sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler /*245*/ .long sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep -/*250*/ .long sparc_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl +/*250*/ .long sys_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl /*255*/ .long sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep /*260*/ .long sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun /*265*/ .long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy @@ -82,5 +82,5 @@ sys_call_table: /*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate /*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 /*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv -/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open +/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S index 009825f..e575b46 100644 --- a/arch/sparc/kernel/systbls_64.S +++ b/arch/sparc/kernel/systbls_64.S @@ -21,7 +21,7 @@ sys_call_table32: /*0*/ .word sys_restart_syscall, sys32_exit, sys_fork, sys_read, sys_write /*5*/ .word sys32_open, sys_close, sys32_wait4, sys32_creat, sys_link /*10*/ .word sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys32_mknod -/*15*/ .word sys_chmod, sys_lchown16, sys_sparc_brk, sys32_perfctr, sys32_lseek +/*15*/ .word sys_chmod, sys_lchown16, sys_brk, sys32_perfctr, sys32_lseek /*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16 /*25*/ .word sys32_vmsplice, compat_sys_ptrace, sys_alarm, 
sys32_sigaltstack, sys_pause /*30*/ .word compat_sys_utime, sys_lchown, sys_fchown, sys32_access, sys32_nice @@ -68,7 +68,7 @@ sys_call_table32: .word compat_sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys32_mlockall /*240*/ .word sys_munlockall, sys32_sched_setparam, sys32_sched_getparam, sys32_sched_setscheduler, sys32_sched_getscheduler .word sys_sched_yield, sys32_sched_get_priority_max, sys32_sched_get_priority_min, sys32_sched_rr_get_interval, compat_sys_nanosleep -/*250*/ .word sys32_mremap, sys32_sysctl, sys32_getsid, sys_fdatasync, sys32_nfsservctl +/*250*/ .word sys_mremap, compat_sys_sysctl, sys32_getsid, sys_fdatasync, sys32_nfsservctl .word sys32_sync_file_range, compat_sys_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep /*260*/ .word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, sys32_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun .word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy @@ -83,7 +83,7 @@ sys_call_table32: /*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1 /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv - .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open + .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg #endif /* CONFIG_COMPAT */ @@ -96,7 +96,7 @@ sys_call_table: /*0*/ .word sys_restart_syscall, sparc_exit, sys_fork, sys_read, sys_write /*5*/ .word sys_open, sys_close, sys_wait4, sys_creat, sys_link /*10*/ .word sys_unlink, sys_nis_syscall, sys_chdir, sys_chown, sys_mknod -/*15*/ .word sys_chmod, sys_lchown, sys_sparc_brk, sys_perfctr, sys_lseek +/*15*/ .word sys_chmod, sys_lchown, sys_brk, sys_perfctr, sys_lseek /*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid, sys_getuid /*25*/ .word sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_nis_syscall /*30*/ .word sys_utime, sys_nis_syscall, sys_nis_syscall, sys_access, sys_nice @@ -158,4 +158,4 @@ sys_call_table: /*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate .word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv - .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open + .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c index 614ac7b..5b2f595 100644 --- a/arch/sparc/kernel/time_32.c +++ b/arch/sparc/kernel/time_32.c @@ -210,9 +210,6 @@ static void __init sbus_time_init(void) btfixup(); sparc_init_timers(timer_interrupt); - - /* Now that OBP ticker has been silenced, it is safe to enable IRQ. 
*/ - local_irq_enable(); } void __init time_init(void) diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c index da1218e..67e1651 100644 --- a/arch/sparc/kernel/time_64.c +++ b/arch/sparc/kernel/time_64.c @@ -774,26 +774,9 @@ void __devinit setup_sparc64_timer(void) static struct clocksource clocksource_tick = { .rating = 100, .mask = CLOCKSOURCE_MASK(64), - .shift = 16, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; -static void __init setup_clockevent_multiplier(unsigned long hz) -{ - unsigned long mult, shift = 32; - - while (1) { - mult = div_sc(hz, NSEC_PER_SEC, shift); - if (mult && (mult >> 32UL) == 0UL) - break; - - shift--; - } - - sparc64_clockevent.shift = shift; - sparc64_clockevent.mult = mult; -} - static unsigned long tb_ticks_per_usec __read_mostly; void __delay(unsigned long loops) @@ -828,9 +811,7 @@ void __init time_init(void) clocksource_hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT); clocksource_tick.name = tick_ops->name; - clocksource_tick.mult = - clocksource_hz2mult(freq, - clocksource_tick.shift); + clocksource_calc_mult_shift(&clocksource_tick, freq, 4); clocksource_tick.read = clocksource_tick_read; printk("clocksource: mult[%x] shift[%d]\n", @@ -839,15 +820,14 @@ void __init time_init(void) clocksource_register(&clocksource_tick); sparc64_clockevent.name = tick_ops->name; - - setup_clockevent_multiplier(freq); + clockevents_calc_mult_shift(&sparc64_clockevent, freq, 4); sparc64_clockevent.max_delta_ns = clockevent_delta2ns(0x7fffffffffffffffUL, &sparc64_clockevent); sparc64_clockevent.min_delta_ns = clockevent_delta2ns(0xF, &sparc64_clockevent); - printk("clockevent: mult[%lx] shift[%d]\n", + printk("clockevent: mult[%x] shift[%d]\n", sparc64_clockevent.mult, sparc64_clockevent.shift); setup_sparc64_timer(); diff --git a/arch/sparc/kernel/trampoline_32.S b/arch/sparc/kernel/trampoline_32.S index 5e235c5..691f484 100644 --- a/arch/sparc/kernel/trampoline_32.S +++ b/arch/sparc/kernel/trampoline_32.S @@ -15,7 +15,7 @@ #include <asm/contregs.h> #include <asm/thread_info.h> - .globl sun4m_cpu_startup, __smp4m_processor_id + .globl sun4m_cpu_startup, __smp4m_processor_id, __leon_processor_id .globl sun4d_cpu_startup, __smp4d_processor_id __CPUINIT @@ -106,6 +106,12 @@ __smp4d_processor_id: retl mov %g1, %o7 +__leon_processor_id: + rd %asr17,%g2 + srl %g2,28,%g2 + retl + mov %g1, %o7 + /* CPUID in bootbus can be found at PA 0xff0140000 */ #define SUN4D_BOOTBUS_CPUID 0xf0140000 @@ -160,3 +166,64 @@ sun4d_cpu_startup: nop b,a smp_do_cpu_idle + +#ifdef CONFIG_SPARC_LEON + + __CPUINIT + .align 4 + .global leon_smp_cpu_startup, smp_penguin_ctable + +leon_smp_cpu_startup: + + set smp_penguin_ctable,%g1 + ld [%g1+4],%g1 + srl %g1,4,%g1 + set 0x00000100,%g5 /* SRMMU_CTXTBL_PTR */ + sta %g1, [%g5] ASI_M_MMUREGS + + /* Set up a sane %psr -- PIL<0xf> S<0x1> PS<0x1> CWP<0x0> */ + set (PSR_PIL | PSR_S | PSR_PS), %g1 + wr %g1, 0x0, %psr ! traps off though + WRITE_PAUSE + + /* Our %wim is one behind CWP */ + mov 2, %g1 + wr %g1, 0x0, %wim + WRITE_PAUSE + + /* Set tbr - we use just one trap table. */ + set trapbase, %g1 + wr %g1, 0x0, %tbr + WRITE_PAUSE + + /* Get our CPU id */ + rd %asr17,%g3 + + /* Give ourselves a stack and curptr. */ + set current_set, %g5 + srl %g3, 28, %g4 + sll %g4, 2, %g4 + ld [%g5 + %g4], %g6 + + sethi %hi(THREAD_SIZE - STACKFRAME_SZ), %sp + or %sp, %lo(THREAD_SIZE - STACKFRAME_SZ), %sp + add %g6, %sp, %sp + + /* Turn on traps (PSR_ET). */ + rd %psr, %g1 + wr %g1, PSR_ET, %psr ! traps on + WRITE_PAUSE + + /* Init our caches, etc. 
*/ + set poke_srmmu, %g5 + ld [%g5], %g5 + call %g5 + nop + + /* Start this processor. */ + call leon_callin + nop + + b,a smp_do_cpu_idle + +#endif diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c index 6b1e6cd..f8514e2 100644 --- a/arch/sparc/kernel/unaligned_32.c +++ b/arch/sparc/kernel/unaligned_32.c @@ -17,8 +17,7 @@ #include <asm/uaccess.h> #include <linux/smp.h> #include <linux/smp_lock.h> - -/* #define DEBUG_MNA */ +#include <linux/perf_event.h> enum direction { load, /* ld, ldd, ldh, ldsh */ @@ -29,12 +28,6 @@ enum direction { invalid, }; -#ifdef DEBUG_MNA -static char *dirstrings[] = { - "load", "store", "both", "fpload", "fpstore", "invalid" -}; -#endif - static inline enum direction decode_direction(unsigned int insn) { unsigned long tmp = (insn >> 21) & 1; @@ -255,10 +248,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) unsigned long addr = compute_effective_address(regs, insn); int err; -#ifdef DEBUG_MNA - printk("KMNA: pc=%08lx [dir=%s addr=%08lx size=%d] retpc[%08lx]\n", - regs->pc, dirstrings[dir], addr, size, regs->u_regs[UREG_RETPC]); -#endif + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); switch (dir) { case load: err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), @@ -350,6 +340,7 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn) } addr = compute_effective_address(regs, insn); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); switch(dir) { case load: err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c index 3792099..378ca82 100644 --- a/arch/sparc/kernel/unaligned_64.c +++ b/arch/sparc/kernel/unaligned_64.c @@ -20,10 +20,9 @@ #include <asm/uaccess.h> #include <linux/smp.h> #include <linux/bitops.h> +#include <linux/perf_event.h> #include <asm/fpumacro.h> -/* #define DEBUG_MNA */ - enum direction { load, /* ld, ldd, ldh, ldsh */ store, /* st, std, sth, stsh */ @@ -33,12 +32,6 @@ enum direction { invalid, }; -#ifdef DEBUG_MNA -static char *dirstrings[] = { - "load", "store", "both", "fpload", "fpstore", "invalid" -}; -#endif - static inline enum direction decode_direction(unsigned int insn) { unsigned long tmp = (insn >> 21) & 1; @@ -327,12 +320,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) addr = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f)); -#ifdef DEBUG_MNA - printk("KMNA: pc=%016lx [dir=%s addr=%016lx size=%d] " - "retpc[%016lx]\n", - regs->tpc, dirstrings[dir], addr, size, - regs->u_regs[UREG_RETPC]); -#endif + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); switch (asi) { case ASI_NL: case ASI_AIUPL: @@ -399,6 +387,7 @@ int handle_popc(u32 insn, struct pt_regs *regs) int ret, i, rd = ((insn >> 25) & 0x1f); int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); if (insn & 0x2000) { maybe_flush_windows(0, 0, rd, from_kernel); value = sign_extend_imm13(insn); @@ -445,6 +434,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs) int asi = decode_asi(insn, regs); int flag = (freg < 32) ? 
FPRS_DL : FPRS_DU; + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); + save_and_clear_fpu(); current_thread_info()->xfsr[0] &= ~0x1c000; if (freg & 3) { @@ -566,6 +557,8 @@ void handle_ld_nf(u32 insn, struct pt_regs *regs) int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; unsigned long *reg; + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); + maybe_flush_windows(0, 0, rd, from_kernel); reg = fetch_reg_addr(rd, regs); if (from_kernel || rd < 16) { @@ -596,6 +589,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr if (tstate & TSTATE_PRIV) die_if_kernel("lddfmna from kernel", regs); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar); if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; if (get_user(insn, (u32 __user *) pc) != -EFAULT) { @@ -657,6 +651,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr if (tstate & TSTATE_PRIV) die_if_kernel("stdfmna from kernel", regs); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar); if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; if (get_user(insn, (u32 __user *) pc) != -EFAULT) { diff --git a/arch/sparc/kernel/visemul.c b/arch/sparc/kernel/visemul.c index b956fd7..9dfd2eb 100644 --- a/arch/sparc/kernel/visemul.c +++ b/arch/sparc/kernel/visemul.c @@ -5,6 +5,7 @@ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/thread_info.h> +#include <linux/perf_event.h> #include <asm/ptrace.h> #include <asm/pstate.h> @@ -617,7 +618,7 @@ static void pmul(struct pt_regs *regs, unsigned int insn, unsigned int opf) rs2 = fps_regval(f, RS2(insn)); rd_val = 0; - src2 = (rs2 >> (opf == FMUL8x16AU_OPF) ? 16 : 0); + src2 = rs2 >> (opf == FMUL8x16AU_OPF ? 16 : 0); for (byte = 0; byte < 4; byte++) { u16 src1 = (rs1 >> (byte * 8)) & 0x00ff; u32 prod = src1 * src2; @@ -801,6 +802,8 @@ int vis_emul(struct pt_regs *regs, unsigned int insn) BUG_ON(regs->tstate & TSTATE_PRIV); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); + if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile index e75faf0..c4b5e03 100644 --- a/arch/sparc/lib/Makefile +++ b/arch/sparc/lib/Makefile @@ -44,3 +44,4 @@ obj-y += iomap.o obj-$(CONFIG_SPARC32) += atomic32.o obj-y += ksyms.o obj-$(CONFIG_SPARC64) += PeeCeeI.o +obj-y += usercopy.o diff --git a/arch/sparc/lib/bzero.S b/arch/sparc/lib/bzero.S index b655729..615f401 100644 --- a/arch/sparc/lib/bzero.S +++ b/arch/sparc/lib/bzero.S @@ -6,10 +6,6 @@ .text - .globl __memset - .type __memset, #function -__memset: /* %o0=buf, %o1=pat, %o2=len */ - .globl memset .type memset, #function memset: /* %o0=buf, %o1=pat, %o2=len */ @@ -83,7 +79,6 @@ __bzero_done: retl mov %o3, %o0 .size __bzero, .-__bzero - .size __memset, .-__memset .size memset, .-memset #define EX_ST(x,y) \ diff --git a/arch/sparc/lib/checksum_32.S b/arch/sparc/lib/checksum_32.S index 77f2285..3632cb3 100644 --- a/arch/sparc/lib/checksum_32.S +++ b/arch/sparc/lib/checksum_32.S @@ -560,7 +560,7 @@ __csum_partial_copy_end: mov %i0, %o1 mov %i1, %o0 5: - call __memcpy + call memcpy mov %i2, %o2 tst %o0 bne,a 2f diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c index 704b126..1b30bb3 100644 --- a/arch/sparc/lib/ksyms.c +++ b/arch/sparc/lib/ksyms.c @@ -30,7 +30,6 @@ EXPORT_SYMBOL(__memscan_generic); EXPORT_SYMBOL(memcmp); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memset); -EXPORT_SYMBOL(__memset); EXPORT_SYMBOL(memmove); EXPORT_SYMBOL(__bzero); @@ -81,7 +80,6 @@ 
EXPORT_SYMBOL(__csum_partial_copy_sparc_generic); /* Special internal versions of library functions. */ EXPORT_SYMBOL(__copy_1page); -EXPORT_SYMBOL(__memcpy); EXPORT_SYMBOL(__memmove); EXPORT_SYMBOL(bzero_1page); diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S index 7ce9c65..24b8b12 100644 --- a/arch/sparc/lib/mcount.S +++ b/arch/sparc/lib/mcount.S @@ -64,8 +64,9 @@ mcount: 2: sethi %hi(softirq_stack), %g3 or %g3, %lo(softirq_stack), %g3 ldx [%g3 + %g1], %g7 + sub %g7, STACK_BIAS, %g7 cmp %sp, %g7 - bleu,pt %xcc, 2f + bleu,pt %xcc, 3f sethi %hi(THREAD_SIZE), %g3 add %g7, %g3, %g7 cmp %sp, %g7 @@ -75,7 +76,7 @@ mcount: * again, we are already trying to output the stack overflow * message. */ - sethi %hi(ovstack), %g7 ! cant move to panic stack fast enough +3: sethi %hi(ovstack), %g7 ! cant move to panic stack fast enough or %g7, %lo(ovstack), %g7 add %g7, OVSTACKSIZE, %g3 sub %g3, STACK_BIAS + 192, %g3 diff --git a/arch/sparc/lib/memcpy.S b/arch/sparc/lib/memcpy.S index ce10bc8..34fe657 100644 --- a/arch/sparc/lib/memcpy.S +++ b/arch/sparc/lib/memcpy.S @@ -543,9 +543,6 @@ FUNC(memmove) b 3f add %o0, 2, %o0 -#ifdef __KERNEL__ -FUNC(__memcpy) -#endif FUNC(memcpy) /* %o0=dst %o1=src %o2=len */ sub %o0, %o1, %o4 diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S index 1c37ea8..99c017b 100644 --- a/arch/sparc/lib/memset.S +++ b/arch/sparc/lib/memset.S @@ -60,11 +60,10 @@ .globl __bzero_begin __bzero_begin: - .globl __bzero, __memset, + .globl __bzero .globl memset .globl __memset_start, __memset_end __memset_start: -__memset: memset: and %o1, 0xff, %g3 sll %g3, 8, %g2 diff --git a/arch/sparc/lib/usercopy.c b/arch/sparc/lib/usercopy.c new file mode 100644 index 0000000..14b363f --- /dev/null +++ b/arch/sparc/lib/usercopy.c @@ -0,0 +1,8 @@ +#include <linux/module.h> +#include <linux/bug.h> + +void copy_from_user_overflow(void) +{ + WARN(1, "Buffer overflow detected!\n"); +} +EXPORT_SYMBOL(copy_from_user_overflow); diff --git a/arch/sparc/math-emu/math_32.c b/arch/sparc/math-emu/math_32.c index e13f65d..a3fccde 100644 --- a/arch/sparc/math-emu/math_32.c +++ b/arch/sparc/math-emu/math_32.c @@ -67,6 +67,7 @@ #include <linux/types.h> #include <linux/sched.h> #include <linux/mm.h> +#include <linux/perf_event.h> #include <asm/uaccess.h> #include "sfp-util_32.h" @@ -163,6 +164,8 @@ int do_mathemu(struct pt_regs *regs, struct task_struct *fpt) int retcode = 0; /* assume all succeed */ unsigned long insn; + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); + #ifdef DEBUG_MATHEMU printk("In do_mathemu()... 
pc is %08lx\n", regs->pc); printk("fpqdepth is %ld\n", fpt->thread.fpqdepth); diff --git a/arch/sparc/math-emu/math_64.c b/arch/sparc/math-emu/math_64.c index 6863c9b..56d2c44 100644 --- a/arch/sparc/math-emu/math_64.c +++ b/arch/sparc/math-emu/math_64.c @@ -11,6 +11,7 @@ #include <linux/types.h> #include <linux/sched.h> #include <linux/errno.h> +#include <linux/perf_event.h> #include <asm/fpumacro.h> #include <asm/ptrace.h> @@ -183,6 +184,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f) if (tstate & TSTATE_PRIV) die_if_kernel("unfinished/unimplemented FPop from kernel", regs); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; if (get_user(insn, (u32 __user *) pc) != -EFAULT) { diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index 43b0da9..6081936 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c @@ -31,13 +31,12 @@ #include <asm/sections.h> #include <asm/mmu_context.h> -#ifdef CONFIG_KPROBES -static inline int notify_page_fault(struct pt_regs *regs) +static inline __kprobes int notify_page_fault(struct pt_regs *regs) { int ret = 0; /* kprobe_running() needs smp_processor_id() */ - if (!user_mode(regs)) { + if (kprobes_built_in() && !user_mode(regs)) { preempt_disable(); if (kprobe_running() && kprobe_fault_handler(regs, 0)) ret = 1; @@ -45,12 +44,6 @@ static inline int notify_page_fault(struct pt_regs *regs) } return ret; } -#else -static inline int notify_page_fault(struct pt_regs *regs) -{ - return 0; -} -#endif static void __kprobes unhandled_fault(unsigned long address, struct task_struct *tsk, @@ -73,7 +66,7 @@ static void __kprobes unhandled_fault(unsigned long address, die_if_kernel("Oops", regs); } -static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr) +static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr) { printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n", regs->tpc); @@ -170,8 +163,9 @@ static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn) return insn; } -static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code, - unsigned int insn, unsigned long address) +static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code, + int fault_code, unsigned int insn, + unsigned long address) { unsigned char asi = ASI_P; @@ -225,7 +219,7 @@ cannot_handle: unhandled_fault (address, current, regs); } -static void noinline bogus_32bit_fault_tpc(struct pt_regs *regs) +static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs) { static int times; @@ -237,8 +231,8 @@ static void noinline bogus_32bit_fault_tpc(struct pt_regs *regs) show_regs(regs); } -static void noinline bogus_32bit_fault_address(struct pt_regs *regs, - unsigned long addr) +static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs, + unsigned long addr) { static int times; diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index a70a5e1..1886d37 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -265,7 +265,7 @@ static void flush_dcache(unsigned long pfn) struct page *page; page = pfn_to_page(pfn); - if (page && page_mapping(page)) { + if (page) { unsigned long pg_flags; pg_flags = page->flags; diff --git a/arch/sparc/mm/init_64.h b/arch/sparc/mm/init_64.h index c2f772d..77d1b31 100644 --- a/arch/sparc/mm/init_64.h +++ b/arch/sparc/mm/init_64.h @@ -45,7 +45,7 @@ extern void free_initmem(void); #define VMEMMAP_ALIGN(x) 
(((x)+VMEMMAP_CHUNK-1UL)&VMEMMAP_CHUNK_MASK) #define VMEMMAP_SIZE ((((1UL << MAX_PHYSADDR_BITS) >> PAGE_SHIFT) * \ - sizeof(struct page *)) >> VMEMMAP_CHUNK_SHIFT) + sizeof(struct page)) >> VMEMMAP_CHUNK_SHIFT) extern unsigned long vmemmap_table[VMEMMAP_SIZE]; #endif diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index 509b1ff..367321a 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -1990,7 +1990,7 @@ void __init poke_leonsparc(void) void __init init_leon(void) { - srmmu_name = "Leon"; + srmmu_name = "LEON"; BTFIXUPSET_CALL(flush_cache_all, leon_flush_cache_all, BTFIXUPCALL_NORM); @@ -2037,8 +2037,6 @@ static void __init get_srmmu_type(void) /* First, check for sparc-leon. */ if (sparc_cpu_model == sparc_leon) { - psr_typ = 0xf; /* hardcoded ids for older models/simulators */ - psr_vers = 2; init_leon(); return; } @@ -2301,7 +2299,8 @@ void __init ld_mmu_srmmu(void) BTFIXUPSET_CALL(flush_cache_mm, smp_flush_cache_mm, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_cache_range, smp_flush_cache_range, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_cache_page, smp_flush_cache_page, BTFIXUPCALL_NORM); - if (sparc_cpu_model != sun4d) { + if (sparc_cpu_model != sun4d && + sparc_cpu_model != sparc_leon) { BTFIXUPSET_CALL(flush_tlb_all, smp_flush_tlb_all, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_tlb_mm, smp_flush_tlb_mm, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_tlb_range, smp_flush_tlb_range, BTFIXUPCALL_NORM); @@ -2330,6 +2329,8 @@ void __init ld_mmu_srmmu(void) #ifdef CONFIG_SMP if (sparc_cpu_model == sun4d) sun4d_init_smp(); + else if (sparc_cpu_model == sparc_leon) + leon_init_smp(); else sun4m_init_smp(); #endif diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c index 2ffacd6..a89baf0 100644 --- a/arch/sparc/mm/sun4c.c +++ b/arch/sparc/mm/sun4c.c @@ -17,6 +17,7 @@ #include <linux/fs.h> #include <linux/seq_file.h> #include <linux/scatterlist.h> +#include <linux/bitmap.h> #include <asm/sections.h> #include <asm/page.h> @@ -1021,20 +1022,12 @@ static char *sun4c_lockarea(char *vaddr, unsigned long size) npages = (((unsigned long)vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT; - scan = 0; local_irq_save(flags); - for (;;) { - scan = find_next_zero_bit(sun4c_iobuffer_map, - iobuffer_map_size, scan); - if ((base = scan) + npages > iobuffer_map_size) goto abend; - for (;;) { - if (scan >= base + npages) goto found; - if (test_bit(scan, sun4c_iobuffer_map)) break; - scan++; - } - } + base = bitmap_find_next_zero_area(sun4c_iobuffer_map, iobuffer_map_size, + 0, npages, 0); + if (base >= iobuffer_map_size) + goto abend; -found: high = ((base + npages) << PAGE_SHIFT) + sun4c_iobuffer_start; high = SUN4C_REAL_PGDIR_ALIGN(high); while (high > sun4c_iobuffer_high) { diff --git a/arch/sparc/oprofile/init.c b/arch/sparc/oprofile/init.c index f97cb8b..f9024bc 100644 --- a/arch/sparc/oprofile/init.c +++ b/arch/sparc/oprofile/init.c @@ -11,6 +11,7 @@ #include <linux/oprofile.h> #include <linux/errno.h> #include <linux/init.h> +#include <linux/param.h> /* for HZ */ #ifdef CONFIG_SPARC64 #include <linux/notifier.h> |