Diffstat (limited to 'include/asm-ia64')
-rw-r--r--  include/asm-ia64/bitops.h       |  45
-rw-r--r--  include/asm-ia64/cacheflush.h   |   2
-rw-r--r--  include/asm-ia64/dma-mapping.h  |   1
-rw-r--r--  include/asm-ia64/elf.h          |   1
-rw-r--r--  include/asm-ia64/ide.h          |   1
-rw-r--r--  include/asm-ia64/io.h           |   4
-rw-r--r--  include/asm-ia64/kdebug.h       |  15
-rw-r--r--  include/asm-ia64/kprobes.h      |   2
-rw-r--r--  include/asm-ia64/mca.h          |   2
-rw-r--r--  include/asm-ia64/meminit.h      |   2
-rw-r--r--  include/asm-ia64/numa.h         |   1
-rw-r--r--  include/asm-ia64/pgtable.h      |  52
-rw-r--r--  include/asm-ia64/sal.h          |  15
-rw-r--r--  include/asm-ia64/scatterlist.h  |   7
-rw-r--r--  include/asm-ia64/semaphore.h    |   1
-rw-r--r--  include/asm-ia64/smp.h          |   4
-rw-r--r--  include/asm-ia64/spinlock.h     |   2
-rw-r--r--  include/asm-ia64/system.h       |   2
-rw-r--r--  include/asm-ia64/tlbflush.h     |  13
-rw-r--r--  include/asm-ia64/topology.h     |   2

20 files changed, 112 insertions(+), 62 deletions(-)
diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h
index 6cc517e..a977aff 100644
--- a/include/asm-ia64/bitops.h
+++ b/include/asm-ia64/bitops.h
@@ -9,6 +9,10 @@
  * O(1) scheduler patch
  */

+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
 #include <linux/compiler.h>
 #include <linux/types.h>
 #include <asm/intrinsics.h>
@@ -94,6 +98,38 @@ clear_bit (int nr, volatile void *addr)
 }

 /**
+ * clear_bit_unlock - Clears a bit in memory with release
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit_unlock() is atomic and may not be reordered.  It does
+ * contain a memory barrier suitable for unlock type operations.
+ */
+static __inline__ void
+clear_bit_unlock (int nr, volatile void *addr)
+{
+	__u32 mask, old, new;
+	volatile __u32 *m;
+	CMPXCHG_BUGCHECK_DECL
+
+	m = (volatile __u32 *) addr + (nr >> 5);
+	mask = ~(1 << (nr & 31));
+	do {
+		CMPXCHG_BUGCHECK(m);
+		old = *m;
+		new = old & mask;
+	} while (cmpxchg_rel(m, old, new) != old);
+}
+
+/**
+ * __clear_bit_unlock - Non-atomically clear a bit with release
+ *
+ * This is like clear_bit_unlock, but the implementation may use a non-atomic
+ * store (this one uses an atomic, however).
+ */
+#define __clear_bit_unlock clear_bit_unlock
+
+/**
  * __clear_bit - Clears a bit in memory (non-atomic version)
  */
 static __inline__ void
@@ -170,6 +206,15 @@ test_and_set_bit (int nr, volatile void *addr)
 }

 /**
+ * test_and_set_bit_lock - Set a bit and return its old value for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This is the same as test_and_set_bit on ia64
+ */
+#define test_and_set_bit_lock test_and_set_bit
+
+/**
  * __test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
  * @addr: Address to count from
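The two new lock-annotated bitops pair up as an acquire/release bit lock: test_and_set_bit_lock() takes a bit with acquire semantics (on ia64 plain test_and_set_bit already has them, hence the alias), and clear_bit_unlock() drops it with the release-ordered cmpxchg_rel. A minimal sketch of a caller, assuming a hypothetical driver-side flags word (MY_LOCK_BIT, my_flags, my_lock and my_unlock are illustrative names, not part of the patch):

#include <linux/bitops.h>

#define MY_LOCK_BIT  0                 /* hypothetical lock bit */
static unsigned long my_flags;         /* hypothetical flags word */

static void my_lock(void)
{
	/* acquire: spin until the bit was previously clear */
	while (test_and_set_bit_lock(MY_LOCK_BIT, &my_flags))
		cpu_relax();
}

static void my_unlock(void)
{
	/* release: stores above cannot sink below the clearing cmpxchg */
	clear_bit_unlock(MY_LOCK_BIT, &my_flags);
}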
diff --git a/include/asm-ia64/cacheflush.h b/include/asm-ia64/cacheflush.h
index 4906916..afcfbda 100644
--- a/include/asm-ia64/cacheflush.h
+++ b/include/asm-ia64/cacheflush.h
@@ -7,8 +7,8 @@
  */

 #include <linux/page-flags.h>
+#include <linux/bitops.h>

-#include <asm/bitops.h>
 #include <asm/page.h>

 /*
diff --git a/include/asm-ia64/dma-mapping.h b/include/asm-ia64/dma-mapping.h
index 6299b51..f1735a2 100644
--- a/include/asm-ia64/dma-mapping.h
+++ b/include/asm-ia64/dma-mapping.h
@@ -6,6 +6,7 @@
  * David Mosberger-Tang <davidm@hpl.hp.com>
  */
 #include <asm/machvec.h>
+#include <linux/scatterlist.h>

 #define dma_alloc_coherent	platform_dma_alloc_coherent	/* coherent mem. is cheap */
diff --git a/include/asm-ia64/elf.h b/include/asm-ia64/elf.h
index 25f9835..f10e29b 100644
--- a/include/asm-ia64/elf.h
+++ b/include/asm-ia64/elf.h
@@ -192,6 +192,7 @@ extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);

 #define GATE_EHDR	((const struct elfhdr *) GATE_ADDR)

+/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
 #define ARCH_DLINFO							\
 do {									\
 	extern char __kernel_syscall_via_epc[];				\
diff --git a/include/asm-ia64/ide.h b/include/asm-ia64/ide.h
index e928675..1ccf238 100644
--- a/include/asm-ia64/ide.h
+++ b/include/asm-ia64/ide.h
@@ -46,7 +46,6 @@ static inline unsigned long ide_default_io_base(int index)
 	}
 }

-#define IDE_ARCH_OBSOLETE_INIT
 #define ide_default_io_ctl(base)	((base) + 0x206) /* obsolete */

 #ifdef CONFIG_PCI
diff --git a/include/asm-ia64/io.h b/include/asm-ia64/io.h
index eb17a86..4ebed77 100644
--- a/include/asm-ia64/io.h
+++ b/include/asm-ia64/io.h
@@ -435,10 +435,6 @@ extern void memcpy_fromio(void *dst, const volatile void __iomem *src, long n);
 extern void memcpy_toio(volatile void __iomem *dst, const void *src, long n);
 extern void memset_io(volatile void __iomem *s, int c, long n);

-#define dma_cache_inv(_start,_size)		do { } while (0)
-#define dma_cache_wback(_start,_size)		do { } while (0)
-#define dma_cache_wback_inv(_start,_size)	do { } while (0)
-
 # endif /* __KERNEL__ */

 /*
diff --git a/include/asm-ia64/kdebug.h b/include/asm-ia64/kdebug.h
index 320cd8e..35e4940 100644
--- a/include/asm-ia64/kdebug.h
+++ b/include/asm-ia64/kdebug.h
@@ -26,21 +26,6 @@
  * 2005-Oct	Keith Owens <kaos@sgi.com>.  Expand notify_die to cover more
  *		events.
  */
-#include <linux/notifier.h>
-
-/*
- * These are only here because kprobes.c wants them to implement a
- * blatant layering violation.  Will hopefully go away soon once all
- * architectures are updated.
- */
-static inline int register_page_fault_notifier(struct notifier_block *nb)
-{
-	return 0;
-}
-static inline int unregister_page_fault_notifier(struct notifier_block *nb)
-{
-	return 0;
-}

 enum die_val {
 	DIE_BREAK = 1,
diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h
index 067d9de..a93ce9e 100644
--- a/include/asm-ia64/kprobes.h
+++ b/include/asm-ia64/kprobes.h
@@ -83,7 +83,7 @@ struct kprobe_ctlblk {
 };

 #define ARCH_SUPPORTS_KRETPROBES
-#define ARCH_INACTIVE_KPROBE_COUNT 1
+#define kretprobe_blacklist_size 0

 #define SLOT0_OPCODE_SHIFT	(37)
 #define SLOT1_p1_OPCODE_SHIFT	(37 - (64-46))
diff --git a/include/asm-ia64/mca.h b/include/asm-ia64/mca.h
index edd5d01..823553b 100644
--- a/include/asm-ia64/mca.h
+++ b/include/asm-ia64/mca.h
@@ -151,6 +151,8 @@ extern void ia64_mca_cmc_vector_setup(void);
 extern int  ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *));
 extern void ia64_unreg_MCA_extension(void);
 extern u64 ia64_get_rnat(u64 *);
+extern void ia64_mca_printk(const char * fmt, ...)
+	 __attribute__ ((format (printf, 1, 2)));

 struct ia64_mca_notify_die {
 	struct ia64_sal_os_state *sos;
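The format(printf, 1, 2) attribute on ia64_mca_printk() lets gcc type-check the format string against the variadic arguments at every call site. Illustration only (the calls below are hypothetical, not from the patch):

extern void ia64_mca_printk(const char *fmt, ...)
	__attribute__ ((format (printf, 1, 2)));

static void example(void)
{
	ia64_mca_printk("CMC vector %d\n", 42);    /* fine */
	ia64_mca_printk("CMC vector %d\n", "42");  /* now warns: %d vs. char * */
}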
diff --git a/include/asm-ia64/meminit.h b/include/asm-ia64/meminit.h
index 3a62878..f93308f 100644
--- a/include/asm-ia64/meminit.h
+++ b/include/asm-ia64/meminit.h
@@ -35,7 +35,7 @@ extern void find_memory (void);
 extern void reserve_memory (void);
 extern void find_initrd (void);
 extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg);
-extern void efi_memmap_init(unsigned long *, unsigned long *);
+extern unsigned long efi_memmap_init(unsigned long *s, unsigned long *e);
 extern int find_max_min_low_pfn (unsigned long , unsigned long, void *);

 extern unsigned long vmcore_find_descriptor_size(unsigned long address);
diff --git a/include/asm-ia64/numa.h b/include/asm-ia64/numa.h
index 7d5e2cc..6a8a27c 100644
--- a/include/asm-ia64/numa.h
+++ b/include/asm-ia64/numa.h
@@ -24,6 +24,7 @@

 extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
 extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
+extern pg_data_t *pgdat_list[MAX_NUMNODES];

 /* Stuff below this line could be architecture independent */
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index de6d01e..e6204f1 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -150,7 +150,7 @@
 # ifndef __ASSEMBLY__

 #include <linux/sched.h>	/* for mm_struct */
-#include <asm/bitops.h>
+#include <linux/bitops.h>
 #include <asm/cacheflush.h>
 #include <asm/mmu_context.h>
 #include <asm/processor.h>
@@ -223,12 +223,6 @@ ia64_phys_addr_valid (unsigned long addr)
  * page table.
  */

-/*
- * On some architectures, special things need to be done when setting
- * the PTE in a page table.  Nothing special needs to be on IA-64.
- */
-#define set_pte(ptep, pteval)	(*(ptep) = (pteval))
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

 #define VMALLOC_START		(RGN_BASE(RGN_GATE) + 0x200000000UL)
 #ifdef CONFIG_VIRTUAL_MEM_MAP
@@ -236,8 +230,14 @@ ia64_phys_addr_valid (unsigned long addr)
 # define VMALLOC_END		vmalloc_end
   extern unsigned long vmalloc_end;
 #else
+#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP)
+/* SPARSEMEM_VMEMMAP uses half of vmalloc... */
+# define VMALLOC_END (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 10)))
+# define vmemmap ((struct page *)VMALLOC_END)
+#else
 # define VMALLOC_END		(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
 #endif
+#endif

 /* fs/proc/kcore.c */
 #define kc_vaddr_to_offset(v) ((v) - RGN_BASE(RGN_GATE))
@@ -315,6 +315,36 @@ ia64_phys_addr_valid (unsigned long addr)
 #define pte_mkhuge(pte)		(__pte(pte_val(pte)))

 /*
+ * Because ia64's Icache and Dcache is not coherent (on a cpu), we need to
+ * sync icache and dcache when we insert *new* executable page.
+ *  __ia64_sync_icache_dcache() check Pg_arch_1 bit and flush icache
+ * if necessary.
+ *
+ *  set_pte() is also called by the kernel, but we can expect that the kernel
+ *  flushes icache explicitly if necessary.
+ */
+#define pte_present_exec_user(pte)\
+	((pte_val(pte) & (_PAGE_P | _PAGE_PL_MASK | _PAGE_AR_RX)) == \
+		 (_PAGE_P | _PAGE_PL_3 | _PAGE_AR_RX))
+
+extern void __ia64_sync_icache_dcache(pte_t pteval);
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+	/* page is present && page is user  && page is executable
+	 * && (page swapin or new page or page migraton
+	 *	|| copy_on_write with page copying.)
+	 */
+	if (pte_present_exec_user(pteval) &&
+	    (!pte_present(*ptep) ||
+	     pte_pfn(*ptep) != pte_pfn(pteval)))
+		/* load_module() calles flush_icache_range() explicitly*/
+		__ia64_sync_icache_dcache(pteval);
+	*ptep = pteval;
+}
+
+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+
+/*
  * Make page protection values cacheable, uncacheable, or write-
  * combining.  Note that "protection" is really a misnomer here as the
  * protection value contains the memory attribute bits, dirty bits, and
@@ -483,12 +513,6 @@ extern struct page *zero_page_memmap_ptr;
 #define HUGETLB_PGDIR_MASK	(~(HUGETLB_PGDIR_SIZE-1))
 #endif

-/*
- * IA-64 doesn't have any external MMU info: the page tables contain all the necessary
- * information.  However, we use this routine to take care of any (delayed) i-cache
- * flushing that may be necessary.
- */
-extern void lazy_mmu_prot_update (pte_t pte);

 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 /*
@@ -578,7 +602,7 @@
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 #define __HAVE_ARCH_PTE_SAME
 #define __HAVE_ARCH_PGD_OFFSET_GATE
-#define __HAVE_ARCH_LAZY_MMU_PROT_UPDATE
+

 #ifndef CONFIG_PGTABLE_4
 #include <asm-generic/pgtable-nopud.h>
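The net effect of the new set_pte() is a small decision table: flush the instruction cache only when an executable user page either becomes present or changes its backing page frame (swap-in, new page, migration, or a COW copy); a pure protection change on the same frame is left alone. A standalone sketch of that policy, using made-up field names rather than the real ia64 PTE bit layout:

#include <stdbool.h>
#include <stdio.h>

struct fake_pte { bool present, user, exec; unsigned long pfn; };

/* mirrors the condition in the new set_pte(): pte_present_exec_user()
 * on the new value, plus "newly present or different frame" on the old */
static bool needs_icache_sync(struct fake_pte old, struct fake_pte new)
{
	return new.present && new.user && new.exec &&
	       (!old.present || old.pfn != new.pfn);
}

int main(void)
{
	struct fake_pte old = { .present = false };
	struct fake_pte new = { true, true, true, 100 };

	printf("swap-in of exec page: sync=%d\n", needs_icache_sync(old, new));
	old = new;                      /* same frame: protection change only */
	printf("prot change only:     sync=%d\n", needs_icache_sync(old, new));
	new.pfn = 101;                  /* COW copied the data to a new frame */
	printf("COW to new frame:     sync=%d\n", needs_icache_sync(old, new));
	return 0;
}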
diff --git a/include/asm-ia64/sal.h b/include/asm-ia64/sal.h
index 46cadf5..1f5412d 100644
--- a/include/asm-ia64/sal.h
+++ b/include/asm-ia64/sal.h
@@ -46,25 +46,28 @@ extern spinlock_t sal_lock;

 /* SAL spec _requires_ eight args for each call. */
-#define __SAL_CALL(result,a0,a1,a2,a3,a4,a5,a6,a7)	\
-	result = (*ia64_sal)(a0,a1,a2,a3,a4,a5,a6,a7)
+#define __IA64_FW_CALL(entry,result,a0,a1,a2,a3,a4,a5,a6,a7)	\
+	result = (*entry)(a0,a1,a2,a3,a4,a5,a6,a7)

-# define SAL_CALL(result,args...) do {				\
+# define IA64_FW_CALL(entry,result,args...) do {		\
 	unsigned long __ia64_sc_flags;				\
 	struct ia64_fpreg __ia64_sc_fr[6];			\
 	ia64_save_scratch_fpregs(__ia64_sc_fr);			\
 	spin_lock_irqsave(&sal_lock, __ia64_sc_flags);		\
-	__SAL_CALL(result, args);				\
+	__IA64_FW_CALL(entry, result, args);			\
 	spin_unlock_irqrestore(&sal_lock, __ia64_sc_flags);	\
 	ia64_load_scratch_fpregs(__ia64_sc_fr);			\
 } while (0)

+# define SAL_CALL(result,args...)			\
+	IA64_FW_CALL(ia64_sal, result, args);
+
 # define SAL_CALL_NOLOCK(result,args...) do {		\
 	unsigned long __ia64_scn_flags;			\
 	struct ia64_fpreg __ia64_scn_fr[6];		\
 	ia64_save_scratch_fpregs(__ia64_scn_fr);	\
 	local_irq_save(__ia64_scn_flags);		\
-	__SAL_CALL(result, args);			\
+	__IA64_FW_CALL(ia64_sal, result, args);		\
 	local_irq_restore(__ia64_scn_flags);		\
 	ia64_load_scratch_fpregs(__ia64_scn_fr);	\
 } while (0)
@@ -73,7 +76,7 @@ extern spinlock_t sal_lock;
 	struct ia64_fpreg __ia64_scs_fr[6];		\
 	ia64_save_scratch_fpregs(__ia64_scs_fr);	\
 	preempt_disable();				\
-	__SAL_CALL(result, args);			\
+	__IA64_FW_CALL(ia64_sal, result, args);		\
 	preempt_enable();				\
 	ia64_load_scratch_fpregs(__ia64_scs_fr);	\
 } while (0)
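After this change SAL_CALL is a thin wrapper over the generic IA64_FW_CALL with ia64_sal as the firmware entry point, so existing callers look exactly as before. A sketch of one, modelled on the existing ia64_sal_freq_base() wrapper in this header (the function name here is hypothetical):

static inline s64
my_sal_freq_base(unsigned long which, unsigned long *ticks_per_second)
{
	struct ia64_sal_retval isrv;

	/* expands to IA64_FW_CALL(ia64_sal, isrv, SAL_FREQ_BASE, ...) */
	SAL_CALL(isrv, SAL_FREQ_BASE, which, 0, 0, 0, 0, 0, 0);
	*ticks_per_second = isrv.v0;
	return isrv.status;
}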
diff --git a/include/asm-ia64/scatterlist.h b/include/asm-ia64/scatterlist.h
index a452ea2..d6f5787 100644
--- a/include/asm-ia64/scatterlist.h
+++ b/include/asm-ia64/scatterlist.h
@@ -9,7 +9,10 @@
 #include <asm/types.h>

 struct scatterlist {
-	struct page *page;
+#ifdef CONFIG_DEBUG_SG
+	unsigned long sg_magic;
+#endif
+	unsigned long page_link;
 	unsigned int offset;
 	unsigned int length;	/* buffer length */

@@ -30,4 +33,6 @@ struct scatterlist {
 #define sg_dma_len(sg)		((sg)->dma_length)
 #define sg_dma_address(sg)	((sg)->dma_address)

+#define ARCH_HAS_SG_CHAIN
+
 #endif /* _ASM_IA64_SCATTERLIST_H */
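With page_link replacing the bare struct page pointer, the low bits of the field are free to carry the chain and end-of-list markers that sg chaining needs (hence ARCH_HAS_SG_CHAIN), and CONFIG_DEBUG_SG can use sg_magic to catch uninitialized entries. Drivers are expected to go through the generic accessors rather than touch the field directly; a sketch, assuming the lib/scatterlist helpers introduced in the same merge window:

#include <linux/scatterlist.h>

static void fill_one_entry(struct scatterlist *sg, struct page *page,
			   unsigned int len, unsigned int offset)
{
	sg_init_table(sg, 1);                /* zeroes the entry, sets the end marker */
	sg_set_page(sg, page, len, offset);  /* encodes the page into page_link */
}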
diff --git a/include/asm-ia64/semaphore.h b/include/asm-ia64/semaphore.h
index f483eeb..d8393d1 100644
--- a/include/asm-ia64/semaphore.h
+++ b/include/asm-ia64/semaphore.h
@@ -28,7 +28,6 @@ struct semaphore {
 	struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)

 #define DECLARE_MUTEX(name)		__DECLARE_SEMAPHORE_GENERIC(name, 1)
-#define DECLARE_MUTEX_LOCKED(name)	__DECLARE_SEMAPHORE_GENERIC(name, 0)

 static inline void
 sema_init (struct semaphore *sem, int val)
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
index 6314b29..471cc2e 100644
--- a/include/asm-ia64/smp.h
+++ b/include/asm-ia64/smp.h
@@ -14,8 +14,8 @@
 #include <linux/threads.h>
 #include <linux/kernel.h>
 #include <linux/cpumask.h>
+#include <linux/bitops.h>

-#include <asm/bitops.h>
 #include <asm/io.h>
 #include <asm/param.h>
 #include <asm/processor.h>
@@ -58,7 +58,7 @@ extern char no_int_routing __devinitdata;

 extern cpumask_t cpu_online_map;
 extern cpumask_t cpu_core_map[NR_CPUS];
-extern cpumask_t cpu_sibling_map[NR_CPUS];
+DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 extern int smp_num_siblings;
 extern int smp_num_cpucores;
 extern void __iomem *ipi_base_addr;
diff --git a/include/asm-ia64/spinlock.h b/include/asm-ia64/spinlock.h
index ff857e3..0229fb9 100644
--- a/include/asm-ia64/spinlock.h
+++ b/include/asm-ia64/spinlock.h
@@ -11,9 +11,9 @@

 #include <linux/compiler.h>
 #include <linux/kernel.h>
+#include <linux/bitops.h>

 #include <asm/atomic.h>
-#include <asm/bitops.h>
 #include <asm/intrinsics.h>
 #include <asm/system.h>
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index 91bb8e0..595112b 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -32,6 +32,8 @@
 #include <linux/kernel.h>
 #include <linux/types.h>

+#define AT_VECTOR_SIZE_ARCH 2 /* entries in ARCH_DLINFO */
+
 struct pci_vector_struct {
 	__u16 segment;	/* PCI Segment number */
 	__u16 bus;	/* PCI Bus number */
diff --git a/include/asm-ia64/tlbflush.h b/include/asm-ia64/tlbflush.h
index e37f9fb..80bcb0a 100644
--- a/include/asm-ia64/tlbflush.h
+++ b/include/asm-ia64/tlbflush.h
@@ -84,19 +84,6 @@ flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
 }

 /*
- * Flush the TLB entries mapping the virtually mapped linear page
- * table corresponding to address range [START-END).
- */
-static inline void
-flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end)
-{
-	/*
-	 * Deprecated.  The virtual page table is now flushed via the normal gather/flush
-	 * interface (see tlb.h).
-	 */
-}
-
-/*
  * Flush the local TLB.  Invoked from another cpu using an IPI.
  */
 #ifdef CONFIG_SMP
diff --git a/include/asm-ia64/topology.h b/include/asm-ia64/topology.h
index 233f1ca..2d67b72 100644
--- a/include/asm-ia64/topology.h
+++ b/include/asm-ia64/topology.h
@@ -112,7 +112,7 @@ void build_cpu_to_node_map(void);
 #define topology_physical_package_id(cpu)	(cpu_data(cpu)->socket_id)
 #define topology_core_id(cpu)			(cpu_data(cpu)->core_id)
 #define topology_core_siblings(cpu)		(cpu_core_map[cpu])
-#define topology_thread_siblings(cpu)		(cpu_sibling_map[cpu])
+#define topology_thread_siblings(cpu)		(per_cpu(cpu_sibling_map, cpu))
 #define smt_capable() 				(smp_num_siblings > 1)

 #endif
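The smp.h and topology.h hunks are two halves of the same conversion: cpu_sibling_map moves from an NR_CPUS-sized array to a per-cpu variable, so readers index it with per_cpu() rather than []. A sketch of a reader (the function is hypothetical, not part of the patch):

#include <linux/cpumask.h>
#include <linux/percpu.h>

static int my_count_thread_siblings(int cpu)
{
	/* per_cpu() replaces the old cpu_sibling_map[cpu] array access */
	return cpus_weight(per_cpu(cpu_sibling_map, cpu));
}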