author    Linus Torvalds <torvalds@g5.osdl.org>  2006-06-28 16:20:49 -0700
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-06-28 16:20:49 -0700
commit    27d68a36c4f1ca2fc6be82620843493462c08c51 (patch)
tree      a06b451e19c25a77595c918ca81bbb30f0ec9ebf /include/asm-arm
parent    76a22271fd14e3fe7660f8646db12f0780fa4fd2 (diff)
parent    583e7f5d36547f0d84caf71d43b71f0530a47766 (diff)
Merge branch 'nommu' of master.kernel.org:/home/rmk/linux-2.6-arm
* 'nommu' of master.kernel.org:/home/rmk/linux-2.6-arm:
  [ARM] nommu: backtrace code must not reference a discarded section
  [ARM] nommu: Initial uCLinux support for MMU-based CPUs
  [ARM] nommu: prevent Xscale-based machines being selected
  [ARM] nommu: export flush_dcache_page()
  [ARM] nommu: remove fault-armv, mmap and mm-armv files from nommu build
  [ARM] Remove TABLE_SIZE, and several unused function prototypes
  [ARM] nommu: Provide a simple flush_dcache_page implementation
  [ARM] nommu: add arch/arm/Kconfig-nommu to Kconfig files
  [ARM] nommu: add stubs for ioremap and friends
  [ARM] nommu: avoid selecting TLB and CPU specific copy code
  [ARM] nommu: uaccess tweaks
  [ARM] nommu: adjust headers for !MMU ARM systems
  [ARM] nommu: we need the TLS register emulation for nommu mode
Diffstat (limited to 'include/asm-arm')
-rw-r--r--  include/asm-arm/bugs.h              4
-rw-r--r--  include/asm-arm/domain.h            7
-rw-r--r--  include/asm-arm/mach/map.h          9
-rw-r--r--  include/asm-arm/memory.h           75
-rw-r--r--  include/asm-arm/mmu.h              16
-rw-r--r--  include/asm-arm/mmu_context.h       2
-rw-r--r--  include/asm-arm/page-nommu.h       51
-rw-r--r--  include/asm-arm/page.h              8
-rw-r--r--  include/asm-arm/pgalloc.h           8
-rw-r--r--  include/asm-arm/pgtable-nommu.h   123
-rw-r--r--  include/asm-arm/pgtable.h          10
-rw-r--r--  include/asm-arm/proc-fns.h          4
-rw-r--r--  include/asm-arm/uaccess.h         139
13 files changed, 378 insertions(+), 78 deletions(-)
diff --git a/include/asm-arm/bugs.h b/include/asm-arm/bugs.h
index 4c80ec5..ca54eb0 100644
--- a/include/asm-arm/bugs.h
+++ b/include/asm-arm/bugs.h
@@ -10,8 +10,12 @@
#ifndef __ASM_BUGS_H
#define __ASM_BUGS_H
+#ifdef CONFIG_MMU
extern void check_writebuffer_bugs(void);
#define check_bugs() check_writebuffer_bugs()
+#else
+#define check_bugs() do { } while (0)
+#endif
#endif
diff --git a/include/asm-arm/domain.h b/include/asm-arm/domain.h
index f8ea2de..4c2885a 100644
--- a/include/asm-arm/domain.h
+++ b/include/asm-arm/domain.h
@@ -50,6 +50,8 @@
#define domain_val(dom,type) ((type) << (2*(dom)))
#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_MMU
#define set_domain(x) \
do { \
__asm__ __volatile__( \
@@ -66,5 +68,10 @@
set_domain(thread->cpu_domain); \
} while (0)
+#else
+#define set_domain(x) do { } while (0)
+#define modify_domain(dom,type) do { } while (0)
+#endif
+
#endif
#endif /* !__ASSEMBLY__ */
diff --git a/include/asm-arm/mach/map.h b/include/asm-arm/mach/map.h
index e8ea67c..cef5364 100644
--- a/include/asm-arm/mach/map.h
+++ b/include/asm-arm/mach/map.h
@@ -16,8 +16,6 @@ struct map_desc {
unsigned int type;
};
-struct meminfo;
-
#define MT_DEVICE 0
#define MT_CACHECLEAN 1
#define MT_MINICLEAN 2
@@ -28,7 +26,8 @@ struct meminfo;
#define MT_IXP2000_DEVICE 7
#define MT_NONSHARED_DEVICE 8
-extern void create_memmap_holes(struct meminfo *);
-extern void memtable_init(struct meminfo *);
+#ifdef CONFIG_MMU
extern void iotable_init(struct map_desc *, int);
-extern void setup_io_desc(void);
+#else
+#define iotable_init(map,num) do { } while (0)
+#endif
diff --git a/include/asm-arm/memory.h b/include/asm-arm/memory.h
index 731e321..94f973b 100644
--- a/include/asm-arm/memory.h
+++ b/include/asm-arm/memory.h
@@ -2,6 +2,7 @@
* linux/include/asm-arm/memory.h
*
* Copyright (C) 2000-2002 Russell King
+ * modification for nommu, Hyok S. Choi, 2004
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -26,6 +27,8 @@
#include <asm/arch/memory.h>
#include <asm/sizes.h>
+#ifdef CONFIG_MMU
+
#ifndef TASK_SIZE
/*
* TASK_SIZE - the maximum size of a user space task.
@@ -48,6 +51,60 @@
#endif
/*
+ * The module space lives between the addresses given by TASK_SIZE
+ * and PAGE_OFFSET - it must be within 32MB of the kernel text.
+ */
+#define MODULE_END (PAGE_OFFSET)
+#define MODULE_START (MODULE_END - 16*1048576)
+
+#if TASK_SIZE > MODULE_START
+#error Top of user space clashes with start of module space
+#endif
+
+/*
+ * The XIP kernel gets mapped at the bottom of the module vm area.
+ * Since we use sections to map it, this macro replaces the physical address
+ * with its virtual address while keeping offset from the base section.
+ */
+#define XIP_VIRT_ADDR(physaddr) (MODULE_START + ((physaddr) & 0x000fffff))
+
+#else /* CONFIG_MMU */
+
+/*
+ * The limitation of user task size can grow up to the end of free ram region.
+ * It is difficult to define and perhaps will never meet the original meaning
+ * of this define that was meant to.
+ * Fortunately, there is no reference for this in noMMU mode, for now.
+ */
+#ifndef TASK_SIZE
+#define TASK_SIZE (CONFIG_DRAM_SIZE)
+#endif
+
+#ifndef TASK_UNMAPPED_BASE
+#define TASK_UNMAPPED_BASE UL(0x00000000)
+#endif
+
+#ifndef PHYS_OFFSET
+#define PHYS_OFFSET (CONFIG_DRAM_BASE)
+#endif
+
+#ifndef END_MEM
+#define END_MEM (CONFIG_DRAM_BASE + CONFIG_DRAM_SIZE)
+#endif
+
+#ifndef PAGE_OFFSET
+#define PAGE_OFFSET (PHYS_OFFSET)
+#endif
+
+/*
+ * The module can be at any place in ram in nommu mode.
+ */
+#define MODULE_END (END_MEM)
+#define MODULE_START (PHYS_OFFSET)
+
+#endif /* !CONFIG_MMU */
+
+/*
* Size of DMA-consistent memory region. Must be multiple of 2M,
* between 2MB and 14MB inclusive.
*/
@@ -71,24 +128,6 @@
#define __phys_to_pfn(paddr) ((paddr) >> PAGE_SHIFT)
#define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
-/*
- * The module space lives between the addresses given by TASK_SIZE
- * and PAGE_OFFSET - it must be within 32MB of the kernel text.
- */
-#define MODULE_END (PAGE_OFFSET)
-#define MODULE_START (MODULE_END - 16*1048576)
-
-#if TASK_SIZE > MODULE_START
-#error Top of user space clashes with start of module space
-#endif
-
-/*
- * The XIP kernel gets mapped at the bottom of the module vm area.
- * Since we use sections to map it, this macro replaces the physical address
- * with its virtual address while keeping offset from the base section.
- */
-#define XIP_VIRT_ADDR(physaddr) (MODULE_START + ((physaddr) & 0x000fffff))
-
#ifndef __ASSEMBLY__
/*
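The nommu branch of memory.h above replaces the fixed user/module split with constants derived from CONFIG_DRAM_BASE/SIZE, while the MMU branch keeps the XIP relocation macro unchanged. As a minimal sketch of that macro's arithmetic — the PAGE_OFFSET value and the flash address below are illustrative assumptions, not values from this patch:

#define PAGE_OFFSET   0xc0000000UL
#define MODULE_END    (PAGE_OFFSET)                       /* 0xc0000000 */
#define MODULE_START  (MODULE_END - 16*1048576)           /* 0xbf000000 */
#define XIP_VIRT_ADDR(physaddr) (MODULE_START + ((physaddr) & 0x000fffff))

/* An XIP kernel resident in flash at physical 0x00108000 is section-mapped
 * at XIP_VIRT_ADDR(0x00108000) == 0xbf008000: the offset within the 1MB
 * section (0x08000) is preserved, only the base moves into the module area. */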
diff --git a/include/asm-arm/mmu.h b/include/asm-arm/mmu.h
index a457cb7..23dde52 100644
--- a/include/asm-arm/mmu.h
+++ b/include/asm-arm/mmu.h
@@ -1,6 +1,8 @@
#ifndef __ARM_MMU_H
#define __ARM_MMU_H
+#ifdef CONFIG_MMU
+
typedef struct {
#if __LINUX_ARM_ARCH__ >= 6
unsigned int id;
@@ -13,4 +15,18 @@ typedef struct {
#define ASID(mm) (0)
#endif
+#else
+
+/*
+ * From nommu.h:
+ * Copyright (C) 2002, David McCullough <davidm@snapgear.com>
+ * modified for 2.6 by Hyok S. Choi <hyok.choi@samsung.com>
+ */
+typedef struct {
+ struct vm_list_struct *vmlist;
+ unsigned long end_brk;
+} mm_context_t;
+
+#endif
+
#endif
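With no page tables to carry, the !MMU mm_context_t above only records a task's list of mappings and its brk ceiling. A self-contained sketch of what that buys: the struct bodies below mirror the 2.6-era nommu definitions as an assumption (vm_list_struct lives in linux/mm.h, not in this patch), and the helper is hypothetical.

/* Illustrative stubs; field names follow the assumed 2.6-era definitions. */
struct vm_area_struct { unsigned long vm_start, vm_end; };
struct vm_list_struct {
	struct vm_list_struct *next;    /* next region owned by this mm */
	struct vm_area_struct *vma;     /* the region itself */
};
typedef struct {
	struct vm_list_struct *vmlist;
	unsigned long end_brk;
} mm_context_t;

/* On !MMU, "how much is mapped" degenerates to a simple list walk: */
static unsigned long total_mapped(const mm_context_t *ctx)
{
	unsigned long bytes = 0;
	const struct vm_list_struct *vml;

	for (vml = ctx->vmlist; vml; vml = vml->next)
		bytes += vml->vma->vm_end - vml->vma->vm_start;
	return bytes;
}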
diff --git a/include/asm-arm/mmu_context.h b/include/asm-arm/mmu_context.h
index 81c59fa..9fadb01 100644
--- a/include/asm-arm/mmu_context.h
+++ b/include/asm-arm/mmu_context.h
@@ -82,6 +82,7 @@ static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
+#ifdef CONFIG_MMU
unsigned int cpu = smp_processor_id();
if (prev != next) {
@@ -91,6 +92,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
if (cache_is_vivt())
cpu_clear(cpu, prev->cpu_vm_mask);
}
+#endif
}
#define deactivate_mm(tsk,mm) do { } while (0)
diff --git a/include/asm-arm/page-nommu.h b/include/asm-arm/page-nommu.h
new file mode 100644
index 0000000..a1bcad0
--- /dev/null
+++ b/include/asm-arm/page-nommu.h
@@ -0,0 +1,51 @@
+/*
+ * linux/include/asm-arm/page-nommu.h
+ *
+ * Copyright (C) 2004 Hyok S. Choi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASMARM_PAGE_NOMMU_H
+#define _ASMARM_PAGE_NOMMU_H
+
+#if !defined(CONFIG_SMALL_TASKS) && PAGE_SHIFT < 13
+#define KTHREAD_SIZE (8192)
+#else
+#define KTHREAD_SIZE PAGE_SIZE
+#endif
+
+#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
+#define free_user_page(page, addr) free_page(addr)
+
+#define clear_page(page) memset((page), 0, PAGE_SIZE)
+#define copy_page(to,from) memcpy((to), (from), PAGE_SIZE)
+
+#define clear_user_page(page, vaddr, pg) clear_page(page)
+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef unsigned long pte_t;
+typedef unsigned long pmd_t;
+typedef unsigned long pgd_t[2];
+typedef unsigned long pgprot_t;
+
+#define pte_val(x) (x)
+#define pmd_val(x) (x)
+#define pgd_val(x) ((x)[0])
+#define pgprot_val(x) (x)
+
+#define __pte(x) (x)
+#define __pmd(x) (x)
+#define __pgprot(x) (x)
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+extern unsigned long memory_start;
+extern unsigned long memory_end;
+
+#endif
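The PAGE_ALIGN macro in page-nommu.h is plain mask arithmetic, so it behaves the same outside the kernel. A standalone sketch, assuming the usual 4KB pages (PAGE_SHIFT of 12 is an illustrative choice, not something this patch fixes):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	/* 0x1234 rounds up to the next page boundary; an already
	 * page-aligned address is left unchanged. */
	printf("%#lx -> %#lx\n", 0x1234UL, PAGE_ALIGN(0x1234UL));  /* 0x2000 */
	printf("%#lx -> %#lx\n", 0x2000UL, PAGE_ALIGN(0x2000UL));  /* 0x2000 */
	return 0;
}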
diff --git a/include/asm-arm/page.h b/include/asm-arm/page.h
index 66cfeb5..63d12f0 100644
--- a/include/asm-arm/page.h
+++ b/include/asm-arm/page.h
@@ -23,6 +23,12 @@
#ifndef __ASSEMBLY__
+#ifndef CONFIG_MMU
+
+#include "page-nommu.h"
+
+#else
+
#include <asm/glue.h>
/*
@@ -171,6 +177,8 @@ typedef unsigned long pgprot_t;
/* the upper-most page table pointer */
extern pmd_t *top_pmd;
+#endif /* CONFIG_MMU */
+
#include <asm/memory.h>
#endif /* !__ASSEMBLY__ */
diff --git a/include/asm-arm/pgalloc.h b/include/asm-arm/pgalloc.h
index c4ac2e6..4d43945 100644
--- a/include/asm-arm/pgalloc.h
+++ b/include/asm-arm/pgalloc.h
@@ -16,6 +16,10 @@
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
+#define check_pgt_cache() do { } while (0)
+
+#ifdef CONFIG_MMU
+
#define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
#define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))
@@ -32,8 +36,6 @@ extern void free_pgd_slow(pgd_t *pgd);
#define pgd_alloc(mm) get_pgd_slow(mm)
#define pgd_free(pgd) free_pgd_slow(pgd)
-#define check_pgt_cache() do { } while (0)
-
/*
* Allocate one PTE table.
*
@@ -126,4 +128,6 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
__pmd_populate(pmdp, page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE);
}
+#endif /* CONFIG_MMU */
+
#endif
diff --git a/include/asm-arm/pgtable-nommu.h b/include/asm-arm/pgtable-nommu.h
new file mode 100644
index 0000000..b13322d
--- /dev/null
+++ b/include/asm-arm/pgtable-nommu.h
@@ -0,0 +1,123 @@
+/*
+ * linux/include/asm-arm/pgtable-nommu.h
+ *
+ * Copyright (C) 1995-2002 Russell King
+ * Copyright (C) 2004 Hyok S. Choi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASMARM_PGTABLE_NOMMU_H
+#define _ASMARM_PGTABLE_NOMMU_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/io.h>
+
+/*
+ * Trivial page table functions.
+ */
+#define pgd_present(pgd) (1)
+#define pgd_none(pgd) (0)
+#define pgd_bad(pgd) (0)
+#define pgd_clear(pgdp)
+#define kern_addr_valid(addr) (1)
+#define pmd_offset(a, b) ((void *)0)
+/* FIXME */
+/*
+ * PMD_SHIFT determines the size of the area a second-level page table can map
+ * PGDIR_SHIFT determines what a third-level page table entry can map
+ */
+#define PGDIR_SHIFT 21
+
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+/* FIXME */
+
+#define PAGE_NONE __pgprot(0)
+#define PAGE_SHARED __pgprot(0)
+#define PAGE_COPY __pgprot(0)
+#define PAGE_READONLY __pgprot(0)
+#define PAGE_KERNEL __pgprot(0)
+
+//extern void paging_init(struct meminfo *, struct machine_desc *);
+#define swapper_pg_dir ((pgd_t *) 0)
+
+#define __swp_type(x) (0)
+#define __swp_offset(x) (0)
+#define __swp_entry(typ,off) ((swp_entry_t) { ((typ) | ((off) << 7)) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+
+typedef pte_t *pte_addr_t;
+
+static inline int pte_file(pte_t pte) { return 0; }
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+#define ZERO_PAGE(vaddr) (virt_to_page(0))
+
+/*
+ * Mark the prot value as uncacheable and unbufferable.
+ */
+#define pgprot_noncached(prot) __pgprot(0)
+#define pgprot_writecombine(prot) __pgprot(0)
+
+
+/*
+ * These would be in other places but having them here reduces the diffs.
+ */
+extern unsigned int kobjsize(const void *objp);
+extern int is_in_rom(unsigned long);
+
+/*
+ * No page table caches to initialise.
+ */
+#define pgtable_cache_init() do { } while (0)
+#define io_remap_page_range remap_page_range
+#define io_remap_pfn_range remap_pfn_range
+
+#define MK_IOSPACE_PFN(space, pfn) (pfn)
+#define GET_IOSPACE(pfn) 0
+#define GET_PFN(pfn) (pfn)
+
+
+/*
+ * All 32bit addresses are effectively valid for vmalloc...
+ * Sort of meaningless for non-VM targets.
+ */
+#define VMALLOC_START 0
+#define VMALLOC_END 0xffffffff
+
+#define FIRST_USER_ADDRESS (0)
+
+#else
+
+/*
+ * dummy tlb and user structures.
+ */
+#define v3_tlb_fns (0)
+#define v4_tlb_fns (0)
+#define v4wb_tlb_fns (0)
+#define v4wbi_tlb_fns (0)
+#define v6_tlb_fns (0)
+
+#define v3_user_fns (0)
+#define v4_user_fns (0)
+#define v4_mc_user_fns (0)
+#define v4wb_user_fns (0)
+#define v4wt_user_fns (0)
+#define v6_user_fns (0)
+#define xscale_mc_user_fns (0)
+
+#endif /*__ASSEMBLY__*/
+
+#endif /* _ASMARM_PGTABLE_H */
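Because every pgd_* predicate in pgtable-nommu.h folds to a constant, generic page-table checks in shared code compile down to nothing on !MMU builds. A minimal sketch of that effect — the caller below is made up for illustration, and the stub macros ignore their arguments entirely:

#define pgd_present(pgd)      (1)
#define pgd_none(pgd)         (0)
#define pgd_bad(pgd)          (0)
#define kern_addr_valid(addr) (1)

/* After preprocessing, every condition is a compile-time constant and the
 * whole function reduces to "return 1"; no page-table memory is touched. */
static inline int nommu_walk_ok(unsigned long addr)
{
	if (!kern_addr_valid(addr))            /* !(1): never taken  */
		return 0;
	if (pgd_none(addr) || pgd_bad(addr))   /* (0) || (0): elided */
		return 0;
	return pgd_present(addr);              /* (1)                */
}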
diff --git a/include/asm-arm/pgtable.h b/include/asm-arm/pgtable.h
index e85c08d..8d3919c 100644
--- a/include/asm-arm/pgtable.h
+++ b/include/asm-arm/pgtable.h
@@ -11,9 +11,15 @@
#define _ASMARM_PGTABLE_H
#include <asm-generic/4level-fixup.h>
+#include <asm/proc-fns.h>
+
+#ifndef CONFIG_MMU
+
+#include "pgtable-nommu.h"
+
+#else
#include <asm/memory.h>
-#include <asm/proc-fns.h>
#include <asm/arch/vmalloc.h>
/*
@@ -378,4 +384,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
#endif /* !__ASSEMBLY__ */
+#endif /* CONFIG_MMU */
+
#endif /* _ASMARM_PGTABLE_H */
diff --git a/include/asm-arm/proc-fns.h b/include/asm-arm/proc-fns.h
index e931089..1bde92c 100644
--- a/include/asm-arm/proc-fns.h
+++ b/include/asm-arm/proc-fns.h
@@ -165,6 +165,8 @@
#include <asm/memory.h>
+#ifdef CONFIG_MMU
+
#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)
#define cpu_get_pgd() \
@@ -176,6 +178,8 @@
(pgd_t *)phys_to_virt(pg); \
})
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* __ASM_PROCFNS_H */
diff --git a/include/asm-arm/uaccess.h b/include/asm-arm/uaccess.h
index f909dc7..87aba57 100644
--- a/include/asm-arm/uaccess.h
+++ b/include/asm-arm/uaccess.h
@@ -41,15 +41,24 @@ struct exception_table_entry
extern int fixup_exception(struct pt_regs *regs);
/*
+ * These two are intentionally not defined anywhere - if the kernel
+ * code generates any references to them, that's a bug.
+ */
+extern int __get_user_bad(void);
+extern int __put_user_bad(void);
+
+/*
* Note that this is actually 0x1,0000,0000
*/
#define KERNEL_DS 0x00000000
-#define USER_DS TASK_SIZE
-
#define get_ds() (KERNEL_DS)
+
+#ifdef CONFIG_MMU
+
+#define USER_DS TASK_SIZE
#define get_fs() (current_thread_info()->addr_limit)
-static inline void set_fs (mm_segment_t fs)
+static inline void set_fs(mm_segment_t fs)
{
current_thread_info()->addr_limit = fs;
modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
@@ -75,8 +84,6 @@ static inline void set_fs (mm_segment_t fs)
: "cc"); \
flag; })
-#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
-
/*
* Single-value transfer routines. They automatically use the right
* size if we just have the right pointer type. Note that the functions
@@ -87,20 +94,10 @@ static inline void set_fs (mm_segment_t fs)
* fixup code, but there are a few places where it intrudes on the
* main code path. When we only write to user space, there is no
* problem.
- *
- * The "__xxx" versions of the user access functions do not verify the
- * address space - it must have been done previously with a separate
- * "access_ok()" call.
- *
- * The "xxx_error" versions set the third argument to EFAULT if an
- * error occurs, and leave it unchanged on success. Note that these
- * versions are void (ie, don't return a value as such).
*/
-
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
-extern int __get_user_bad(void);
#define __get_user_x(__r2,__p,__e,__s,__i...) \
__asm__ __volatile__ ( \
@@ -131,6 +128,74 @@ extern int __get_user_bad(void);
__e; \
})
+extern int __put_user_1(void *, unsigned int);
+extern int __put_user_2(void *, unsigned int);
+extern int __put_user_4(void *, unsigned int);
+extern int __put_user_8(void *, unsigned long long);
+
+#define __put_user_x(__r2,__p,__e,__s) \
+ __asm__ __volatile__ ( \
+ __asmeq("%0", "r0") __asmeq("%2", "r2") \
+ "bl __put_user_" #__s \
+ : "=&r" (__e) \
+ : "0" (__p), "r" (__r2) \
+ : "ip", "lr", "cc")
+
+#define put_user(x,p) \
+ ({ \
+ const register typeof(*(p)) __r2 asm("r2") = (x); \
+ const register typeof(*(p)) __user *__p asm("r0") = (p);\
+ register int __e asm("r0"); \
+ switch (sizeof(*(__p))) { \
+ case 1: \
+ __put_user_x(__r2, __p, __e, 1); \
+ break; \
+ case 2: \
+ __put_user_x(__r2, __p, __e, 2); \
+ break; \
+ case 4: \
+ __put_user_x(__r2, __p, __e, 4); \
+ break; \
+ case 8: \
+ __put_user_x(__r2, __p, __e, 8); \
+ break; \
+ default: __e = __put_user_bad(); break; \
+ } \
+ __e; \
+ })
+
+#else /* CONFIG_MMU */
+
+/*
+ * uClinux has only one addr space, so has simplified address limits.
+ */
+#define USER_DS KERNEL_DS
+
+#define segment_eq(a,b) (1)
+#define __addr_ok(addr) (1)
+#define __range_ok(addr,size) (0)
+#define get_fs() (KERNEL_DS)
+
+static inline void set_fs(mm_segment_t fs)
+{
+}
+
+#define get_user(x,p) __get_user(x,p)
+#define put_user(x,p) __put_user(x,p)
+
+#endif /* CONFIG_MMU */
+
+#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
+
+/*
+ * The "__xxx" versions of the user access functions do not verify the
+ * address space - it must have been done previously with a separate
+ * "access_ok()" call.
+ *
+ * The "xxx_error" versions set the third argument to EFAULT if an
+ * error occurs, and leave it unchanged on success. Note that these
+ * versions are void (ie, don't return a value as such).
+ */
#define __get_user(x,ptr) \
({ \
long __gu_err = 0; \
@@ -212,43 +277,6 @@ do { \
: "r" (addr), "i" (-EFAULT) \
: "cc")
-extern int __put_user_1(void *, unsigned int);
-extern int __put_user_2(void *, unsigned int);
-extern int __put_user_4(void *, unsigned int);
-extern int __put_user_8(void *, unsigned long long);
-extern int __put_user_bad(void);
-
-#define __put_user_x(__r2,__p,__e,__s) \
- __asm__ __volatile__ ( \
- __asmeq("%0", "r0") __asmeq("%2", "r2") \
- "bl __put_user_" #__s \
- : "=&r" (__e) \
- : "0" (__p), "r" (__r2) \
- : "ip", "lr", "cc")
-
-#define put_user(x,p) \
- ({ \
- const register typeof(*(p)) __r2 asm("r2") = (x); \
- const register typeof(*(p)) __user *__p asm("r0") = (p);\
- register int __e asm("r0"); \
- switch (sizeof(*(__p))) { \
- case 1: \
- __put_user_x(__r2, __p, __e, 1); \
- break; \
- case 2: \
- __put_user_x(__r2, __p, __e, 2); \
- break; \
- case 4: \
- __put_user_x(__r2, __p, __e, 4); \
- break; \
- case 8: \
- __put_user_x(__r2, __p, __e, 8); \
- break; \
- default: __e = __put_user_bad(); break; \
- } \
- __e; \
- })
-
#define __put_user(x,ptr) \
({ \
long __pu_err = 0; \
@@ -354,9 +382,16 @@ do { \
: "cc")
+#ifdef CONFIG_MMU
extern unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __clear_user(void __user *addr, unsigned long n);
+#else
+#define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
+#define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
+#define __clear_user(addr,n) (memset((void __force *)addr, 0, n), 0)
+#endif
+
extern unsigned long __strncpy_from_user(char *to, const char __user *from, unsigned long count);
extern unsigned long __strnlen_user(const char __user *s, long n);
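On the !MMU side of uaccess.h the address-space checks collapse: __range_ok() is always 0, so access_ok() always passes, and the copy helpers become bare memcpy/memset returning 0 bytes "not copied". A standalone sketch of that reduced behaviour, assuming nothing beyond the macros shown above (the __force annotation is dropped because this builds outside the kernel):

#include <stdio.h>
#include <string.h>

#define __range_ok(addr,size)        (0)
#define access_ok(type,addr,size)    (__range_ok(addr,size) == 0)
#define __copy_from_user(to,from,n)  (memcpy((to), (const void *)(from), (n)), 0)

int main(void)
{
	char src[] = "nommu";
	char dst[8];

	if (access_ok(0, src, sizeof(src)))    /* always true without an MMU */
		printf("left=%lu dst=%s\n",
		       (unsigned long)__copy_from_user(dst, src, sizeof(src)),
		       dst);                   /* prints: left=0 dst=nommu */
	return 0;
}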