From 819e36b8ceadb1d67db1ffe91b99c5282bfc6159 Mon Sep 17 00:00:00 2001
From: "H. Nikolaus Schaller"
Date: Fri, 20 Apr 2012 14:10:36 +0200
Subject: tree moved to a better location

---
 include/linux/byteorder/big_endian.h    |  69 ++++++++++++
 include/linux/byteorder/generic.h       | 180 ++++++++++++++++++++++++++++
 include/linux/byteorder/little_endian.h |  69 ++++++++++++
 include/linux/byteorder/swab.h          | 158 ++++++++++++++++++++++++++++
 4 files changed, 476 insertions(+)
 create mode 100644 include/linux/byteorder/big_endian.h
 create mode 100644 include/linux/byteorder/generic.h
 create mode 100644 include/linux/byteorder/little_endian.h
 create mode 100644 include/linux/byteorder/swab.h

(limited to 'include/linux/byteorder')

diff --git a/include/linux/byteorder/big_endian.h b/include/linux/byteorder/big_endian.h
new file mode 100644
index 0000000..19b0c86
--- /dev/null
+++ b/include/linux/byteorder/big_endian.h
@@ -0,0 +1,69 @@
+#ifndef _LINUX_BYTEORDER_BIG_ENDIAN_H
+#define _LINUX_BYTEORDER_BIG_ENDIAN_H
+
+#ifndef __BIG_ENDIAN
+#define __BIG_ENDIAN 4321
+#endif
+#ifndef __BIG_ENDIAN_BITFIELD
+#define __BIG_ENDIAN_BITFIELD
+#endif
+#define __BYTE_ORDER __BIG_ENDIAN
+
+#include <linux/byteorder/swab.h>
+
+#define __constant_htonl(x) ((__u32)(x))
+#define __constant_ntohl(x) ((__u32)(x))
+#define __constant_htons(x) ((__u16)(x))
+#define __constant_ntohs(x) ((__u16)(x))
+#define __constant_cpu_to_le64(x) ___swab64((x))
+#define __constant_le64_to_cpu(x) ___swab64((x))
+#define __constant_cpu_to_le32(x) ___swab32((x))
+#define __constant_le32_to_cpu(x) ___swab32((x))
+#define __constant_cpu_to_le16(x) ___swab16((x))
+#define __constant_le16_to_cpu(x) ___swab16((x))
+#define __constant_cpu_to_be64(x) ((__u64)(x))
+#define __constant_be64_to_cpu(x) ((__u64)(x))
+#define __constant_cpu_to_be32(x) ((__u32)(x))
+#define __constant_be32_to_cpu(x) ((__u32)(x))
+#define __constant_cpu_to_be16(x) ((__u16)(x))
+#define __constant_be16_to_cpu(x) ((__u16)(x))
+#define __cpu_to_le64(x) __swab64((x))
+#define __le64_to_cpu(x) __swab64((x))
+#define __cpu_to_le32(x) __swab32((x))
+#define __le32_to_cpu(x) __swab32((x))
+#define __cpu_to_le16(x) __swab16((x))
+#define __le16_to_cpu(x) __swab16((x))
+#define __cpu_to_be64(x) ((__u64)(x))
+#define __be64_to_cpu(x) ((__u64)(x))
+#define __cpu_to_be32(x) ((__u32)(x))
+#define __be32_to_cpu(x) ((__u32)(x))
+#define __cpu_to_be16(x) ((__u16)(x))
+#define __be16_to_cpu(x) ((__u16)(x))
+#define __cpu_to_le64p(x) __swab64p((x))
+#define __le64_to_cpup(x) __swab64p((x))
+#define __cpu_to_le32p(x) __swab32p((x))
+#define __le32_to_cpup(x) __swab32p((x))
+#define __cpu_to_le16p(x) __swab16p((x))
+#define __le16_to_cpup(x) __swab16p((x))
+#define __cpu_to_be64p(x) (*(__u64*)(x))
+#define __be64_to_cpup(x) (*(__u64*)(x))
+#define __cpu_to_be32p(x) (*(__u32*)(x))
+#define __be32_to_cpup(x) (*(__u32*)(x))
+#define __cpu_to_be16p(x) (*(__u16*)(x))
+#define __be16_to_cpup(x) (*(__u16*)(x))
+#define __cpu_to_le64s(x) __swab64s((x))
+#define __le64_to_cpus(x) __swab64s((x))
+#define __cpu_to_le32s(x) __swab32s((x))
+#define __le32_to_cpus(x) __swab32s((x))
+#define __cpu_to_le16s(x) __swab16s((x))
+#define __le16_to_cpus(x) __swab16s((x))
+#define __cpu_to_be64s(x) do {} while (0)
+#define __be64_to_cpus(x) do {} while (0)
+#define __cpu_to_be32s(x) do {} while (0)
+#define __be32_to_cpus(x) do {} while (0)
+#define __cpu_to_be16s(x) do {} while (0)
+#define __be16_to_cpus(x) do {} while (0)
+
+#include <linux/byteorder/generic.h>
+
+#endif /* _LINUX_BYTEORDER_BIG_ENDIAN_H */
diff --git a/include/linux/byteorder/generic.h b/include/linux/byteorder/generic.h
new file mode 100644
index 0000000..cff850f
--- /dev/null
+++ b/include/linux/byteorder/generic.h
@@ -0,0 +1,180 @@
+#ifndef _LINUX_BYTEORDER_GENERIC_H
+#define _LINUX_BYTEORDER_GENERIC_H
+
+/*
+ * linux/byteorder_generic.h
+ * Generic Byte-reordering support
+ *
+ * Francois-Rene Rideau 19970707
+ *    gathered all the good ideas from all asm-foo/byteorder.h into one file,
+ *    cleaned them up.
+ *    I hope it is compliant with non-GCC compilers.
+ *    I decided to put __BYTEORDER_HAS_U64__ in byteorder.h,
+ *    because I wasn't sure it would be ok to put it in types.h
+ *    Upgraded it to 2.1.43
+ * Francois-Rene Rideau 19971012
+ *    Upgraded it to 2.1.57
+ *    to please Linus T., replaced huge #ifdef's between little/big endian
+ *    by nestedly #include'd files.
+ * Francois-Rene Rideau 19971205
+ *    Made it to 2.1.71; now a facelift:
+ *    Put files under include/linux/byteorder/
+ *    Split swab from generic support.
+ *
+ * TODO:
+ *   = Regular kernel maintainers could also replace all these manual
+ *    byteswap macros that remain, disseminated among drivers,
+ *    after some grep of the sources...
+ *   = Linus might want to rename all these macros and files to fit his
+ *    taste, to fit his personal naming scheme.
+ *   = it seems that a few drivers would also appreciate
+ *    nybble swapping support...
+ *   = every architecture could add their byteswap macro in asm/byteorder.h
+ *    see how some architectures already do (i386, alpha, ppc, etc)
+ *   = cpu_to_beXX and beXX_to_cpu might some day need to be well
+ *    distinguished throughout the kernel. This is not the case currently,
+ *    since little endian, big endian, and pdp endian machines needn't it.
+ *    But this might be the case for, say, a port of Linux to 20/21 bit
+ *    architectures (and F21 Linux addict around?).
+ */
+
+/*
+ * The following macros are to be defined by <asm/byteorder.h>:
+ *
+ * Conversion of long and short int between network and host format
+ *	ntohl(__u32 x)
+ *	ntohs(__u16 x)
+ *	htonl(__u32 x)
+ *	htons(__u16 x)
+ * It seems that some programs (which? where? or perhaps a standard? POSIX?)
+ * might like the above to be functions, not macros (why?).
+ * if that's true, then detect them, and take measures.
+ * Anyway, the measure is: define only ___ntohl as a macro instead,
+ * and in a separate file, have
+ * unsigned long inline ntohl(x){return ___ntohl(x);}
+ *
+ * The same for constant arguments
+ *	__constant_ntohl(__u32 x)
+ *	__constant_ntohs(__u16 x)
+ *	__constant_htonl(__u32 x)
+ *	__constant_htons(__u16 x)
+ *
+ * Conversion of XX-bit integers (16- 32- or 64-)
+ * between native CPU format and little/big endian format
+ * 64-bit stuff only defined for proper architectures
+ *	cpu_to_[bl]eXX(__uXX x)
+ *	[bl]eXX_to_cpu(__uXX x)
+ *
+ * The same, but takes a pointer to the value to convert
+ *	cpu_to_[bl]eXXp(__uXX x)
+ *	[bl]eXX_to_cpup(__uXX x)
+ *
+ * The same, but change in situ
+ *	cpu_to_[bl]eXXs(__uXX x)
+ *	[bl]eXX_to_cpus(__uXX x)
+ *
+ * See asm-foo/byteorder.h for examples of how to provide
+ * architecture-optimized versions
+ *
+ */
+
+
+#if defined(__KERNEL__)
+/*
+ * inside the kernel, we can use nicknames;
+ * outside of it, we must avoid POSIX namespace pollution...
+ */
+#define cpu_to_le64 __cpu_to_le64
+#define le64_to_cpu __le64_to_cpu
+#define cpu_to_le32 __cpu_to_le32
+#define le32_to_cpu __le32_to_cpu
+#define cpu_to_le16 __cpu_to_le16
+#define le16_to_cpu __le16_to_cpu
+#define cpu_to_be64 __cpu_to_be64
+#define be64_to_cpu __be64_to_cpu
+#define cpu_to_be32 __cpu_to_be32
+#define be32_to_cpu __be32_to_cpu
+#define cpu_to_be16 __cpu_to_be16
+#define be16_to_cpu __be16_to_cpu
+#define cpu_to_le64p __cpu_to_le64p
+#define le64_to_cpup __le64_to_cpup
+#define cpu_to_le32p __cpu_to_le32p
+#define le32_to_cpup __le32_to_cpup
+#define cpu_to_le16p __cpu_to_le16p
+#define le16_to_cpup __le16_to_cpup
+#define cpu_to_be64p __cpu_to_be64p
+#define be64_to_cpup __be64_to_cpup
+#define cpu_to_be32p __cpu_to_be32p
+#define be32_to_cpup __be32_to_cpup
+#define cpu_to_be16p __cpu_to_be16p
+#define be16_to_cpup __be16_to_cpup
+#define cpu_to_le64s __cpu_to_le64s
+#define le64_to_cpus __le64_to_cpus
+#define cpu_to_le32s __cpu_to_le32s
+#define le32_to_cpus __le32_to_cpus
+#define cpu_to_le16s __cpu_to_le16s
+#define le16_to_cpus __le16_to_cpus
+#define cpu_to_be64s __cpu_to_be64s
+#define be64_to_cpus __be64_to_cpus
+#define cpu_to_be32s __cpu_to_be32s
+#define be32_to_cpus __be32_to_cpus
+#define cpu_to_be16s __cpu_to_be16s
+#define be16_to_cpus __be16_to_cpus
+#endif
+
+
+/*
+ * Handle ntohl and suches. These have various compatibility
+ * issues - like we want to give the prototype even though we
+ * also have a macro for them in case some strange program
+ * wants to take the address of the thing or something..
+ *
+ * Note that these used to return a "long" in libc5, even though
+ * long is often 64-bit these days.. Thus the casts.
+ *
+ * They have to be macros in order to do the constant folding
+ * correctly - if the argument is passed into an inline function
+ * it is no longer constant according to gcc..
+ */
+
+#undef ntohl
+#undef ntohs
+#undef htonl
+#undef htons
+
+/*
+ * Do the prototypes. Somebody might want to take the
+ * address or some such sick thing..
+ */
+#if defined(__KERNEL__) || (defined (__GLIBC__) && __GLIBC__ >= 2)
+extern __u32 ntohl(__u32);
+extern __u32 htonl(__u32);
+#else
+extern unsigned long int ntohl(unsigned long int);
+extern unsigned long int htonl(unsigned long int);
+#endif
+extern unsigned short int ntohs(unsigned short int);
+extern unsigned short int htons(unsigned short int);
+
+
+#if defined(__GNUC__) && (__GNUC__ >= 2)
+
+#define ___htonl(x) __cpu_to_be32(x)
+#define ___htons(x) __cpu_to_be16(x)
+#define ___ntohl(x) __be32_to_cpu(x)
+#define ___ntohs(x) __be16_to_cpu(x)
+
+#if defined(__KERNEL__) || (defined (__GLIBC__) && __GLIBC__ >= 2)
+#define htonl(x) ___htonl(x)
+#define ntohl(x) ___ntohl(x)
+#else
+#define htonl(x) ((unsigned long)___htonl(x))
+#define ntohl(x) ((unsigned long)___ntohl(x))
+#endif
+#define htons(x) ___htons(x)
+#define ntohs(x) ___ntohs(x)
+
+#endif /* OPTIMIZE */
+
+
+#endif /* _LINUX_BYTEORDER_GENERIC_H */
diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
new file mode 100644
index 0000000..a46f3ec
--- /dev/null
+++ b/include/linux/byteorder/little_endian.h
@@ -0,0 +1,69 @@
+#ifndef _LINUX_BYTEORDER_LITTLE_ENDIAN_H
+#define _LINUX_BYTEORDER_LITTLE_ENDIAN_H
+
+#ifndef __LITTLE_ENDIAN
+#define __LITTLE_ENDIAN 1234
+#endif
+#ifndef __LITTLE_ENDIAN_BITFIELD
+#define __LITTLE_ENDIAN_BITFIELD
+#endif
+#define __BYTE_ORDER __LITTLE_ENDIAN
+
+#include <linux/byteorder/swab.h>
+
+#define __constant_htonl(x) ___constant_swab32((x))
+#define __constant_ntohl(x) ___constant_swab32((x))
+#define __constant_htons(x) ___constant_swab16((x))
+#define __constant_ntohs(x) ___constant_swab16((x))
+#define __constant_cpu_to_le64(x) ((__u64)(x))
+#define __constant_le64_to_cpu(x) ((__u64)(x))
+#define __constant_cpu_to_le32(x) ((__u32)(x))
+#define __constant_le32_to_cpu(x) ((__u32)(x))
+#define __constant_cpu_to_le16(x) ((__u16)(x))
+#define __constant_le16_to_cpu(x) ((__u16)(x))
+#define __constant_cpu_to_be64(x) ___constant_swab64((x))
+#define __constant_be64_to_cpu(x) ___constant_swab64((x))
+#define __constant_cpu_to_be32(x) ___constant_swab32((x))
+#define __constant_be32_to_cpu(x) ___constant_swab32((x))
+#define __constant_cpu_to_be16(x) ___constant_swab16((x))
+#define __constant_be16_to_cpu(x) ___constant_swab16((x))
+#define __cpu_to_le64(x) ((__u64)(x))
+#define __le64_to_cpu(x) ((__u64)(x))
+#define __cpu_to_le32(x) ((__u32)(x))
+#define __le32_to_cpu(x) ((__u32)(x))
+#define __cpu_to_le16(x) ((__u16)(x))
+#define __le16_to_cpu(x) ((__u16)(x))
+#define __cpu_to_be64(x) __swab64((x))
+#define __be64_to_cpu(x) __swab64((x))
+#define __cpu_to_be32(x) __swab32((x))
+#define __be32_to_cpu(x) __swab32((x))
+#define __cpu_to_be16(x) __swab16((x))
+#define __be16_to_cpu(x) __swab16((x))
+#define __cpu_to_le64p(x) (*(__u64*)(x))
+#define __le64_to_cpup(x) (*(__u64*)(x))
+#define __cpu_to_le32p(x) (*(__u32*)(x))
+#define __le32_to_cpup(x) (*(__u32*)(x))
+#define __cpu_to_le16p(x) (*(__u16*)(x))
+#define __le16_to_cpup(x) (*(__u16*)(x))
+#define __cpu_to_be64p(x) __swab64p((x))
+#define __be64_to_cpup(x) __swab64p((x))
+#define __cpu_to_be32p(x) __swab32p((x))
+#define __be32_to_cpup(x) __swab32p((x))
+#define __cpu_to_be16p(x) __swab16p((x))
+#define __be16_to_cpup(x) __swab16p((x))
+#define __cpu_to_le64s(x) do {} while (0)
+#define __le64_to_cpus(x) do {} while (0)
+#define __cpu_to_le32s(x) do {} while (0)
+#define __le32_to_cpus(x) do {} while (0)
+#define __cpu_to_le16s(x) do {} while (0)
+#define __le16_to_cpus(x) do {} while (0)
+#define __cpu_to_be64s(x) __swab64s((x))
+#define __be64_to_cpus(x) __swab64s((x))
+#define __cpu_to_be32s(x) __swab32s((x))
+#define __be32_to_cpus(x) __swab32s((x))
+#define __cpu_to_be16s(x) __swab16s((x))
+#define __be16_to_cpus(x) __swab16s((x))
+
+#include <linux/byteorder/generic.h>
+
+#endif /* _LINUX_BYTEORDER_LITTLE_ENDIAN_H */
diff --git a/include/linux/byteorder/swab.h b/include/linux/byteorder/swab.h
new file mode 100644
index 0000000..b1d570e
--- /dev/null
+++ b/include/linux/byteorder/swab.h
@@ -0,0 +1,158 @@
+#ifndef _LINUX_BYTEORDER_SWAB_H
+#define _LINUX_BYTEORDER_SWAB_H
+
+/*
+ * linux/byteorder/swab.h
+ * Byte-swapping, independently from CPU endianness
+ *	swabXX[ps]?(foo)
+ *
+ * Francois-Rene Rideau 19971205
+ *    separated swab functions from cpu_to_XX,
+ *    to clean up support for bizarre-endian architectures.
+ *
+ * See asm-i386/byteorder.h and suches for examples of how to provide
+ * architecture-dependent optimized versions
+ *
+ */
+
+/* casts are necessary for constants, because we never know for sure
+ * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
+ */
+#define ___swab16(x) \
+	((__u16)( \
+		(((__u16)(x) & (__u16)0x00ffU) << 8) | \
+		(((__u16)(x) & (__u16)0xff00U) >> 8) ))
+#define ___swab32(x) \
+	((__u32)( \
+		(((__u32)(x) & (__u32)0x000000ffUL) << 24) | \
+		(((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \
+		(((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \
+		(((__u32)(x) & (__u32)0xff000000UL) >> 24) ))
+#define ___swab64(x) \
+	((__u64)( \
+		(__u64)(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \
+		(__u64)(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \
+		(__u64)(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \
+		(__u64)(((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \
+		(__u64)(((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \
+		(__u64)(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
+		(__u64)(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \
+		(__u64)(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56) ))
+
+/*
+ * provide defaults when no architecture-specific optimization is detected
+ */
+#ifndef __arch__swab16
+#  define __arch__swab16(x) ___swab16(x)
+#endif
+#ifndef __arch__swab32
+#  define __arch__swab32(x) ___swab32(x)
+#endif
+#ifndef __arch__swab64
+#  define __arch__swab64(x) ___swab64(x)
+#endif
+
+#ifndef __arch__swab16p
+#  define __arch__swab16p(x) __swab16(*(x))
+#endif
+#ifndef __arch__swab32p
+#  define __arch__swab32p(x) __swab32(*(x))
+#endif
+#ifndef __arch__swab64p
+#  define __arch__swab64p(x) __swab64(*(x))
+#endif
+
+#ifndef __arch__swab16s
+#  define __arch__swab16s(x) do { *(x) = __swab16p((x)); } while (0)
+#endif
+#ifndef __arch__swab32s
+#  define __arch__swab32s(x) do { *(x) = __swab32p((x)); } while (0)
+#endif
+#ifndef __arch__swab64s
+#  define __arch__swab64s(x) do { *(x) = __swab64p((x)); } while (0)
+#endif
+
+
+/*
+ * Allow constant folding
+ */
+#if defined(__GNUC__) && (__GNUC__ >= 2) && defined(__OPTIMIZE__)
+#  define __swab16(x) \
+(__builtin_constant_p((__u16)(x)) ? \
+ ___swab16((x)) : \
+ __fswab16((x)))
+#  define __swab32(x) \
+(__builtin_constant_p((__u32)(x)) ? \
+ ___swab32((x)) : \
+ __fswab32((x)))
+#  define __swab64(x) \
+(__builtin_constant_p((__u64)(x)) ? \
+ ___swab64((x)) : \
+ __fswab64((x)))
+#else
+#  define __swab16(x) __fswab16(x)
+#  define __swab32(x) __fswab32(x)
+#  define __swab64(x) __fswab64(x)
+#endif /* OPTIMIZE */
+
+
+static __inline__ __attribute__((const)) __u16 __fswab16(__u16 x)
+{
+	return __arch__swab16(x);
+}
+static __inline__ __u16 __swab16p(__u16 *x)
+{
+	return __arch__swab16p(x);
+}
+static __inline__ void __swab16s(__u16 *addr)
+{
+	__arch__swab16s(addr);
+}
+
+static __inline__ __attribute__((const)) __u32 __fswab32(__u32 x)
+{
+	return __arch__swab32(x);
+}
+static __inline__ __u32 __swab32p(__u32 *x)
+{
+	return __arch__swab32p(x);
+}
+static __inline__ void __swab32s(__u32 *addr)
+{
+	__arch__swab32s(addr);
+}
+
+#ifdef __BYTEORDER_HAS_U64__
+static __inline__ __attribute__((const)) __u64 __fswab64(__u64 x)
+{
+#  ifdef __SWAB_64_THRU_32__
+	__u32 h = x >> 32;
+	__u32 l = x & ((1ULL<<32)-1);
+	return (((__u64)__swab32(l)) << 32) | ((__u64)(__swab32(h)));
+#  else
+	return __arch__swab64(x);
+#  endif
+}
+static __inline__ __u64 __swab64p(__u64 *x)
+{
+	return __arch__swab64p(x);
+}
+static __inline__ void __swab64s(__u64 *addr)
+{
+	__arch__swab64s(addr);
+}
+#endif /* __BYTEORDER_HAS_U64__ */
+
+#if defined(__KERNEL__)
+#define swab16 __swab16
+#define swab32 __swab32
+#define swab64 __swab64
+#define swab16p __swab16p
+#define swab32p __swab32p
+#define swab64p __swab64p
+#define swab16s __swab16s
+#define swab32s __swab32s
+#define swab64s __swab64s
+#endif
+
+#endif /* _LINUX_BYTEORDER_SWAB_H */
-- 
cgit v1.1
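
The conversion layer built up by these headers (cpu_to_leXX, beXX_to_cpu and friends) is easiest to see from the caller's side. Below is a minimal userspace sketch, not part of the patch, that uses glibc's <endian.h> helpers htole32, htobe32 and le32toh as stand-ins for __cpu_to_le32, __cpu_to_be32 and __le32_to_cpu; the names and values are purely illustrative.

#include <stdio.h>
#include <stdint.h>
#include <endian.h>

int main(void)
{
	uint32_t cpu_val = 0x12345678;       /* value in native CPU byte order */

	uint32_t le_val = htole32(cpu_val);  /* role of __cpu_to_le32(): fixed little-endian layout */
	uint32_t be_val = htobe32(cpu_val);  /* role of __cpu_to_be32(): fixed big-endian layout */

	/* On a little-endian host the LE form is an identity and the BE form is
	 * byte-swapped; on a big-endian host it is the other way around. */
	printf("cpu 0x%08x  le 0x%08x  be 0x%08x\n", cpu_val, le_val, be_val);

	/* Converting back recovers the native value on any host. */
	printf("round trip 0x%08x\n", le32toh(le_val));
	return 0;
}

On a little-endian machine this prints le 0x12345678 and be 0x78563412; on a big-endian machine the two are reversed. That asymmetry is exactly what little_endian.h and big_endian.h encode by mapping half of the macros to casts and the other half to __swabXX().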
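
swab.h supplies the unconditional byte reversal that both endian headers fall back on, and its __swabXX() wrappers only call the out-of-line __fswabXX() functions when the argument is not a compile-time constant. A standalone sketch of the generic ___swab32() fallback, copied from the macro above with uint32_t substituted for __u32, shows the swap and the constant-folding property:

#include <stdio.h>
#include <stdint.h>

/* Generic 32-bit byte swap, as in swab.h, using standard types. */
#define ___swab32(x) \
	((uint32_t)( \
		(((uint32_t)(x) & 0x000000ffUL) << 24) | \
		(((uint32_t)(x) & 0x0000ff00UL) << 8) | \
		(((uint32_t)(x) & 0x00ff0000UL) >> 8) | \
		(((uint32_t)(x) & 0xff000000UL) >> 24) ))

int main(void)
{
	/* Being a pure macro, ___swab32(0x12345678) folds to the constant
	 * 0x78563412 at compile time -- the property that the
	 * __builtin_constant_p() test in __swab32() relies on. */
	printf("0x12345678 -> 0x%08x\n", ___swab32(0x12345678));
	return 0;
}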