-rw-r--r--   fastbootd/commands/partitions.c |   5
-rw-r--r--   fastbootd/other/gptedit.c       |  19
-rw-r--r--   fastbootd/utils.c               |   6
-rw-r--r--   include/cutils/atomic-aarch64.h | 227
-rw-r--r--   include/cutils/atomic-inline.h  |   4
-rw-r--r--   include/cutils/atomic.h         |  17
-rw-r--r--   libbacktrace/Android.mk         |   5
-rw-r--r--   rootdir/init.environ.rc.in      |   1
8 files changed, 268 insertions, 16 deletions
diff --git a/fastbootd/commands/partitions.c b/fastbootd/commands/partitions.c
index de80ea3..74232e6 100644
--- a/fastbootd/commands/partitions.c
+++ b/fastbootd/commands/partitions.c
@@ -42,6 +42,7 @@
 #include <sys/ioctl.h>
 #include <stdlib.h>
 #include <cutils/config_utils.h>
+#include <inttypes.h>
 
 #include "partitions.h"
 #include "debug.h"
@@ -80,7 +81,7 @@ int gpt_mmap(struct GPT_mapping *mapping, uint64_t location, int size, int fd)
     uint64_t sz = get_file_size64(fd);
 
     if (sz < size + location) {
-        D(ERR, "the location of mapping area is outside of the device size %lld", sz);
+        D(ERR, "the location of mapping area is outside of the device size %" PRId64, sz);
         return 1;
     }
     location = ALIGN_DOWN(location, PAGE_SIZE);
@@ -89,7 +90,7 @@ int gpt_mmap(struct GPT_mapping *mapping, uint64_t location, int size, int fd)
 
     if (mapping->map_ptr == MAP_FAILED) {
         mapping->ptr = MAP_FAILED;
-        D(ERR, "map failed %d", (int) mapping->map_ptr);
+        D(ERR, "map failed: %s", strerror(errno));
         return 1;
     }
 
diff --git a/fastbootd/other/gptedit.c b/fastbootd/other/gptedit.c
index 16d34a5..d423529 100644
--- a/fastbootd/other/gptedit.c
+++ b/fastbootd/other/gptedit.c
@@ -29,9 +29,10 @@
  * SUCH DAMAGE.
  */
 
+#include <getopt.h>
+#include <inttypes.h>
 #include <stdio.h>
 #include <stdlib.h>
-#include <getopt.h>
 #include <unistd.h>
 
 #include <cutils/klog.h>
@@ -185,7 +186,7 @@ void printGPT(struct GPT_entry_table *table) {
             name[m] = entry->name[m] & 127;
         }
         name[m] = 0;
-        printf("#%03d %13lld %13lld %s\n",
+        printf("#%03d %13"PRId64" %13"PRId64" %s\n",
               n + 1, entry->first_lba, entry->last_lba, name);
     }
 }
@@ -197,11 +198,11 @@ void configPrintGPT(struct GPT_entry_table *table) {
     char temp_guid[17];
     temp_guid[16] = 0;
 
-    printf("header_lba %lld\n", table->header->current_lba);
-    printf("backup_lba %lld\n", table->header->backup_lba);
-    printf("first_lba %lld\n", table->header->first_usable_lba);
-    printf("last_lba %lld\n", table->header->last_usable_lba);
-    printf("entries_lba %lld\n", table->header->entries_lba);
+    printf("header_lba %"PRId64"\n", table->header->current_lba);
+    printf("backup_lba %"PRId64"\n", table->header->backup_lba);
+    printf("first_lba %"PRId64"\n", table->header->first_usable_lba);
+    printf("last_lba %"PRId64"\n", table->header->last_usable_lba);
+    printf("entries_lba %"PRId64"\n", table->header->entries_lba);
     snprintf(temp_guid, 17, "%s", table->header->disk_guid);
     printf("guid \"%s\"", temp_guid);
 
@@ -220,8 +221,8 @@ void configPrintGPT(struct GPT_entry_table *table) {
         printf("    %s {\n", name);
         snprintf(temp_guid, 17, "%s", entry->partition_guid);
         printf("        guid \"%s\"\n", temp_guid);
-        printf("        first_lba %lld\n", entry->first_lba);
-        printf("        partition_size %lld\n", size);
+        printf("        first_lba %"PRId64"\n", entry->first_lba);
+        printf("        partition_size %"PRId64"\n", size);
         if (entry->flags & GPT_FLAG_SYSTEM)
             printf("        system\n");
         if (entry->flags & GPT_FLAG_BOOTABLE)
diff --git a/fastbootd/utils.c b/fastbootd/utils.c
index fe3f0f8..bef2463 100644
--- a/fastbootd/utils.c
+++ b/fastbootd/utils.c
@@ -169,7 +169,7 @@ ssize_t bulk_write(int bulk_in, const char *buf, size_t length)
     do {
         ret = TEMP_FAILURE_RETRY(write(bulk_in, buf + count, length - count));
         if (ret < 0) {
-            D(WARN, "[ bulk_write failed fd=%d length=%d errno=%d %s ]",
+            D(WARN, "[ bulk_write failed fd=%d length=%zu errno=%d %s ]",
               bulk_in, length, errno, strerror(errno));
             return -1;
         } else {
@@ -190,13 +190,13 @@ ssize_t bulk_read(int bulk_out, char *buf, size_t length)
         size_t to_read = (length - n > READ_BUF_SIZE) ? READ_BUF_SIZE : length - n;
         ret = TEMP_FAILURE_RETRY(read(bulk_out, buf + n, to_read));
         if (ret < 0) {
-            D(WARN, "[ bulk_read failed fd=%d length=%d errno=%d %s ]",
+            D(WARN, "[ bulk_read failed fd=%d length=%zu errno=%d %s ]",
               bulk_out, length, errno, strerror(errno));
             return ret;
         }
         n += ret;
         if (ret < (ssize_t)to_read) {
-            D(VERBOSE, "bulk_read short read, ret=%zd to_read=%u n=%u length=%u",
+            D(VERBOSE, "bulk_read short read, ret=%zd to_read=%zu n=%zu length=%zu",
              ret, to_read, n, length);
             break;
         }
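The fastbootd changes above are all printf-format fixes: 64-bit fields (LBAs, device sizes) move from %lld to the <inttypes.h> PRId64 macro, and size_t arguments move to %zu, so the same format strings stay correct on both 32-bit and 64-bit (AArch64/LP64) builds. A minimal standalone sketch of that idiom, not taken from the patch (the variable names are made up):

    /* Illustrative only: portable format specifiers for fixed-width and
     * size types, the idiom the diff above switches to. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t first_lba = 2048;   /* hypothetical LBA value */
        size_t length = 4096;        /* hypothetical buffer length */

        /* %lld assumes "long long"; PRIu64/PRId64 expand to whatever the
         * platform's 64-bit types really are, and %zu matches size_t. */
        printf("first_lba %" PRIu64 "\n", first_lba);
        printf("length %zu\n", length);
        return 0;
    }

PRIu64 is the unsigned counterpart; the patch keeps PRId64 even for unsigned fields, which still prints the expected value as long as it fits in the signed 64-bit range.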
diff --git a/include/cutils/atomic-aarch64.h b/include/cutils/atomic-aarch64.h
new file mode 100644
index 0000000..927379f
--- /dev/null
+++ b/include/cutils/atomic-aarch64.h
@@ -0,0 +1,227 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef ANDROID_CUTILS_ATOMIC_AARCH64_H
+#define ANDROID_CUTILS_ATOMIC_AARCH64_H
+
+#include <stdint.h>
+
+#ifndef ANDROID_ATOMIC_INLINE
+#define ANDROID_ATOMIC_INLINE inline __attribute__((always_inline))
+#endif
+
+/*
+   TODOAArch64: Revisit the below functions and check for potential
+   optimizations using assembly code or otherwise.
+*/
+
+extern ANDROID_ATOMIC_INLINE
+void android_compiler_barrier(void)
+{
+    __asm__ __volatile__ ("" : : : "memory");
+}
+
+#if ANDROID_SMP == 0
+extern ANDROID_ATOMIC_INLINE
+void android_memory_barrier(void)
+{
+    android_compiler_barrier();
+}
+
+extern ANDROID_ATOMIC_INLINE
+void android_memory_store_barrier(void)
+{
+    android_compiler_barrier();
+}
+#else
+extern ANDROID_ATOMIC_INLINE
+void android_memory_barrier(void)
+{
+    __asm__ __volatile__ ("dmb ish" : : : "memory");
+}
+
+extern ANDROID_ATOMIC_INLINE
+void android_memory_store_barrier(void)
+{
+    __asm__ __volatile__ ("dmb ishst" : : : "memory");
+}
+#endif
+
+extern ANDROID_ATOMIC_INLINE
+int32_t android_atomic_acquire_load(volatile const int32_t *ptr)
+{
+    int32_t value = *ptr;
+    android_memory_barrier();
+    return value;
+}
+
+extern ANDROID_ATOMIC_INLINE
+int64_t android_atomic_acquire_load64(volatile const int64_t *ptr)
+{
+    int64_t value = *ptr;
+    android_memory_barrier();
+    return value;
+}
+
+extern ANDROID_ATOMIC_INLINE
+int32_t android_atomic_release_load(volatile const int32_t *ptr)
+{
+    android_memory_barrier();
+    return *ptr;
+}
+
+extern ANDROID_ATOMIC_INLINE
+int64_t android_atomic_release_load64(volatile const int64_t *ptr)
+{
+    android_memory_barrier();
+    return *ptr;
+}
+
+extern ANDROID_ATOMIC_INLINE
+void android_atomic_acquire_store(int32_t value, volatile int32_t *ptr)
+{
+    *ptr = value;
+    android_memory_barrier();
+}
+
+extern ANDROID_ATOMIC_INLINE
+void android_atomic_acquire_store64(int64_t value, volatile int64_t *ptr)
+{
+    *ptr = value;
+    android_memory_barrier();
+}
+
+extern ANDROID_ATOMIC_INLINE
+void android_atomic_release_store(int32_t value, volatile int32_t *ptr)
+{
+    android_memory_barrier();
+    *ptr = value;
+}
+
+extern ANDROID_ATOMIC_INLINE
+void android_atomic_release_store64(int64_t value, volatile int64_t *ptr)
+{
+    android_memory_barrier();
+    *ptr = value;
+}
+
+extern ANDROID_ATOMIC_INLINE
+int android_atomic_cas(int32_t old_value, int32_t new_value,
+                       volatile int32_t *ptr)
+{
+    return __sync_val_compare_and_swap(ptr, old_value, new_value) != old_value;
+}
+
+extern ANDROID_ATOMIC_INLINE
+int64_t android_atomic_cas64(int64_t old_value, int64_t new_value,
+                             volatile int64_t *ptr)
+{
+    return __sync_val_compare_and_swap(ptr, old_value, new_value) != old_value;
+}
+
+extern ANDROID_ATOMIC_INLINE
+int android_atomic_acquire_cas(int32_t old_value, int32_t new_value,
+                               volatile int32_t *ptr)
+{
+    int status = android_atomic_cas(old_value, new_value, ptr);
+    android_memory_barrier();
+    return status;
+}
+
+extern ANDROID_ATOMIC_INLINE
+int64_t android_atomic_acquire_cas64(int64_t old_value, int64_t new_value,
+                                     volatile int64_t *ptr)
+{
+    int status = android_atomic_cas64(old_value, new_value, ptr);
+    android_memory_barrier();
+    return status;
+}
+
+extern ANDROID_ATOMIC_INLINE
+int android_atomic_release_cas(int32_t old_value, int32_t new_value,
+                               volatile int32_t *ptr)
+{
+    android_memory_barrier();
+    return android_atomic_cas(old_value, new_value, ptr);
+}
+
+extern ANDROID_ATOMIC_INLINE
+int64_t android_atomic_release_cas64(int64_t old_value, int64_t new_value,
+                                     volatile int64_t *ptr)
+{
+    android_memory_barrier();
+    return android_atomic_cas64(old_value, new_value, ptr);
+}
+
+extern ANDROID_ATOMIC_INLINE
+int32_t android_atomic_add(int32_t increment, volatile int32_t *ptr)
+{
+    int32_t prev, status;
+    android_memory_barrier();
+    do {
+        prev = *ptr;
+        status = android_atomic_cas(prev, prev + increment, ptr);
+    } while (__builtin_expect(status != 0, 0));
+    return prev;
+}
+
+extern ANDROID_ATOMIC_INLINE
+int32_t android_atomic_inc(volatile int32_t *addr)
+{
+    return android_atomic_add(1, addr);
+}
+
+extern ANDROID_ATOMIC_INLINE
+int32_t android_atomic_dec(volatile int32_t *addr)
+{
+    return android_atomic_add(-1, addr);
+}
+
+extern ANDROID_ATOMIC_INLINE
+int32_t android_atomic_and(int32_t value, volatile int32_t *ptr)
+{
+    int32_t prev, status;
+    android_memory_barrier();
+    do {
+        prev = *ptr;
+        status = android_atomic_cas(prev, prev & value, ptr);
+    } while (__builtin_expect(status != 0, 0));
+    return prev;
+}
+
+extern ANDROID_ATOMIC_INLINE
+int32_t android_atomic_or(int32_t value, volatile int32_t *ptr)
+{
+    int32_t prev, status;
+    android_memory_barrier();
+    do {
+        prev = *ptr;
+        status = android_atomic_cas(prev, prev | value, ptr);
+    } while (__builtin_expect(status != 0, 0));
+    return prev;
+}
+
+#endif /* ANDROID_CUTILS_ATOMIC_AARCH64_H */
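In the new header, android_atomic_add(), _and() and _or() are built as a compare-and-swap retry loop on top of the GCC/Clang __sync_val_compare_and_swap() builtin, with an explicit android_memory_barrier() for ordering. A self-contained sketch of the same retry pattern (illustrative only, plain __sync builtins, no cutils dependency):

    /* Sketch of the CAS retry loop used by android_atomic_add() above;
     * illustrative only, assumes the GCC/Clang __sync builtins. */
    #include <stdint.h>

    static int32_t fetch_and_add(volatile int32_t *ptr, int32_t increment)
    {
        int32_t prev;
        do {
            prev = *ptr;   /* snapshot the current value */
            /* the CAS fails and we retry if another thread changed *ptr
             * between the snapshot and the swap */
        } while (__sync_val_compare_and_swap(ptr, prev, prev + increment) != prev);
        return prev;       /* old value, matching android_atomic_add()'s contract */
    }

The header keeps the loop shape and explicit barrier placement of the other per-architecture cutils headers, leaving further AArch64-specific tuning to the TODO noted above.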
diff --git a/include/cutils/atomic-inline.h b/include/cutils/atomic-inline.h
index 0b13138..29a28d5 100644
--- a/include/cutils/atomic-inline.h
+++ b/include/cutils/atomic-inline.h
@@ -43,7 +43,9 @@ extern "C" {
 # error "Must define ANDROID_SMP before including atomic-inline.h"
 #endif
 
-#if defined(__arm__)
+#if defined(__aarch64__)
+#include <cutils/atomic-aarch64.h>
+#elif defined(__arm__)
 #include <cutils/atomic-arm.h>
 #elif defined(__i386__) || defined(__x86_64__)
 #include <cutils/atomic-x86.h>
diff --git a/include/cutils/atomic.h b/include/cutils/atomic.h
index ae42eb8..1787e34 100644
--- a/include/cutils/atomic.h
+++ b/include/cutils/atomic.h
@@ -80,6 +80,11 @@ int32_t android_atomic_or(int32_t value, volatile int32_t* addr);
 int32_t android_atomic_acquire_load(volatile const int32_t* addr);
 int32_t android_atomic_release_load(volatile const int32_t* addr);
 
+#if defined (__LP64__)
+int64_t android_atomic_acquire_load64(volatile const int64_t* addr);
+int64_t android_atomic_release_load64(volatile const int64_t* addr);
+#endif
+
 /*
  * Perform an atomic store with "acquire" or "release" ordering.
  *
@@ -89,6 +94,11 @@ int32_t android_atomic_release_load(volatile const int32_t* addr);
 void android_atomic_acquire_store(int32_t value, volatile int32_t* addr);
 void android_atomic_release_store(int32_t value, volatile int32_t* addr);
 
+#if defined (__LP64__)
+void android_atomic_acquire_store64(int64_t value, volatile int64_t* addr);
+void android_atomic_release_store64(int64_t value, volatile int64_t* addr);
+#endif
+
 /*
  * Compare-and-set operation with "acquire" or "release" ordering.
  *
@@ -106,6 +116,13 @@ int android_atomic_acquire_cas(int32_t oldvalue, int32_t newvalue,
 int android_atomic_release_cas(int32_t oldvalue, int32_t newvalue,
                                volatile int32_t* addr);
 
+#if defined (__LP64__)
+int64_t android_atomic_acquire_cas64(int64_t old_value, int64_t new_value,
+                                     volatile int64_t *ptr);
+int64_t android_atomic_release_cas64(int64_t old_value, int64_t new_value,
+                                     volatile int64_t *ptr);
+#endif
+
 /*
  * Aliases for code using an older version of this header.  These are now
  * deprecated and should not be used.  The definitions will be removed
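The new 64-bit declarations in atomic.h are guarded by __LP64__, so only 64-bit builds such as AArch64 see them; 32-bit targets keep the existing 32-bit-only API. A hedged usage sketch, not part of the patch (the counter and function names are invented), using the newly declared load/store helpers:

    /* Illustrative only: publishing a 64-bit value with the LP64-only
     * helpers declared in the atomic.h hunk above. */
    #include <cutils/atomic.h>

    #if defined(__LP64__)
    static volatile int64_t g_sequence;   /* hypothetical shared counter */

    void publish(int64_t next)
    {
        /* writes made before this store are visible before the new value */
        android_atomic_release_store64(next, &g_sequence);
    }

    int64_t observe(void)
    {
        /* reads after this load see everything the writer published */
        return android_atomic_acquire_load64(&g_sequence);
    }
    #endif  /* __LP64__ */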
diff --git a/libbacktrace/Android.mk b/libbacktrace/Android.mk
index 8995fc1..d2fd79e 100644
--- a/libbacktrace/Android.mk
+++ b/libbacktrace/Android.mk
@@ -148,6 +148,11 @@ LOCAL_CFLAGS += \
 	-DGTEST_OS_LINUX_ANDROID \
 	-DGTEST_HAS_STD_STRING \
 
+ifeq ($(TARGET_ARCH),aarch64)
+  $(info TODO: $(LOCAL_PATH)/Android.mk -fstack-protector not yet available for the AArch64 toolchain)
+  LOCAL_CFLAGS += -fno-stack-protector
+endif # aarch64
+
 LOCAL_CONLYFLAGS += \
 	$(common_conlyflags) \
 
diff --git a/rootdir/init.environ.rc.in b/rootdir/init.environ.rc.in
index d2f74c0..927c33d 100644
--- a/rootdir/init.environ.rc.in
+++ b/rootdir/init.environ.rc.in
@@ -1,7 +1,6 @@
 # set up the global environment
 on init
     export PATH /sbin:/vendor/bin:/system/sbin:/system/bin:/system/xbin
-    export LD_LIBRARY_PATH /vendor/lib:/system/lib
     export ANDROID_BOOTLOGO 1
     export ANDROID_ROOT /system
     export ANDROID_ASSETS /system/app
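For context on the libbacktrace/Android.mk hunk above: -fstack-protector makes the compiler place a canary in the frames of functions with local character buffers and verify it before returning, and since the AArch64 toolchain of the time lacked that support, the makefile forces -fno-stack-protector for that architecture. A tiny illustrative function of the kind the flag would normally instrument (assuming the default GCC heuristic of protecting functions that contain char arrays):

    /* Illustrative only: with -fstack-protector the compiler guards this
     * frame with a canary because of the local char array; with
     * -fno-stack-protector, as forced above, no check is emitted. */
    #include <stdio.h>

    void greet(const char *name)
    {
        char buf[64];   /* local char array triggers the canary heuristic */
        snprintf(buf, sizeof(buf), "hello %s", name);
        puts(buf);
    }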