summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--core/Makefile532
-rw-r--r--core/apicheck_msg_current.txt71
-rw-r--r--core/base_rules.mk10
-rw-r--r--core/binary.mk33
-rw-r--r--core/ccache.mk10
-rwxr-xr-xcore/checktree24
-rw-r--r--core/clang/HOST_x86_common.mk5
-rw-r--r--core/clang/arm.mk2
-rw-r--r--core/clang/config.mk8
-rw-r--r--core/cleanbuild.mk5
-rw-r--r--core/clear_vars.mk6
-rw-r--r--core/combo/HOST_darwin-x86.mk13
-rw-r--r--core/combo/HOST_darwin-x86_64.mk6
-rw-r--r--core/combo/TARGET_linux-arm.mk1
-rw-r--r--core/combo/arch/arm/armv7-a-neon.mk19
-rw-r--r--core/combo/mac_version.mk21
-rw-r--r--core/config.mk66
-rw-r--r--core/definitions.mk77
-rw-r--r--core/dex_preopt_odex_install.mk2
-rw-r--r--core/distdir.mk2
-rw-r--r--core/droiddoc.mk6
-rw-r--r--core/dumpvar.mk5
-rw-r--r--core/dynamic_binary.mk14
-rw-r--r--core/envsetup.mk8
-rw-r--r--core/generate_extra_images.mk118
-rw-r--r--core/host_java_library.mk3
-rw-r--r--core/java.mk12
-rw-r--r--core/java_library.mk6
-rw-r--r--core/main.mk76
-rw-r--r--core/mtk_target.mk13
-rwxr-xr-xcore/mtk_utils.mk5
-rw-r--r--core/notice_files.mk2
-rw-r--r--core/package_internal.mk20
-rw-r--r--core/pathmap.mk31
-rw-r--r--core/pdk_config.mk2
-rw-r--r--core/prebuilt_internal.mk9
-rw-r--r--core/product.mk70
-rw-r--r--core/product_config.mk55
-rw-r--r--core/qcom_target.mk128
-rwxr-xr-xcore/qcom_utils.mk230
-rw-r--r--core/static_java_library.mk6
-rw-r--r--core/tasks/apicheck.mk4
-rw-r--r--core/tasks/boot_jars_package_check.mk2
-rwxr-xr-xcore/tasks/check_boot_jars/check_boot_jars.py8
-rw-r--r--core/tasks/collect_gpl_sources.mk2
-rw-r--r--core/tasks/dt_image.mk48
-rw-r--r--core/tasks/kernel.mk367
-rw-r--r--core/tasks/oem_image.mk11
-rw-r--r--core/tasks/product-graph.mk11
-rw-r--r--core/tasks/sdk-addon.mk2
-rw-r--r--envsetup.sh1109
-rw-r--r--target/board/generic/sepolicy/property_contexts2
-rw-r--r--target/product/base.mk1
-rw-r--r--target/product/core.mk2
-rw-r--r--target/product/core_base.mk5
-rw-r--r--target/product/core_minimal.mk8
-rw-r--r--target/product/core_tiny.mk2
-rw-r--r--target/product/emulator.mk3
-rw-r--r--target/product/full_base.mk19
-rw-r--r--target/product/full_base_telephony.mk7
-rw-r--r--target/product/generic_no_telephony.mk8
-rw-r--r--target/product/languages_full.mk4
-rw-r--r--target/product/sdk_phone_arm64.mk2
-rw-r--r--target/product/sdk_phone_armv7.mk2
-rw-r--r--target/product/sdk_phone_mips.mk2
-rw-r--r--target/product/sdk_phone_mips64.mk2
-rw-r--r--target/product/sdk_phone_x86.mk2
-rw-r--r--target/product/sdk_phone_x86_64.mk2
-rw-r--r--target/product/security/cm-devkey.x509.pem23
-rw-r--r--target/product/security/cm.x509.pem21
-rw-r--r--target/product/telephony.mk2
-rwxr-xr-xtools/adbs2
-rw-r--r--tools/apicheck/Android.mk2
-rwxr-xr-xtools/buildinfo.sh27
-rwxr-xr-xtools/check_radio_versions.py22
-rwxr-xr-xtools/compare_fileslist.py51
-rw-r--r--tools/device/AndroidBoard.mk.template8
-rw-r--r--tools/device/AndroidProducts.mk.template2
-rw-r--r--tools/device/BoardConfig.mk.template31
-rw-r--r--tools/device/cm.mk.template15
-rw-r--r--tools/device/device.mk.template24
-rwxr-xr-xtools/device/mkvendor.sh113
-rw-r--r--tools/device/recovery.fstab.template10
-rw-r--r--tools/device/system.prop.template3
-rwxr-xr-xtools/diff_package_overlays.py6
-rw-r--r--tools/event_log_tags.py10
-rwxr-xr-xtools/fileslist.py6
-rwxr-xr-xtools/filter-product-graph.py12
-rwxr-xr-xtools/findleaves.py6
-rwxr-xr-xtools/generate-notice-files.py83
-rwxr-xr-xtools/getb64key.py18
-rwxr-xr-xtools/java-event-log-tags.py28
-rwxr-xr-xtools/java-layers.py37
-rwxr-xr-xtools/merge-event-log-tags.py39
-rwxr-xr-xtools/parsedeps.py39
-rwxr-xr-xtools/post_process_props.py34
-rwxr-xr-xtools/product_debug.py22
-rwxr-xr-xtools/releasetools/add_img_to_target_files.py118
-rw-r--r--tools/releasetools/blockimgdiff.py18
-rwxr-xr-xtools/releasetools/build_image.py85
-rwxr-xr-xtools/releasetools/check_target_files_signatures.py79
-rw-r--r--tools/releasetools/common.py311
-rw-r--r--tools/releasetools/edify_generator.py64
-rwxr-xr-xtools/releasetools/img_from_target_files.py51
-rwxr-xr-xtools/releasetools/make_recovery_patch.py4
-rwxr-xr-xtools/releasetools/ota_from_target_files.py274
-rw-r--r--tools/releasetools/rangelib.py2
-rwxr-xr-xtools/releasetools/sign_target_files_apks.py85
-rw-r--r--tools/releasetools/sparse_img.py28
-rwxr-xr-xtools/repopick.py377
-rwxr-xr-xtools/roomservice.py297
-rw-r--r--tools/signapk/SignApk.java21
-rwxr-xr-xtools/warn.py6
113 files changed, 5046 insertions, 839 deletions
diff --git a/core/Makefile b/core/Makefile
index 07d1c94..3ac9b0d 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -1,5 +1,17 @@
# Put some miscellaneous rules here
+# Build system colors
+
+ifneq ($(BUILD_WITH_COLORS),0)
+ CL_RED="\033[31m"
+ CL_GRN="\033[32m"
+ CL_YLW="\033[33m"
+ CL_BLU="\033[34m"
+ CL_MAG="\033[35m"
+ CL_CYN="\033[36m"
+ CL_RST="\033[0m"
+endif
+
# HACK: clear LOCAL_PATH from including last build target before calling
# intermedites-dir-for
LOCAL_PATH := $(BUILD_SYSTEM)
@@ -34,7 +46,6 @@ unique_product_copy_files_destinations :=
$(foreach cf,$(unique_product_copy_files_pairs), \
$(eval _src := $(call word-colon,1,$(cf))) \
$(eval _dest := $(call word-colon,2,$(cf))) \
- $(call check-product-copy-files,$(cf)) \
$(if $(filter $(unique_product_copy_files_destinations),$(_dest)), \
$(info PRODUCT_COPY_FILES $(cf) ignored.), \
$(eval _fulldest := $(call append-path,$(PRODUCT_OUT),$(_dest))) \
@@ -113,8 +124,22 @@ endif
BUILD_VERSION_TAGS += $(BUILD_KEYS)
BUILD_VERSION_TAGS := $(subst $(space),$(comma),$(sort $(BUILD_VERSION_TAGS)))
+# If the final fingerprint should be different than what was used by the build system,
+# we can allow that too.
+ifeq ($(TARGET_VENDOR_PRODUCT_NAME),)
+TARGET_VENDOR_PRODUCT_NAME := $(TARGET_PRODUCT)
+endif
+
+ifeq ($(TARGET_VENDOR_DEVICE_NAME),)
+TARGET_VENDOR_DEVICE_NAME := $(TARGET_DEVICE)
+endif
+
+ifeq ($(TARGET_VENDOR_RELEASE_BUILD_ID),)
+TARGET_VENDOR_RELEASE_BUILD_ID := $(BUILD_NUMBER)
+endif
+
# A human-readable string that descibes this build in detail.
-build_desc := $(TARGET_PRODUCT)-$(TARGET_BUILD_VARIANT) $(PLATFORM_VERSION) $(BUILD_ID) $(BUILD_NUMBER) $(BUILD_VERSION_TAGS)
+build_desc := $(TARGET_VENDOR_PRODUCT_NAME)-$(TARGET_BUILD_VARIANT) $(PLATFORM_VERSION) $(BUILD_ID) $(TARGET_VENDOR_RELEASE_BUILD_ID) $(BUILD_VERSION_TAGS)
$(intermediate_system_build_prop): PRIVATE_BUILD_DESC := $(build_desc)
# The string used to uniquely identify the combined build and product; used by the OTA server.
@@ -126,7 +151,7 @@ ifeq (,$(strip $(BUILD_FINGERPRINT)))
else
BF_BUILD_NUMBER := $(BUILD_NUMBER)
endif
- BUILD_FINGERPRINT := $(PRODUCT_BRAND)/$(TARGET_PRODUCT)/$(TARGET_DEVICE):$(PLATFORM_VERSION)/$(BUILD_ID)/$(BF_BUILD_NUMBER):$(TARGET_BUILD_VARIANT)/$(BUILD_VERSION_TAGS)
+ BUILD_FINGERPRINT := $(PRODUCT_BRAND)/$(TARGET_VENDOR_PRODUCT_NAME)/$(TARGET_VENDOR_DEVICE_NAME):$(PLATFORM_VERSION)/$(BUILD_ID)/$(TARGET_VENDOR_RELEASE_BUILD_ID):$(TARGET_BUILD_VARIANT)/$(BUILD_VERSION_TAGS)
endif
ifneq ($(words $(BUILD_FINGERPRINT)),1)
$(error BUILD_FINGERPRINT cannot contain spaces: "$(BUILD_FINGERPRINT)")
@@ -192,8 +217,9 @@ ifneq ($(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_OEM_PROPERTIES),)
endif
$(hide) TARGET_BUILD_TYPE="$(TARGET_BUILD_VARIANT)" \
TARGET_BUILD_FLAVOR="$(TARGET_PRODUCT)-$(TARGET_BUILD_VARIANT)" \
- TARGET_DEVICE="$(TARGET_DEVICE)" \
- PRODUCT_NAME="$(TARGET_PRODUCT)" \
+ TARGET_DEVICE="$(TARGET_VENDOR_DEVICE_NAME)" \
+ CM_DEVICE="$(TARGET_DEVICE)" \
+ PRODUCT_NAME="$(TARGET_VENDOR_PRODUCT_NAME)" \
PRODUCT_BRAND="$(PRODUCT_BRAND)" \
PRODUCT_DEFAULT_LOCALE="$(call get-default-product-locale,$(PRODUCT_LOCALES))" \
PRODUCT_DEFAULT_WIFI_CHANNELS="$(PRODUCT_DEFAULT_WIFI_CHANNELS)" \
@@ -221,6 +247,10 @@ endif
TARGET_CPU_ABI="$(TARGET_CPU_ABI)" \
TARGET_CPU_ABI2="$(TARGET_CPU_ABI2)" \
TARGET_AAPT_CHARACTERISTICS="$(TARGET_AAPT_CHARACTERISTICS)" \
+ TARGET_UNIFIED_DEVICE="$(TARGET_UNIFIED_DEVICE)" \
+ TARGET_SKIP_DEFAULT_LOCALE="$(TARGET_SKIP_DEFAULT_LOCALE)" \
+ TARGET_SKIP_PRODUCT_DEVICE="$(TARGET_SKIP_PRODUCT_DEVICE)" \
+ $(PRODUCT_BUILD_PROP_OVERRIDES) \
bash $(BUILDINFO_SH) >> $@
$(hide) $(foreach file,$(system_prop_file), \
if [ -f "$(file)" ]; then \
@@ -238,7 +268,7 @@ endif
$(hide) $(foreach line,$(ADDITIONAL_BUILD_PROPERTIES), \
echo "$(line)" >> $@;)
$(hide) cat $(INSTALLED_ANDROID_INFO_TXT_TARGET) | grep 'require version-' | sed -e 's/require version-/ro.build.expect./g' >> $@
- $(hide) build/tools/post_process_props.py $@ $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_PROPERTY_BLACKLIST)
+ $(hide) build/tools/post_process_props.py $@ "$(PRODUCT_PROPERTY_UBER_OVERRIDES)" $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_PROPERTY_BLACKLIST)
build_desc :=
@@ -376,6 +406,9 @@ endif
# exist with the suffixes ".x509.pem" and ".pk8".
DEFAULT_KEY_CERT_PAIR := $(DEFAULT_SYSTEM_DEV_CERTIFICATE)
+ifneq ($(OTA_PACKAGE_SIGNING_KEY),)
+ DEFAULT_KEY_CERT_PAIR := $(OTA_PACKAGE_SIGNING_KEY)
+endif
# Rules that need to be present for the all targets, even
# if they don't do anything.
@@ -499,17 +532,25 @@ ifdef BOARD_KERNEL_PAGESIZE
INTERNAL_BOOTIMAGE_ARGS += --pagesize $(BOARD_KERNEL_PAGESIZE)
endif
+INSTALLED_DTIMAGE_TARGET := $(PRODUCT_OUT)/dt.img
+
+ifeq ($(strip $(BOARD_KERNEL_SEPARATED_DT)),true)
+ INTERNAL_BOOTIMAGE_ARGS += --dt $(INSTALLED_DTIMAGE_TARGET)
+ BOOTIMAGE_EXTRA_DEPS := $(INSTALLED_DTIMAGE_TARGET)
+endif
+
INSTALLED_BOOTIMAGE_TARGET := $(PRODUCT_OUT)/boot.img
ifeq ($(TARGET_BOOTIMAGE_USE_EXT2),true)
$(error TARGET_BOOTIMAGE_USE_EXT2 is not supported anymore)
else ifeq (true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_BOOT_SIGNER)) # TARGET_BOOTIMAGE_USE_EXT2 != true
-$(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_FILES) $(BOOT_SIGNER)
+$(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_FILES) $(BOOT_SIGNER) $(BOOTIMAGE_EXTRA_DEPS)
$(call pretty,"Target boot image: $@")
$(hide) $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $@
$(BOOT_SIGNER) /boot $@ $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).pk8 $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).x509.pem $@
$(hide) $(call assert-max-image-size,$@,$(BOARD_BOOTIMAGE_PARTITION_SIZE))
+ @echo -e ${CL_CYN}"Made boot image: $@"${CL_RST}
.PHONY: bootimage-nodeps
bootimage-nodeps: $(MKBOOTIMG) $(BOOT_SIGNER)
@@ -517,10 +558,13 @@ bootimage-nodeps: $(MKBOOTIMG) $(BOOT_SIGNER)
$(hide) $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(INSTALLED_BOOTIMAGE_TARGET)
$(BOOT_SIGNER) /boot $(INSTALLED_BOOTIMAGE_TARGET) $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).pk8 $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).x509.pem $(INSTALLED_BOOTIMAGE_TARGET)
$(hide) $(call assert-max-image-size,$(INSTALLED_BOOTIMAGE_TARGET),$(BOARD_BOOTIMAGE_PARTITION_SIZE))
+ @echo -e ${CL_INS}"Made boot image: $@"${CL_RST}
+
+else ifndef BOARD_CUSTOM_BOOTIMG_MK
-else ifeq (true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)) # PRODUCT_SUPPORTS_BOOT_SIGNER != true
+ ifeq (true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)) # PRODUCT_SUPPORTS_BOOT_SIGNER != true
-$(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_FILES) $(VBOOT_SIGNER)
+$(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_FILES) $(VBOOT_SIGNER) $(BOOTIMAGE_EXTRA_DEPS)
$(call pretty,"Target boot image: $@")
$(hide) $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $@.unsigned
$(VBOOT_SIGNER) $(FUTILITY) $@.unsigned $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbpubk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbprivk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_SUBKEY).vbprivk $@.keyblock $@
@@ -533,20 +577,23 @@ bootimage-nodeps: $(MKBOOTIMG) $(VBOOT_SIGNER)
$(VBOOT_SIGNER) $(FUTILITY) $(INSTALLED_BOOTIMAGE_TARGET).unsigned $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbpubk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbprivk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_SUBKEY).vbprivk $(INSTALLED_BOOTIMAGE_TARGET).keyblock $(INSTALLED_BOOTIMAGE_TARGET)
$(hide) $(call assert-max-image-size,$(INSTALLED_BOOTIMAGE_TARGET),$(BOARD_BOOTIMAGE_PARTITION_SIZE))
-else # PRODUCT_SUPPORTS_VBOOT != true
+ else # PRODUCT_SUPPORTS_VBOOT != true
-$(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_FILES)
+$(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_FILES) $(BOOTIMAGE_EXTRA_DEPS)
$(call pretty,"Target boot image: $@")
$(hide) $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $@
$(hide) $(call assert-max-image-size,$@,$(BOARD_BOOTIMAGE_PARTITION_SIZE))
+ @echo -e ${CL_CYN}"Made boot image: $@"${CL_RST}
.PHONY: bootimage-nodeps
bootimage-nodeps: $(MKBOOTIMG)
@echo "make $@: ignoring dependencies"
$(hide) $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(INSTALLED_BOOTIMAGE_TARGET)
$(hide) $(call assert-max-image-size,$(INSTALLED_BOOTIMAGE_TARGET),$(BOARD_BOOTIMAGE_PARTITION_SIZE))
+ @echo -e ${CL_INS}"Made boot image: $@"${CL_RST}
-endif # TARGET_BOOTIMAGE_USE_EXT2
+ endif # PRODUCT_SUPPORTS_VBOOT
+endif # PRODUCT_SUPPORTS_BOOT_SIGNER / BOARD_CUSTOM_BOOTIMG_MK
else # TARGET_NO_KERNEL
# HACK: The top-level targets depend on the bootimage. Not all targets
@@ -650,7 +697,7 @@ endif # TARGET_BUILD_APPS
$(kernel_notice_file): \
prebuilts/qemu-kernel/arm/LINUX_KERNEL_COPYING \
| $(ACP)
- @echo Copying: $@
+ @echo -e ${CL_CYN}"Copying:"${CL_RST}" $@"
$(hide) mkdir -p $(dir $@)
$(hide) $(ACP) $< $@
@@ -691,6 +738,15 @@ INTERNAL_USERIMAGES_EXT_VARIANT := ext4
endif
endif
endif
+ifeq ($(TARGET_USERIMAGES_USE_F2FS),true)
+INTERNAL_USERIMAGES_USE_F2FS := true
+ifeq ($(INTERNAL_USERIMAGES_EXT_VARIANT),)
+INTERNAL_USERIMAGES_EXT_VARIANT := f2fs
+endif
+endif
+ifeq ($(TARGET_USERIMAGES_USE_YAFFS),true)
+INTERNAL_USERIMAGES_USE_YAFFS := true
+endif
# These options tell the recovery updater/installer how to mount the partitions writebale.
# <fstype>=<fstype_opts>[|<fstype_opts>]...
@@ -703,12 +759,15 @@ ifneq (true,$(TARGET_USERIMAGES_SPARSE_EXT_DISABLED))
INTERNAL_USERIMAGES_SPARSE_EXT_FLAG := -s
endif
+INTERNAL_USERIMAGES_DEPS :=
ifeq ($(INTERNAL_USERIMAGES_USE_EXT),true)
-INTERNAL_USERIMAGES_DEPS := $(SIMG2IMG)
INTERNAL_USERIMAGES_DEPS += $(MKEXTUSERIMG) $(MAKE_EXT4FS) $(E2FSCK)
-ifeq ($(TARGET_USERIMAGES_USE_F2FS),true)
+endif
+ifeq ($(INTERNAL_USERIMAGES_USE_F2FS),true)
INTERNAL_USERIMAGES_DEPS += $(MKF2FSUSERIMG) $(MAKE_F2FS)
endif
+ifeq ($(INTERNAL_USERIMAGES_USE_YAFFS),true)
+INTERNAL_USERIMAGES_DEPS += $(MKYAFFS2)
endif
ifeq ($(BOARD_SYSTEMIMAGE_FILE_SYSTEM_TYPE),squashfs)
@@ -719,6 +778,8 @@ ifeq ($(BOARD_SYSTEMIMAGE_FILE_SYSTEM_TYPE),squashfs)
INTERNAL_USERIMAGES_DEPS += $(MAKE_SQUASHFS) $(MKSQUASHFSUSERIMG) $(IMG2SIMG)
endif
+INTERNAL_USERIMAGES_DEPS += $(SIMG2IMG)
+
INTERNAL_USERIMAGES_BINARY_PATHS := $(sort $(dir $(INTERNAL_USERIMAGES_DEPS)))
ifeq (true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VERITY))
@@ -740,6 +801,8 @@ $(if $(BOARD_SYSTEMIMAGE_SQUASHFS_COMPRESSOR),$(hide) echo "system_squashfs_comp
$(if $(BOARD_SYSTEMIMAGE_SQUASHFS_COMPRESSOR_OPT),$(hide) echo "system_squashfs_compressor_opt=$(BOARD_SYSTEMIMAGE_SQUASHFS_COMPRESSOR_OPT)" >> $(1))
$(if $(BOARD_USERDATAIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "userdata_fs_type=$(BOARD_USERDATAIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
$(if $(BOARD_USERDATAIMAGE_PARTITION_SIZE),$(hide) echo "userdata_size=$(BOARD_USERDATAIMAGE_PARTITION_SIZE)" >> $(1))
+$(if $(BOARD_USERDATAEXTRAIMAGE_PARTITION_SIZE),$(hide) echo "userdataextra_size=$(BOARD_USERDATAEXTRAIMAGE_PARTITION_SIZE)" >> $(1))
+$(if $(BOARD_USERDATAEXTRAIMAGE_PARTITION_NAME),$(hide) echo "userdataextra_name=$(BOARD_USERDATAEXTRAIMAGE_PARTITION_NAME)" >> $(1))
$(if $(BOARD_CACHEIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "cache_fs_type=$(BOARD_CACHEIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
$(if $(BOARD_CACHEIMAGE_PARTITION_SIZE),$(hide) echo "cache_size=$(BOARD_CACHEIMAGE_PARTITION_SIZE)" >> $(1))
$(if $(BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "vendor_fs_type=$(BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
@@ -748,6 +811,7 @@ $(if $(BOARD_VENDORIMAGE_JOURNAL_SIZE),$(hide) echo "vendor_journal_size=$(BOARD
$(if $(BOARD_OEMIMAGE_PARTITION_SIZE),$(hide) echo "oem_size=$(BOARD_OEMIMAGE_PARTITION_SIZE)" >> $(1))
$(if $(BOARD_OEMIMAGE_JOURNAL_SIZE),$(hide) echo "oem_journal_size=$(BOARD_OEMIMAGE_JOURNAL_SIZE)" >> $(1))
$(if $(INTERNAL_USERIMAGES_SPARSE_EXT_FLAG),$(hide) echo "extfs_sparse_flag=$(INTERNAL_USERIMAGES_SPARSE_EXT_FLAG)" >> $(1))
+$(if $(mkyaffs2_extra_flags),$(hide) echo "mkyaffs2_extra_flags=$(mkyaffs2_extra_flags)" >> $(1))
$(hide) echo "selinux_fc=$(SELINUX_FC)" >> $(1)
$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_BOOT_SIGNER),$(hide) echo "boot_signer=$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_BOOT_SIGNER)" >> $(1))
$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VERITY),$(hide) echo "verity=$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VERITY)" >> $(1))
@@ -774,15 +838,19 @@ ifdef INSTALLED_RECOVERYIMAGE_TARGET
INTERNAL_RECOVERYIMAGE_FILES := $(filter $(TARGET_RECOVERY_OUT)/%, \
$(ALL_DEFAULT_INSTALLED_MODULES))
-recovery_initrc := $(call include-path-for, recovery)/etc/init.rc
+recovery_initrc := $(call project-path-for,recovery)/etc/init.rc
recovery_sepolicy := $(call intermediates-dir-for,ETC,sepolicy.recovery)/sepolicy.recovery
recovery_kernel := $(INSTALLED_KERNEL_TARGET) # same as a non-recovery system
recovery_ramdisk := $(PRODUCT_OUT)/ramdisk-recovery.img
recovery_build_prop := $(intermediate_system_build_prop)
-recovery_resources_common := $(call include-path-for, recovery)/res
+recovery_uncompressed_ramdisk := $(PRODUCT_OUT)/ramdisk-recovery.cpio
+recovery_resources_common := $(call project-path-for,recovery)/res
# Set recovery_density to the density bucket of the device.
recovery_density := unknown
+ifneq (,$(TARGET_RECOVERY_DENSITY))
+recovery_density := $(filter %dpi,$(TARGET_RECOVERY_DENSITY))
+else
ifneq (,$(PRODUCT_AAPT_PREF_CONFIG))
# If PRODUCT_AAPT_PREF_CONFIG includes a dpi bucket, then use that value.
recovery_density := $(filter %dpi,$(PRODUCT_AAPT_PREF_CONFIG))
@@ -790,6 +858,7 @@ else
# Otherwise, use the default medium density.
recovery_densities := mdpi
endif
+endif
ifneq (,$(wildcard $(recovery_resources_common)-$(recovery_density)))
recovery_resources_common := $(recovery_resources_common)-$(recovery_density)
@@ -797,20 +866,37 @@ else
recovery_resources_common := $(recovery_resources_common)-xhdpi
endif
+ifneq (,$(RECOVERY_EXTRA_RESOURCE_DIR))
+recovery_resources_extra := $(strip \
+ $(wildcard $(RECOVERY_EXTRA_RESOURCE_DIR)/res-$(recovery_density)))
+endif
+
# Select the 18x32 font on high-density devices (xhdpi and up); and
# the 12x22 font on other devices. Note that the font selected here
# can be overridden for a particular device by putting a font.png in
# its private recovery resources.
ifneq (,$(filter xxxhdpi 560dpi xxhdpi 400dpi xhdpi,$(recovery_density)))
-recovery_font := $(call include-path-for, recovery)/fonts/18x32.png
+recovery_font := $(call project-path-for,recovery)/fonts/18x32.png
+else
+recovery_font := $(call project-path-for,recovery)/fonts/12x22.png
+endif
+
+ifneq ($(TARGET_RECOVERY_DEVICE_DIRS),)
+recovery_root_private := $(strip \
+ $(foreach d,$(TARGET_RECOVERY_DEVICE_DIRS), $(wildcard $(d)/recovery/root)))
else
-recovery_font := $(call include-path-for, recovery)/fonts/12x22.png
+recovery_root_private := $(strip $(wildcard $(TARGET_DEVICE_DIR)/recovery/root))
+endif
+ifneq ($(recovery_root_private),)
+recovery_root_deps := $(shell find $(recovery_root_private) -type f)
endif
recovery_resources_private := $(strip $(wildcard $(TARGET_DEVICE_DIR)/recovery/res))
recovery_resource_deps := $(shell find $(recovery_resources_common) \
- $(recovery_resources_private) -type f)
+ $(recovery_resources_extra) $(recovery_resources_private) -type f)
+
+
ifdef TARGET_RECOVERY_FSTAB
recovery_fstab := $(TARGET_RECOVERY_FSTAB)
else
@@ -818,6 +904,7 @@ recovery_fstab := $(strip $(wildcard $(TARGET_DEVICE_DIR)/recovery.fstab))
endif
# Named '.dat' so we don't attempt to use imgdiff for patching it.
RECOVERY_RESOURCE_ZIP := $(TARGET_OUT)/etc/recovery-resource.dat
+RECOVERY_PATCH_INSTALL := $(TARGET_OUT)/bin/install-recovery.sh
ifeq ($(recovery_resources_private),)
$(info No private recovery resources for TARGET_DEVICE $(TARGET_DEVICE))
@@ -834,6 +921,11 @@ INTERNAL_RECOVERYIMAGE_ARGS := \
# Assumes this has already been stripped
ifdef BOARD_KERNEL_CMDLINE
+ ifdef BUILD_ENFORCE_SELINUX
+ ifneq (,$(filter androidboot.selinux=permissive androidboot.selinux=disabled, $(BOARD_KERNEL_CMDLINE)))
+ $(error "Trying to apply non-default selinux settings. Aborting")
+ endif
+ endif
INTERNAL_RECOVERYIMAGE_ARGS += --cmdline "$(BOARD_KERNEL_CMDLINE)"
endif
ifdef BOARD_KERNEL_BASE
@@ -843,12 +935,25 @@ BOARD_KERNEL_PAGESIZE := $(strip $(BOARD_KERNEL_PAGESIZE))
ifdef BOARD_KERNEL_PAGESIZE
INTERNAL_RECOVERYIMAGE_ARGS += --pagesize $(BOARD_KERNEL_PAGESIZE)
endif
+ifeq ($(strip $(BOARD_KERNEL_SEPARATED_DT)),true)
+ INTERNAL_RECOVERYIMAGE_ARGS += --dt $(INSTALLED_DTIMAGE_TARGET)
+ RECOVERYIMAGE_EXTRA_DEPS := $(INSTALLED_DTIMAGE_TARGET)
+endif
# Keys authorized to sign OTA packages this build will accept. The
# build always uses dev-keys for this; release packaging tools will
# substitute other keys for this one.
OTA_PUBLIC_KEYS := $(DEFAULT_SYSTEM_DEV_CERTIFICATE).x509.pem
+ifneq ($(OTA_PACKAGE_SIGNING_KEY),)
+ OTA_PUBLIC_KEYS := $(OTA_PACKAGE_SIGNING_KEY).x509.pem
+ PRODUCT_EXTRA_RECOVERY_KEYS := $(DEFAULT_SYSTEM_DEV_CERTIFICATE)
+else
+ PRODUCT_EXTRA_RECOVERY_KEYS += \
+ build/target/product/security/cm \
+ build/target/product/security/cm-devkey
+endif
+
# Generate a file containing the keys that will be read by the
# recovery binary.
RECOVERY_INSTALL_OTA_KEYS := \
@@ -862,15 +967,13 @@ $(RECOVERY_INSTALL_OTA_KEYS): $(OTA_PUBLIC_KEYS) $(DUMPKEY_JAR) $(extra_keys)
@mkdir -p $(dir $@)
java -jar $(DUMPKEY_JAR) $(PRIVATE_OTA_PUBLIC_KEYS) $(extra_keys) > $@
-RECOVERYIMAGE_ID_FILE := $(PRODUCT_OUT)/recovery.id
-# $(1): output file
-define build-recoveryimage-target
- @echo ----- Making recovery image ------
+define build-recoveryramdisk
+ @echo -e ${CL_CYN}"----- Making recovery ramdisk ------"${CL_RST}
$(hide) mkdir -p $(TARGET_RECOVERY_OUT)
$(hide) mkdir -p $(TARGET_RECOVERY_ROOT_OUT)/etc $(TARGET_RECOVERY_ROOT_OUT)/tmp
- @echo Copying baseline ramdisk...
+ @echo -e ${CL_CYN}"Copying baseline ramdisk..."${CL_RST}
$(hide) rsync -a $(TARGET_ROOT_OUT) $(TARGET_RECOVERY_OUT) # "cp -Rf" fails to overwrite broken symlinks on Mac.
- @echo Modifying ramdisk contents...
+ @echo -e ${CL_CYN}"Modifying ramdisk contents..."${CL_RST}
$(hide) rm -f $(TARGET_RECOVERY_ROOT_OUT)/init*.rc
$(hide) cp -f $(recovery_initrc) $(TARGET_RECOVERY_ROOT_OUT)/
$(hide) rm -f $(TARGET_RECOVERY_ROOT_OUT)/sepolicy
@@ -879,15 +982,23 @@ define build-recoveryimage-target
$(hide) mkdir -p $(TARGET_RECOVERY_ROOT_OUT)/res
$(hide) rm -rf $(TARGET_RECOVERY_ROOT_OUT)/res/*
$(hide) cp -rf $(recovery_resources_common)/* $(TARGET_RECOVERY_ROOT_OUT)/res
+ $(hide) $(foreach item,$(recovery_resources_extra), \
+ cp -rf $(item)/* $(TARGET_RECOVERY_ROOT_OUT)/res;)
$(hide) cp -f $(recovery_font) $(TARGET_RECOVERY_ROOT_OUT)/res/images/font.png
+ $(hide) $(foreach item,$(recovery_root_private), \
+ cp -rf $(item) $(TARGET_RECOVERY_OUT)/;)
$(hide) $(foreach item,$(recovery_resources_private), \
- cp -rf $(item) $(TARGET_RECOVERY_ROOT_OUT)/)
+ cp -rf $(item) $(TARGET_RECOVERY_ROOT_OUT)/;)
$(hide) $(foreach item,$(recovery_fstab), \
- cp -f $(item) $(TARGET_RECOVERY_ROOT_OUT)/etc/recovery.fstab)
+ cp -f $(item) $(TARGET_RECOVERY_ROOT_OUT)/etc/recovery.fstab;)
$(hide) cp $(RECOVERY_INSTALL_OTA_KEYS) $(TARGET_RECOVERY_ROOT_OUT)/res/keys
$(hide) cat $(INSTALLED_DEFAULT_PROP_TARGET) $(recovery_build_prop) \
> $(TARGET_RECOVERY_ROOT_OUT)/default.prop
- $(hide) $(MKBOOTFS) -d $(TARGET_OUT) $(TARGET_RECOVERY_ROOT_OUT) | $(MINIGZIP) > $(recovery_ramdisk)
+endef
+
+RECOVERYIMAGE_ID_FILE := $(PRODUCT_OUT)/recovery.id
+# $(1): output file
+define build-recoveryimage-target
$(if $(filter true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)), \
$(hide) $(MKBOOTIMG) $(INTERNAL_RECOVERYIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(1).unsigned, \
$(hide) $(MKBOOTIMG) $(INTERNAL_RECOVERYIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(1) --id > $(RECOVERYIMAGE_ID_FILE))
@@ -896,19 +1007,123 @@ define build-recoveryimage-target
$(if $(filter true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)), \
$(VBOOT_SIGNER) $(FUTILITY) $(1).unsigned $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbpubk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbprivk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_SUBKEY).vbprivk $(1).keyblock $(1))
$(hide) $(call assert-max-image-size,$(1),$(BOARD_RECOVERYIMAGE_PARTITION_SIZE))
- @echo ----- Made recovery image: $(1) --------
+ @echo -e ${CL_CYN}"Made recovery image: $@"${CL_RST}
endef
-$(INSTALLED_RECOVERYIMAGE_TARGET): $(MKBOOTFS) $(MKBOOTIMG) $(MINIGZIP) \
+$(recovery_uncompressed_ramdisk): $(MKBOOTFS) \
$(INSTALLED_RAMDISK_TARGET) \
$(INSTALLED_BOOTIMAGE_TARGET) \
$(INTERNAL_RECOVERYIMAGE_FILES) \
- $(recovery_initrc) $(recovery_sepolicy) $(recovery_kernel) \
+ $(recovery_initrc) $(recovery_sepolicy) \
$(INSTALLED_2NDBOOTLOADER_TARGET) \
- $(recovery_build_prop) $(recovery_resource_deps) \
+ $(recovery_build_prop) $(recovery_resource_deps) $(recovery_root_deps) \
$(recovery_fstab) \
$(RECOVERY_INSTALL_OTA_KEYS)
- $(call build-recoveryimage-target, $@)
+ $(call build-recoveryramdisk)
+ @echo -e ${CL_CYN}"----- Making uncompressed recovery ramdisk ------"${CL_RST}
+ $(hide) $(MKBOOTFS) $(TARGET_RECOVERY_ROOT_OUT) > $@
+
+$(recovery_ramdisk): $(MINIGZIP) \
+ $(recovery_uncompressed_ramdisk)
+ @echo -e ${CL_CYN}"----- Making compressed recovery ramdisk ------"${CL_RST}
+ $(hide) $(MINIGZIP) < $(recovery_uncompressed_ramdisk) > $@
+
+ifndef BOARD_CUSTOM_BOOTIMG_MK
+$(INSTALLED_RECOVERYIMAGE_TARGET): $(MKBOOTIMG) $(recovery_ramdisk) $(recovery_kernel) \
+ $(RECOVERYIMAGE_EXTRA_DEPS)
+ @echo -e ${CL_CYN}"----- Making recovery image ------"${CL_RST}
+ $(call build-recoveryimage-target, $@)
+endif # BOARD_CUSTOM_BOOTIMG_MK
+
+# The system partition needs room for the recovery image as well. We
+# now store the recovery image as a binary patch using the boot image
+# as the source (since they are very similar). Generate the patch so
+# we can see how big it's going to be, and include that in the system
+# image size check calculation.
+ifneq ($(INSTALLED_RECOVERYIMAGE_TARGET),)
+intermediates := $(call intermediates-dir-for,PACKAGING,recovery_patch)
+ifndef BOARD_CANT_BUILD_RECOVERY_FROM_BOOT_PATCH
+RECOVERY_FROM_BOOT_PATCH := $(intermediates)/recovery_from_boot.p
+else
+RECOVERY_FROM_BOOT_PATCH :=
+endif
+$(RECOVERY_FROM_BOOT_PATCH): $(INSTALLED_RECOVERYIMAGE_TARGET) \
+ $(INSTALLED_BOOTIMAGE_TARGET) \
+ $(HOST_OUT_EXECUTABLES)/imgdiff \
+ $(HOST_OUT_EXECUTABLES)/bsdiff
+ @echo -e ${CL_CYN}"Construct recovery from boot"${CL_RST}
+ mkdir -p $(dir $@)
+ifeq ($(TARGET_NOT_USE_GZIP_RECOVERY_RAMDISK),true)
+ PATH=$(HOST_OUT_EXECUTABLES):$$PATH $(HOST_OUT_EXECUTABLES)/bsdiff $(INSTALLED_BOOTIMAGE_TARGET) $(INSTALLED_RECOVERYIMAGE_TARGET) $@
+else
+ PATH=$(HOST_OUT_EXECUTABLES):$$PATH $(HOST_OUT_EXECUTABLES)/imgdiff $(INSTALLED_BOOTIMAGE_TARGET) $(INSTALLED_RECOVERYIMAGE_TARGET) $@
+endif
+endif
+
+recovery_patch_path := $(call intermediates-dir-for,PACKAGING,recovery_patch)
+ota_temp_root := $(PRODUCT_OUT)/ota_temp
+$(RECOVERY_PATCH_INSTALL): PRIVATE_RECOVERY_API_VERSION := $(RECOVERY_API_VERSION)
+$(RECOVERY_PATCH_INSTALL): PRIVATE_RECOVERY_FSTAB_VERSION := $(RECOVERY_FSTAB_VERSION)
+$(RECOVERY_PATCH_INSTALL): $(INSTALLED_RECOVERYIMAGE_TARGET) \
+ $(RECOVERY_FROM_BOOT_PATCH) \
+ $(ALL_PREBUILT) \
+ $(ALL_COPIED_HEADERS) \
+ $(ALL_GENERATED_SOURCES) \
+ $(ALL_DEFAULT_INSTALLED_MODULES) \
+ $(PDK_FUSION_SYSIMG_FILES)
+ @echo "Installing recovery patch to system partition"
+ $(hide) mkdir -p $(ota_temp_root)
+ $(hide) mkdir -p $(ota_temp_root)/BOOT
+ $(hide) mkdir -p $(ota_temp_root)/RECOVERY
+ $(hide) mkdir -p $(ota_temp_root)/BOOTABLE_IMAGES
+ $(hide) mkdir -p $(ota_temp_root)/SYSTEM
+ $(hide) mkdir -p $(ota_temp_root)/SYSTEM/bin
+ $(hide) mkdir -p $(ota_temp_root)/SYSTEM/etc
+ $(hide) mkdir -p $(ota_temp_root)/META
+ $(hide) $(call package_files-copy-root, \
+ $(TARGET_RECOVERY_ROOT_OUT),$(ota_temp_root)/RECOVERY/RAMDISK)
+ $(hide) $(call package_files-copy-root, \
+ $(TARGET_ROOT_OUT),$(ota_temp_root)/BOOT/RAMDISK)
+ $(hide) $(call package_files-copy-root, \
+ $(TARGET_OUT)/etc,$(ota_temp_root)/SYSTEM/etc)
+ @echo "recovery_api_version=$(PRIVATE_RECOVERY_API_VERSION)" > $(ota_temp_root)/META/misc_info.txt
+ @echo "fstab_version=$(PRIVATE_RECOVERY_FSTAB_VERSION)" >> $(ota_temp_root)/META/misc_info.txt
+ifdef BOARD_FLASH_BLOCK_SIZE
+ @echo "blocksize=$(BOARD_FLASH_BLOCK_SIZE)" >> $(ota_temp_root)/META/misc_info.txt
+endif
+ifdef BOARD_BOOTIMAGE_PARTITION_SIZE
+ @echo "boot_size=$(BOARD_BOOTIMAGE_PARTITION_SIZE)" >> $(ota_temp_root)/META/misc_info.txt
+endif
+ifdef BOARD_RECOVERYIMAGE_PARTITION_SIZE
+ @echo "recovery_size=$(BOARD_RECOVERYIMAGE_PARTITION_SIZE)" >> $(ota_temp_root)/META/misc_info.txt
+endif
+ifdef TARGET_RECOVERY_FSTYPE_MOUNT_OPTIONS
+ @# TARGET_RECOVERY_FSTYPE_MOUNT_OPTIONS can be empty to indicate that nothing but defaults should be used.
+ @echo "recovery_mount_options=$(TARGET_RECOVERY_FSTYPE_MOUNT_OPTIONS)" >> $(ota_temp_root)/META/misc_info.txt
+else
+ @echo "recovery_mount_options=$(DEFAULT_TARGET_RECOVERY_FSTYPE_MOUNT_OPTIONS)" >> $(ota_temp_root)/META/misc_info.txt
+endif
+ @echo "tool_extensions=$(tool_extensions)" >> $(ota_temp_root)/META/misc_info.txt
+ @echo "default_system_dev_certificate=$(DEFAULT_SYSTEM_DEV_CERTIFICATE)" >> $(ota_temp_root)/META/misc_info.txt
+ifdef PRODUCT_EXTRA_RECOVERY_KEYS
+ @echo "extra_recovery_keys=$(PRODUCT_EXTRA_RECOVERY_KEYS)" >> $(ota_temp_root)/META/misc_info.txt
+endif
+ @echo 'mkbootimg_args=$(BOARD_MKBOOTIMG_ARGS)' >> $(ota_temp_root)/META/misc_info.txt
+ @echo "use_set_metadata=1" >> $(ota_temp_root)/META/misc_info.txt
+ @echo "multistage_support=1" >> $(ota_temp_root)/META/misc_info.txt
+ @echo "update_rename_support=1" >> $(ota_temp_root)/META/misc_info.txt
+ifneq ($(OEM_THUMBPRINT_PROPERTIES),)
+ # OTA scripts are only interested in fingerprint related properties
+ @echo "oem_fingerprint_properties=$(OEM_THUMBPRINT_PROPERTIES)" >> $(ota_temp_root)/META/misc_info.txt
+endif
+ $(call generate-userimage-prop-dictionary, $(ota_temp_root)/META/misc_info.txt)
+ $(hide) cp -r $(PRODUCT_OUT)/boot.img $(ota_temp_root)/BOOTABLE_IMAGES/
+ $(hide) cp -r $(PRODUCT_OUT)/recovery.img $(ota_temp_root)/BOOTABLE_IMAGES/
+ $(hide) ./build/tools/releasetools/make_recovery_patch $(ota_temp_root) $(ota_temp_root)
+ $(hide) rm -f $(TARGET_OUT)/bin/install-recovery.sh
+ $(hide) rm -f $(TARGET_OUT)/recovery-from-boot.p
+ $(hide) cp $(ota_temp_root)/SYSTEM/bin/install-recovery.sh $(TARGET_OUT)/bin/install-recovery.sh
+ $(hide) cp $(ota_temp_root)/SYSTEM/recovery-from-boot.p $(TARGET_OUT)/recovery-from-boot.p
$(RECOVERY_RESOURCE_ZIP): $(INSTALLED_RECOVERYIMAGE_TARGET)
$(hide) mkdir -p $(dir $@)
@@ -917,23 +1132,35 @@ $(RECOVERY_RESOURCE_ZIP): $(INSTALLED_RECOVERYIMAGE_TARGET)
.PHONY: recoveryimage-nodeps
recoveryimage-nodeps:
@echo "make $@: ignoring dependencies"
+ $(call build-recoveryramdisk)
+ $(hide) $(MKBOOTFS) $(TARGET_RECOVERY_ROOT_OUT) > $(recovery_uncompressed_ramdisk)
+ $(hide) $(MINIGZIP) < $(recovery_uncompressed_ramdisk) > $(recovery_ramdisk)
$(call build-recoveryimage-target, $(INSTALLED_RECOVERYIMAGE_TARGET))
else
RECOVERY_RESOURCE_ZIP :=
endif
-
.PHONY: recoveryimage
recoveryimage: $(INSTALLED_RECOVERYIMAGE_TARGET) $(RECOVERY_RESOURCE_ZIP)
-ifeq ($(BOARD_NAND_PAGE_SIZE),)
+ifneq ($(BOARD_NAND_PAGE_SIZE),)
+mkyaffs2_extra_flags := -c $(BOARD_NAND_PAGE_SIZE)
+else
+mkyaffs2_extra_flags :=
BOARD_NAND_PAGE_SIZE := 2048
endif
-ifeq ($(BOARD_NAND_SPARE_SIZE),)
+ifneq ($(BOARD_NAND_SPARE_SIZE),)
+mkyaffs2_extra_flags += -s $(BOARD_NAND_SPARE_SIZE)
+else
BOARD_NAND_SPARE_SIZE := 64
endif
+ifdef BOARD_CUSTOM_BOOTIMG_MK
+include $(BOARD_CUSTOM_BOOTIMG_MK)
+endif
+
+
# -----------------------------------------------------------------
# system image
#
@@ -948,7 +1175,8 @@ INTERNAL_SYSTEMIMAGE_FILES := $(filter $(TARGET_OUT)/%, \
$(ALL_GENERATED_SOURCES) \
$(ALL_DEFAULT_INSTALLED_MODULES) \
$(PDK_FUSION_SYSIMG_FILES) \
- $(RECOVERY_RESOURCE_ZIP))
+ $(RECOVERY_RESOURCE_ZIP) \
+ $(RECOVERY_PATCH_INSTALL))
FULL_SYSTEMIMAGE_DEPS := $(INTERNAL_SYSTEMIMAGE_FILES) $(INTERNAL_USERIMAGES_DEPS)
@@ -989,6 +1217,18 @@ define create-system-vendor-symlink
endef
endif
+# Only Create symlink /system/vendor to /vendor if necessary.
+ifdef BOARD_NEEDS_VENDORIMAGE_SYMLINK
+define create-system-vendor-symlink
+$(hide) if [ -d $(TARGET_OUT)/vendor ] && [ ! -h $(TARGET_OUT)/vendor ]; then \
+ echo 'Non-symlink $(TARGET_OUT)/vendor detected!' 1>&2; \
+ echo 'You cannot install files to $(TARGET_OUT)/vendor while building a separate vendor.img!' 1>&2; \
+ exit 1; \
+fi
+$(hide) ln -sf /vendor $(TARGET_OUT)/vendor
+endef
+endif
+
# $(1): output file
define build-systemimage-target
@echo "Target system fs image: $(1)"
@@ -1017,29 +1257,16 @@ endef
$(BUILT_SYSTEMIMAGE): $(FULL_SYSTEMIMAGE_DEPS) $(INSTALLED_FILES_FILE)
$(call build-systemimage-target,$@)
-INSTALLED_SYSTEMIMAGE := $(PRODUCT_OUT)/system.img
-SYSTEMIMAGE_SOURCE_DIR := $(TARGET_OUT)
-
-# The system partition needs room for the recovery image as well. We
-# now store the recovery image as a binary patch using the boot image
-# as the source (since they are very similar). Generate the patch so
-# we can see how big it's going to be, and include that in the system
-# image size check calculation.
-ifneq ($(INSTALLED_RECOVERYIMAGE_TARGET),)
-intermediates := $(call intermediates-dir-for,PACKAGING,recovery_patch)
-RECOVERY_FROM_BOOT_PATCH := $(intermediates)/recovery_from_boot.p
-$(RECOVERY_FROM_BOOT_PATCH): $(INSTALLED_RECOVERYIMAGE_TARGET) \
- $(INSTALLED_BOOTIMAGE_TARGET) \
- $(HOST_OUT_EXECUTABLES)/imgdiff \
- $(HOST_OUT_EXECUTABLES)/bsdiff
- @echo "Construct recovery from boot"
- mkdir -p $(dir $@)
- PATH=$(HOST_OUT_EXECUTABLES):$$PATH $(HOST_OUT_EXECUTABLES)/imgdiff $(INSTALLED_BOOTIMAGE_TARGET) $(INSTALLED_RECOVERYIMAGE_TARGET) $@
+ifndef I_WANT_A_QUAIL_STAR
+$(BUILT_SYSTEMIMAGE): checkapi
+$(BUILT_SYSTEMIMAGE): checkapi-cm
endif
+INSTALLED_SYSTEMIMAGE := $(PRODUCT_OUT)/system.img
+SYSTEMIMAGE_SOURCE_DIR := $(TARGET_OUT)
$(INSTALLED_SYSTEMIMAGE): $(BUILT_SYSTEMIMAGE) $(RECOVERY_FROM_BOOT_PATCH) | $(ACP)
- @echo "Install system fs image: $@"
+ @echo -e ${CL_CYN}"Install system fs image: $@"${CL_RST}
$(copy-file-to-target)
$(hide) $(call assert-max-image-size,$@ $(RECOVERY_FROM_BOOT_PATCH),$(BOARD_SYSTEMIMAGE_PARTITION_SIZE))
@@ -1295,9 +1522,12 @@ endif
# host tools needed to build dist and OTA packages
DISTTOOLS := $(HOST_OUT_EXECUTABLES)/minigzip \
+ $(HOST_OUT_EXECUTABLES)/adb \
$(HOST_OUT_EXECUTABLES)/mkbootfs \
$(HOST_OUT_EXECUTABLES)/mkbootimg \
+ $(HOST_OUT_EXECUTABLES)/unpackbootimg \
$(HOST_OUT_EXECUTABLES)/fs_config \
+ $(HOST_OUT_EXECUTABLES)/mkyaffs2image \
$(HOST_OUT_EXECUTABLES)/zipalign \
$(HOST_OUT_EXECUTABLES)/bsdiff \
$(HOST_OUT_EXECUTABLES)/imgdiff \
@@ -1385,7 +1615,13 @@ built_ota_tools := \
$(call intermediates-dir-for,EXECUTABLES,applypatch_static,,,$(TARGET_PREFER_32_BIT))/applypatch_static \
$(call intermediates-dir-for,EXECUTABLES,check_prereq,,,$(TARGET_PREFER_32_BIT))/check_prereq \
$(call intermediates-dir-for,EXECUTABLES,sqlite3,,,$(TARGET_PREFER_32_BIT))/sqlite3 \
- $(call intermediates-dir-for,EXECUTABLES,updater,,,$(TARGET_PREFER_32_BIT))/updater
+
+ifeq ($(TARGET_ARCH),arm64)
+built_ota_tools += $(call intermediates-dir-for,EXECUTABLES,updater,,,32)/updater
+else
+built_ota_tools += $(call intermediates-dir-for,EXECUTABLES,updater)/updater
+endif
+
$(BUILT_TARGET_FILES_PACKAGE): PRIVATE_OTA_TOOLS := $(built_ota_tools)
$(BUILT_TARGET_FILES_PACKAGE): PRIVATE_RECOVERY_API_VERSION := $(RECOVERY_API_VERSION)
@@ -1398,8 +1634,24 @@ else
$(BUILT_TARGET_FILES_PACKAGE): tool_extensions := $(TARGET_RELEASETOOLS_EXTENSIONS)
endif
+ifeq ($(BOARD_USES_UBOOT_MULTIIMAGE),true)
+
+ ZIP_SAVE_UBOOTIMG_ARGS := -A ARM -O Linux -T multi -C none -n Image
+
+ BOARD_UBOOT_ENTRY := $(strip $(BOARD_UBOOT_ENTRY))
+ ifdef BOARD_UBOOT_ENTRY
+ ZIP_SAVE_UBOOTIMG_ARGS += -e $(BOARD_UBOOT_ENTRY)
+ endif
+ BOARD_UBOOT_LOAD := $(strip $(BOARD_UBOOT_LOAD))
+ ifdef BOARD_UBOOT_LOAD
+ ZIP_SAVE_UBOOTIMG_ARGS += -a $(BOARD_UBOOT_LOAD)
+ endif
+
+endif
+
# Depending on the various images guarantees that the underlying
# directories are up-to-date.
+include $(BUILD_SYSTEM)/tasks/oem_image.mk
$(BUILT_TARGET_FILES_PACKAGE): \
$(INSTALLED_BOOTIMAGE_TARGET) \
$(INSTALLED_RADIOIMAGE_TARGET) \
@@ -1408,19 +1660,29 @@ $(BUILT_TARGET_FILES_PACKAGE): \
$(INSTALLED_USERDATAIMAGE_TARGET) \
$(INSTALLED_CACHEIMAGE_TARGET) \
$(INSTALLED_VENDORIMAGE_TARGET) \
+ $(INSTALLED_OEMIMAGE_TARGET) \
$(INSTALLED_ANDROID_INFO_TXT_TARGET) \
$(SELINUX_FC) \
$(built_ota_tools) \
$(APKCERTS_FILE) \
$(HOST_OUT_EXECUTABLES)/fs_config \
| $(ACP)
- @echo "Package target files: $@"
+ @echo -e ${CL_YLW}"Package target files:"${CL_RST}" $@"
$(hide) rm -rf $@ $(zip_root)
$(hide) mkdir -p $(dir $@) $(zip_root)
@# Components of the recovery image
$(hide) mkdir -p $(zip_root)/RECOVERY
$(hide) $(call package_files-copy-root, \
$(TARGET_RECOVERY_ROOT_OUT),$(zip_root)/RECOVERY/RAMDISK)
+ @# OTA install helpers
+ $(hide) $(call package_files-copy-root, $(OUT)/install, $(zip_root)/INSTALL)
+# Just copy the already built boot/recovery images into the target-files dir
+# in order to avoid mismatched images between the out dir and what the ota
+# build system tries to rebuild.
+ $(hide) mkdir -p $(zip_root)/BOOTABLE_IMAGES
+ $(hide) $(ACP) $(INSTALLED_BOOTIMAGE_TARGET) $(zip_root)/BOOTABLE_IMAGES/
+ $(hide) $(ACP) $(INSTALLED_RECOVERYIMAGE_TARGET) $(zip_root)/BOOTABLE_IMAGES/
+
ifdef INSTALLED_KERNEL_TARGET
$(hide) $(ACP) $(INSTALLED_KERNEL_TARGET) $(zip_root)/RECOVERY/kernel
endif
@@ -1428,6 +1690,9 @@ ifdef INSTALLED_2NDBOOTLOADER_TARGET
$(hide) $(ACP) \
$(INSTALLED_2NDBOOTLOADER_TARGET) $(zip_root)/RECOVERY/second
endif
+ifdef BOARD_KERNEL_TAGS_OFFSET
+ $(hide) echo "$(BOARD_KERNEL_TAGS_OFFSET)" > $(zip_root)/RECOVERY/tags_offset
+endif
ifdef BOARD_KERNEL_CMDLINE
$(hide) echo "$(BOARD_KERNEL_CMDLINE)" > $(zip_root)/RECOVERY/cmdline
endif
@@ -1437,6 +1702,15 @@ endif
ifdef BOARD_KERNEL_PAGESIZE
$(hide) echo "$(BOARD_KERNEL_PAGESIZE)" > $(zip_root)/RECOVERY/pagesize
endif
+ifdef BOARD_KERNEL_TAGS_ADDR
+ $(hide) echo "$(BOARD_KERNEL_TAGS_ADDR)" > $(zip_root)/RECOVERY/tagsaddr
+endif
+ifdef BOARD_RAMDISK_OFFSET
+ $(hide) echo "$(BOARD_RAMDISK_OFFSET)" > $(zip_root)/RECOVERY/ramdisk_offset
+endif
+ifeq ($(strip $(BOARD_KERNEL_SEPARATED_DT)),true)
+ $(hide) $(ACP) $(INSTALLED_DTIMAGE_TARGET) $(zip_root)/RECOVERY/dt
+endif
@# Components of the boot image
$(hide) mkdir -p $(zip_root)/BOOT
$(hide) $(call package_files-copy-root, \
@@ -1448,6 +1722,10 @@ ifdef INSTALLED_2NDBOOTLOADER_TARGET
$(hide) $(ACP) \
$(INSTALLED_2NDBOOTLOADER_TARGET) $(zip_root)/BOOT/second
endif
+
+ifdef BOARD_KERNEL_TAGS_OFFSET
+ $(hide) echo "$(BOARD_KERNEL_TAGS_OFFSET)" > $(zip_root)/BOOT/tags_offset
+endif
ifdef BOARD_KERNEL_CMDLINE
$(hide) echo "$(BOARD_KERNEL_CMDLINE)" > $(zip_root)/BOOT/cmdline
endif
@@ -1457,20 +1735,46 @@ endif
ifdef BOARD_KERNEL_PAGESIZE
$(hide) echo "$(BOARD_KERNEL_PAGESIZE)" > $(zip_root)/BOOT/pagesize
endif
+ifdef BOARD_KERNEL_TAGS_ADDR
+ $(hide) echo "$(BOARD_KERNEL_TAGS_ADDR)" > $(zip_root)/BOOT/tagsaddr
+endif
+ifdef BOARD_RAMDISK_OFFSET
+ $(hide) echo "$(BOARD_RAMDISK_OFFSET)" > $(zip_root)/BOOT/ramdisk_offset
+endif
+ifeq ($(strip $(BOARD_KERNEL_SEPARATED_DT)),true)
+ $(hide) $(ACP) $(INSTALLED_DTIMAGE_TARGET) $(zip_root)/BOOT/dt
+endif
+ifdef ZIP_SAVE_UBOOTIMG_ARGS
+ $(hide) echo "$(ZIP_SAVE_UBOOTIMG_ARGS)" > $(zip_root)/BOOT/ubootargs
+endif
$(hide) $(foreach t,$(INSTALLED_RADIOIMAGE_TARGET),\
mkdir -p $(zip_root)/RADIO; \
$(ACP) $(t) $(zip_root)/RADIO/$(notdir $(t));)
+ $(hide) $(foreach fi,$(PRODUCT_FACTORYIMAGE_FILES),\
+ mkdir -p $(zip_root)/FACTORY; \
+ $(ACP) $(fi) $(zip_root)/FACTORY/$(notdir $(fi));)
@# Contents of the system image
$(hide) $(call package_files-copy-root, \
$(SYSTEMIMAGE_SOURCE_DIR),$(zip_root)/SYSTEM)
@# Contents of the data image
$(hide) $(call package_files-copy-root, \
$(TARGET_OUT_DATA),$(zip_root)/DATA)
+ifdef BOARD_CUSTOM_BOOTIMG
+ @# Prebuilt boot images
+ $(hide) mkdir -p $(zip_root)/BOOTABLE_IMAGES
+ $(hide) $(ACP) $(INSTALLED_BOOTIMAGE_TARGET) $(zip_root)/BOOTABLE_IMAGES/
+ $(hide) $(ACP) $(INSTALLED_RECOVERYIMAGE_TARGET) $(zip_root)/BOOTABLE_IMAGES/
+endif
ifdef BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE
@# Contents of the vendor image
$(hide) $(call package_files-copy-root, \
$(TARGET_OUT_VENDOR),$(zip_root)/VENDOR)
endif
+ifdef BOARD_OEMIMAGE_FILE_SYSTEM_TYPE
+ @# Contents of the oem image
+ $(call package_files-copy-root, \
+ $(TARGET_OUT_OEM),$(zip_root)/OEM)
+endif
@# Extra contents of the OTA package
$(hide) mkdir -p $(zip_root)/OTA/bin
$(hide) $(ACP) $(INSTALLED_ANDROID_INFO_TXT_TARGET) $(zip_root)/OTA/
@@ -1515,9 +1819,25 @@ ifneq ($(OEM_THUMBPRINT_PROPERTIES),)
# OTA scripts are only interested in fingerprint related properties
$(hide) echo "oem_fingerprint_properties=$(OEM_THUMBPRINT_PROPERTIES)" >> $(zip_root)/META/misc_info.txt
endif
+ifdef BUILD_NO
+ $(hide) echo "build_number=$(BUILD_NO)" >> $(zip_root)/META/misc_info.txt
+endif
+ifdef TARGET_RELEASETOOL_FACTORY_FROM_TARGET_SCRIPT
+ $(hide) echo "factory_from_target_script=$(TARGET_RELEASETOOL_FACTORY_FROM_TARGET_SCRIPT)" >> $(zip_root)/META/misc_info.txt
+endif
$(call generate-userimage-prop-dictionary, $(zip_root)/META/misc_info.txt)
+ifeq ($(TARGET_RELEASETOOL_MAKE_RECOVERY_PATCH_SCRIPT),)
$(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH MKBOOTIMG=$(MKBOOTIMG) \
./build/tools/releasetools/make_recovery_patch $(zip_root) $(zip_root)
+else
+ $(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH MKBOOTIMG=$(MKBOOTIMG) \
+ $(TARGET_RELEASETOOL_MAKE_RECOVERY_PATCH_SCRIPT) $(zip_root) $(zip_root)
+endif
+ifdef PRODUCT_DEFAULT_DEV_CERTIFICATE
+ $(hide) build/tools/getb64key.py $(PRODUCT_DEFAULT_DEV_CERTIFICATE).x509.pem > $(zip_root)/META/releasekey.txt
+else
+ $(hide) build/tools/getb64key.py $(DEFAULT_SYSTEM_DEV_CERTIFICATE).x509.pem > $(zip_root)/META/releasekey.txt
+endif
@# Zip everything up, preserving symlinks
$(hide) (cd $(zip_root) && zip -qry ../$(notdir $@) .)
@# Run fs_config on all the system, vendor, boot ramdisk,
@@ -1555,18 +1875,90 @@ INTERNAL_OTA_PACKAGE_TARGET := $(PRODUCT_OUT)/$(name).zip
$(INTERNAL_OTA_PACKAGE_TARGET): KEY_CERT_PAIR := $(DEFAULT_KEY_CERT_PAIR)
+ifeq ($(TARGET_RELEASETOOL_OTA_FROM_TARGET_SCRIPT),)
+ OTA_FROM_TARGET_SCRIPT := ./build/tools/releasetools/ota_from_target_files
+else
+ OTA_FROM_TARGET_SCRIPT := $(TARGET_RELEASETOOL_OTA_FROM_TARGET_SCRIPT)
+endif
+
+ifeq ($(WITH_GMS),true)
+ $(INTERNAL_OTA_PACKAGE_TARGET): backuptool := false
+else
+ifneq ($(CM_BUILD),)
+ $(INTERNAL_OTA_PACKAGE_TARGET): backuptool := true
+else
+ $(INTERNAL_OTA_PACKAGE_TARGET): backuptool := false
+endif
+endif
+
+ifeq ($(TARGET_OTA_ASSERT_DEVICE),)
+ $(INTERNAL_OTA_PACKAGE_TARGET): override_device := auto
+else
+ $(INTERNAL_OTA_PACKAGE_TARGET): override_device := $(TARGET_OTA_ASSERT_DEVICE)
+endif
+
+ifneq ($(TARGET_UNIFIED_DEVICE),)
+ $(INTERNAL_OTA_PACKAGE_TARGET): override_prop := --override_prop=true
+ ifeq ($(TARGET_OTA_ASSERT_DEVICE),)
+ $(INTERNAL_OTA_PACKAGE_TARGET): override_device := $(TARGET_DEVICE)
+ endif
+endif
+
+ifneq ($(BLOCK_BASED_OTA),false)
+ $(INTERNAL_OTA_PACKAGE_TARGET): block_based := --block
+endif
+
$(INTERNAL_OTA_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) $(DISTTOOLS)
- @echo "Package OTA: $@"
+ @echo "$(OTA_FROM_TARGET_SCRIPT)" > $(PRODUCT_OUT)/ota_script_path
+ @echo "$(override_device)" > $(PRODUCT_OUT)/ota_override_device
+ @echo -e ${CL_YLW}"Package OTA:"${CL_RST}" $@"
$(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH MKBOOTIMG=$(MKBOOTIMG) \
- ./build/tools/releasetools/ota_from_target_files -v \
- --block \
+ $(OTA_FROM_TARGET_SCRIPT) -v \
+ $(block_based) \
-p $(HOST_OUT) \
-k $(KEY_CERT_PAIR) \
+ --backup=$(backuptool) \
+ --override_device=$(override_device) $(override_prop) \
$(if $(OEM_OTA_CONFIG), -o $(OEM_OTA_CONFIG)) \
$(BUILT_TARGET_FILES_PACKAGE) $@
-.PHONY: otapackage
+CM_TARGET_PACKAGE := $(PRODUCT_OUT)/cm-$(CM_VERSION).zip
+
+.PHONY: otapackage bacon
otapackage: $(INTERNAL_OTA_PACKAGE_TARGET)
+bacon: otapackage
+ $(hide) ln -f $(INTERNAL_OTA_PACKAGE_TARGET) $(CM_TARGET_PACKAGE)
+ $(hide) $(MD5SUM) $(CM_TARGET_PACKAGE) > $(CM_TARGET_PACKAGE).md5sum
+ @echo -e ${CL_CYN}"Package Complete: $(CM_TARGET_PACKAGE)"${CL_RST}
+
+# -----------------------------------------------------------------
+# The factory package
+
+name := $(TARGET_PRODUCT)-factory-$(FILE_NAME_TAG)
+
+INTERNAL_FACTORY_PACKAGE_TARGET := $(PRODUCT_OUT)/$(name).zip
+
+ifeq ($(TARGET_RELEASETOOLS_EXTENSIONS),)
+# default to common dir for device vendor
+$(INTERNAL_FACTORY_PACKAGE_TARGET): extensions := $(TARGET_DEVICE_DIR)/../common
+else
+$(INTERNAL_FACTORY_PACKAGE_TARGET): extensions := $(TARGET_RELEASETOOLS_EXTENSIONS)
+endif
+
+$(INTERNAL_FACTORY_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) $(DISTTOOLS)
+	@echo -e ${CL_YLW}"Package:"${CL_RST}" $@"
+	if [ -z "$(TARGET_RELEASETOOL_FACTORY_FROM_TARGET_SCRIPT)" ]; then \
+	  echo "Error: Factory script is not defined by target"; \
+	  exit 1; \
+	fi
+	MKBOOTIMG=$(BOARD_CUSTOM_BOOTIMG_MK) \
+	$(TARGET_RELEASETOOL_FACTORY_FROM_TARGET_SCRIPT) -v \
+	   -s $(extensions) \
+	   -p $(HOST_OUT) \
+	   $(BUILT_TARGET_FILES_PACKAGE) $@
+
+.PHONY: factorypackage
+factorypackage: $(INTERNAL_FACTORY_PACKAGE_TARGET)
endif # recovery_fstab is defined
endif # TARGET_NO_KERNEL != true
@@ -1584,10 +1976,16 @@ name := $(name)-img-$(FILE_NAME_TAG)
INTERNAL_UPDATE_PACKAGE_TARGET := $(PRODUCT_OUT)/$(name).zip
+ifeq ($(TARGET_RELEASETOOL_IMG_FROM_TARGET_SCRIPT),)
+ IMG_FROM_TARGET_SCRIPT := ./build/tools/releasetools/img_from_target_files
+else
+ IMG_FROM_TARGET_SCRIPT := $(TARGET_RELEASETOOL_IMG_FROM_TARGET_SCRIPT)
+endif
+
$(INTERNAL_UPDATE_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) $(DISTTOOLS)
- @echo "Package: $@"
+ @echo -e ${CL_YLW}"Package:"${CL_RST}" $@"
$(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH MKBOOTIMG=$(MKBOOTIMG) \
- ./build/tools/releasetools/img_from_target_files -v \
+ $(IMG_FROM_TARGET_SCRIPT) -v \
-p $(HOST_OUT) \
$(BUILT_TARGET_FILES_PACKAGE) $@
@@ -1627,7 +2025,7 @@ name := $(name)-apps-$(FILE_NAME_TAG)
APPS_ZIP := $(PRODUCT_OUT)/$(name).zip
$(APPS_ZIP): $(INSTALLED_SYSTEMIMAGE)
- @echo "Package apps: $@"
+ @echo -e ${CL_YLW}"Package apps:"${CL_RST}" $@"
$(hide) rm -rf $@
$(hide) mkdir -p $(dir $@)
$(hide) zip -qj $@ $(TARGET_OUT_APPS)/*/*.apk $(TARGET_OUT_APPS_PRIVILEGED)/*/*.apk
@@ -1689,7 +2087,7 @@ name := $(TARGET_PRODUCT)-emulator-$(FILE_NAME_TAG)
INTERNAL_EMULATOR_PACKAGE_TARGET := $(PRODUCT_OUT)/$(name).zip
$(INTERNAL_EMULATOR_PACKAGE_TARGET): $(INTERNAL_EMULATOR_PACKAGE_FILES)
- @echo "Package: $@"
+ @echo -e ${CL_YLW}"Package:"${CL_RST}" $@"
$(hide) zip -qj $@ $(INTERNAL_EMULATOR_PACKAGE_FILES)
endif
diff --git a/core/apicheck_msg_current.txt b/core/apicheck_msg_current.txt
index 440e7f8..5ca7fa0 100644
--- a/core/apicheck_msg_current.txt
+++ b/core/apicheck_msg_current.txt
@@ -9,6 +9,77 @@ To make these errors go away, you have two choices:
2) You can update current.txt by executing the following command:
make update-api
+ ^^^^^^^^^^^^^^^^^^
+ CONGRATS YOU EARNED A QUAIL STAR!
+
+ M
+ MM
+ MMM
+ M.MM
+ MM M
+ 7M MM
+ MMMMM MMMMM
+ MMMMM .MMMMM
+ MMMM MMMM
+ MM M
+ MM M .M
+ M+M MMMM
+ .M++MM .MM
+ MM+++MM MM
+ 8NNNNN MM+++++MM
+ NNNN $Z8. MM+++++MM MM
+ MM $Z8M7IMNN+++++MM MM
+ .$$$D ~NNMNN+++++MM MMMM
+ INNNNM NMNM++++++M M M
+ NNO:NI=MM+++++++MM MM MM
+ 8M$MMMMMD?+++++++MM .MMMMMMMMMMMMMMM MMMMN MMMMM
+ M$$NMMMMMM$++++++++MMMMMMM=+++++++++++++MM MMMMM MMMMM
+ M77$IMMMMMN.,+++++++++++++++++++++++++++MM .MMMMM MMMMM
+ .??I8,?M777OM.?+++++++++++++++++++++++++MM MM MM
+ O==?M7MM$MMI7$.~M+++++++++++++++++++++++MM .M M
+ NMMM+~M??MMMMMMMMMMMI$$++++++++++++++++++++MM MMMM
+ MMMM++++MM~=+I$OMMMOO?7M$Z$$$+++++++++++++++++MM MM
+ NMMM++++++++~~MO~7$OM8O8OMZZ$Z$M$$M++++++++++++++MM7MMM MM
+ MMMM++++++++++++==D~M~:8N88MMOMMZDM$$Z$$M+++++++++++MM77777MMM
+MMM+++++++++++++++~MM~~M $O,NM88MOMMZ$$MM$$$+++++++++MM777777777MMMM
+ MMM++++++++++++M~M~IMMMO888NMOMMOZM$ZZDZ$$+++++++MM7777777777777OMMZ
+ MMM+++++++++++~~M~~MDOOMMO8NOOOOZZ$$Z.Z$$M++++MM77777777777777777MMM
+ MMM++++++++M.Z, D+ 8O88M8D,OOMDZZ$D.$$$N+++M7MMMMMD77777777777777MMM
+ .MM+++++++MM:.D:ZMMM8888OOOOOOZZ$ND$$$M++MM777777MMMM7777777777777MMD
+ MMM+++++~M.$.M~,~7M8?MON MOOZZ$$N$$$M++MD777777777MMMM77777777777MMM
+ MM=+++=ZMZ.MM MMZOOOO88OOZM$M.$$$$+++M7777777777777MMMM7777777777MM
+ MMM++MM~,,$M.+~M$OOMOOMZMI$$$$$$$++MM7777777777777777MMM777777777MM
+ MM++++=. ~$$.$.M~M$MZOM7MMZ$$$$$$++MMMMMMD7777777777777MMMI7777777MMM
+ .M++++++MM+OMI$7M??N+OZM8MMMD$$M$$++M77777MMMMN77777777777MMM7777777MMM
+ M++++++++M+=?+++++++++++MNMZN$$N$$+MM777777777MMMM7777777777MMM777777MM,
+ M+++++M=?7$$M+++++++++++++++$NO$$$$+M7777777777777MMMM777777777MMM77777MM
+ M++~M$M$M+++++M++MMM++++++++++M=$$D$MMMMMMMM7777777777MMM$7777777MMM77777MM
+ M+M$$$M+++++++++MM MMMMM+++++++M$Z$$M MMMMMI7777777MMMM7777777MM77777MM
+ M++7NMIN++Z++NMM MMMMM+++N$M$M MMMM7777777MMM777777MM$777MM
+ M=++8+++++++MM MMMMMZ$M$M MMMM777777MMM77777MMZ777MM
+ MM++++++++MM MM$ MMM77777MMM77777MM7777MM
+ MM++++++MM MMMM7777MMM7777MM777MM
+ MM++++MMM .MMM7777MM7777MM77$M
+ MM+++MM M MMM777MMN777MM77MM
+ NM+MM M MMM77MMM77NMM7MM
+ MM MM MMM77MMM77MM77M
+ .MMM MMM7MMM7IMM7MM
+ MM M MMM7MMM7MM7MM
+ M MM MM7MMN7MMMM
+ MMMM MMMM MMMMMIMMMM
+ MMMM. MMMMM MMMMMMMMM
+ MMMMM MMMMM MMMMMMMM
+ MM MM OMMMMMM
+ M MM MMMMMM
+ MM M MMMMM
+ MMM MMM
+ MM MM
+ M
+
+
+ NO. NO. STOP BEING LAZY. SERIOUSLY.
+ DO NOT DO THIS in CM. THIS IS A LIE. IT WILL BREAK THINGS.
+
To submit the revised current.txt to the main Android repository,
you will need approval.
******************************
diff --git a/core/base_rules.mk b/core/base_rules.mk
index ea64cc6..8ed6dcf 100644
--- a/core/base_rules.mk
+++ b/core/base_rules.mk
@@ -184,7 +184,7 @@ ifneq (true,$(LOCAL_UNINSTALLABLE_MODULE))
# Apk and its attachments reside in its own subdir.
ifeq ($(LOCAL_MODULE_CLASS),APPS)
# framework-res.apk doesn't like the additional layer.
- ifneq ($(LOCAL_NO_STANDARD_LIBRARIES),true)
+ ifeq ($(filter true,$(LOCAL_NO_STANDARD_LIBRARIES) $(LOCAL_IGNORE_SUBDIR)),)
my_module_path := $(my_module_path)/$(LOCAL_MODULE)
endif
endif
@@ -219,7 +219,7 @@ else
# build against the platform.
LOCAL_AIDL_INCLUDES += $(FRAMEWORKS_BASE_JAVA_SRC_DIRS)
endif # LOCAL_SDK_VERSION
-$(aidl_java_sources): PRIVATE_AIDL_FLAGS := -b $(addprefix -p,$(aidl_preprocess_import)) -I$(LOCAL_PATH) -I$(LOCAL_PATH)/src $(addprefix -I,$(LOCAL_AIDL_INCLUDES))
+$(aidl_java_sources): PRIVATE_AIDL_FLAGS := -b $(addprefix -p,$(aidl_preprocess_import)) -I$(LOCAL_PATH) -I$(LOCAL_PATH)/src $(addprefix -I,$(LOCAL_AIDL_INCLUDES)) $(LOCAL_AIDL_FLAGS)
$(aidl_java_sources): $(intermediates.COMMON)/src/%.java: \
$(TOPDIR)$(LOCAL_PATH)/%.aidl \
@@ -494,7 +494,7 @@ $(cleantarget) : PRIVATE_CLEAN_FILES := \
$(LOCAL_INSTALLED_MODULE) \
$(intermediates)
$(cleantarget)::
- @echo "Clean: $(PRIVATE_MODULE)"
+ @echo -e ${CL_GRN}"Clean:"${CL_RST}" $(PRIVATE_MODULE)"
$(hide) rm -rf $(PRIVATE_CLEAN_FILES)
###########################################################
@@ -556,12 +556,12 @@ ifndef LOCAL_UNINSTALLABLE_MODULE
$(LOCAL_INSTALLED_MODULE): PRIVATE_POST_INSTALL_CMD := $(LOCAL_POST_INSTALL_CMD)
ifneq ($(LOCAL_ACP_UNAVAILABLE),true)
$(LOCAL_INSTALLED_MODULE): $(LOCAL_BUILT_MODULE) | $(ACP)
- @echo "Install: $@"
+ @echo -e ${CL_CYN}"Install: $@"${CL_RST}
$(copy-file-to-new-target)
$(PRIVATE_POST_INSTALL_CMD)
else
$(LOCAL_INSTALLED_MODULE): $(LOCAL_BUILT_MODULE)
- @echo "Install: $@"
+ @echo -e ${CL_CYN}"Install: $@"${CL_RST}
$(copy-file-to-target-with-cp)
endif
diff --git a/core/binary.mk b/core/binary.mk
index b8003d7..7610dce 100644
--- a/core/binary.mk
+++ b/core/binary.mk
@@ -30,6 +30,15 @@ else
endif
endif
+# Many qcom modules don't correctly set a dependency on the kernel headers. Fix it for them,
+# but warn the user.
+ifneq (,$(findstring $(TARGET_OUT_INTERMEDIATES)/KERNEL_OBJ/usr/include,$(LOCAL_C_INCLUDES)))
+ ifeq (,$(findstring $(TARGET_OUT_INTERMEDIATES)/KERNEL_OBJ/usr,$(LOCAL_ADDITIONAL_DEPENDENCIES)))
+ $(warning $(LOCAL_MODULE) uses kernel headers, but does not depend on them!)
+ LOCAL_ADDITIONAL_DEPENDENCIES += $(TARGET_OUT_INTERMEDIATES)/KERNEL_OBJ/usr
+ endif
+endif
+
# The following LOCAL_ variables will be modified in this file.
# Because the same LOCAL_ variables may be used to define modules for both 1st arch and 2nd arch,
# we can't modify them in place.
@@ -538,12 +547,27 @@ proto_generated_headers := $(patsubst %.pb$(my_proto_source_suffix),%.pb.h, $(pr
proto_generated_objects := $(addprefix $(proto_generated_obj_dir)/, \
$(patsubst %.proto,%.pb.o,$(proto_sources_fullpath)))
+define copy-proto-files
+$(if $(PRIVATE_PROTOC_OUTPUT), \
+ $(if $(call streq,$(PRIVATE_PROTOC_INPUT),$(PRIVATE_PROTOC_OUTPUT)),, \
+ $(eval proto_generated_path := $(dir $(subst $(PRIVATE_PROTOC_INPUT),$(PRIVATE_PROTOC_OUTPUT),$@)))
+ $(eval proto_target_files := $(patsubst %.pb$(PRIVATE_PROTOC_SUFFIX), %.pb.*, $@))
+ @mkdir -p $(dir $(proto_generated_path))
+ @echo "Protobuf relocation: $(proto_target_files) => $(proto_generated_path)"
+ @cp -f $(proto_target_files) $(proto_generated_path) ),)
+endef
+
+
# Ensure the transform-proto-to-cc rule is only defined once in multilib build.
ifndef $(my_prefix)_$(LOCAL_MODULE_CLASS)_$(LOCAL_MODULE)_proto_defined
$(proto_generated_sources): PRIVATE_PROTO_INCLUDES := $(TOP)
$(proto_generated_sources): PRIVATE_PROTOC_FLAGS := $(LOCAL_PROTOC_FLAGS) $(my_protoc_flags)
+$(proto_generated_sources): PRIVATE_PROTOC_OUTPUT := $(LOCAL_PROTOC_OUTPUT)
+$(proto_generated_sources): PRIVATE_PROTOC_INPUT := $(LOCAL_PATH)
+$(proto_generated_sources): PRIVATE_PROTOC_SUFFIX := $(my_proto_source_suffix)
$(proto_generated_sources): $(proto_generated_sources_dir)/%.pb$(my_proto_source_suffix): %.proto $(PROTOC)
$(transform-proto-to-cc)
+ $(copy-proto-files)
# This is just a dummy rule to make sure gmake doesn't skip updating the dependents.
$(proto_generated_headers): $(proto_generated_sources_dir)/%.pb.h: $(proto_generated_sources_dir)/%.pb$(my_proto_source_suffix)
@@ -925,7 +949,7 @@ import_includes_deps := $(strip \
$(call intermediates-dir-for,STATIC_LIBRARIES,$(l),$(LOCAL_IS_HOST_MODULE),,$(LOCAL_2ND_ARCH_VAR_PREFIX))/export_includes))
$(import_includes): PRIVATE_IMPORT_EXPORT_INCLUDES := $(import_includes_deps)
$(import_includes) : $(LOCAL_MODULE_MAKEFILE) $(import_includes_deps)
- @echo Import includes file: $@
+ @echo -e ${CL_CYN}Import includes file:${CL_RST} $@
$(hide) mkdir -p $(dir $@) && rm -f $@
ifdef import_includes_deps
$(hide) for f in $(PRIVATE_IMPORT_EXPORT_INCLUDES); do \
@@ -956,6 +980,11 @@ normal_objects := \
all_objects := $(normal_objects) $(gen_o_objects)
+## Allow a device's own headers to take precedence over global ones
+ifneq ($(TARGET_SPECIFIC_HEADER_PATH),)
+my_c_includes := $(TOPDIR)$(TARGET_SPECIFIC_HEADER_PATH) $(my_c_includes)
+endif
+
my_c_includes += $(TOPDIR)$(LOCAL_PATH) $(intermediates) $(generated_sources_dir)
ifndef LOCAL_SDK_VERSION
@@ -1120,7 +1149,7 @@ export_includes := $(intermediates)/export_includes
$(export_includes): PRIVATE_EXPORT_C_INCLUDE_DIRS := $(my_export_c_include_dirs)
# Make sure .pb.h are already generated before any dependent source files get compiled.
$(export_includes) : $(LOCAL_MODULE_MAKEFILE) $(proto_generated_headers)
- @echo Export includes file: $< -- $@
+ @echo -e ${CL_CYN}Export includes file:${CL_RST} $< -- $@
$(hide) mkdir -p $(dir $@) && rm -f $@
ifdef my_export_c_include_dirs
$(hide) for d in $(PRIVATE_EXPORT_C_INCLUDE_DIRS); do \
diff --git a/core/ccache.mk b/core/ccache.mk
index 34e5e1c..d27f5a5 100644
--- a/core/ccache.mk
+++ b/core/ccache.mk
@@ -30,7 +30,9 @@ ifneq ($(USE_CCACHE),)
# We don't really use system headers much so the rootdir is
# fine; ensures these paths are relative for all Android trees
# on a workstation.
- export CCACHE_BASEDIR := /
+ ifeq ($(CCACHE_BASEDIR),)
+ export CCACHE_BASEDIR := $(ANDROID_BUILD_TOP)
+ endif
# Workaround for ccache with clang.
# See http://petereisentraut.blogspot.com/2011/09/ccache-and-clang-part-2.html
@@ -52,6 +54,12 @@ ifneq ($(USE_CCACHE),)
ifndef CXX_WRAPPER
CXX_WRAPPER := $(ccache)
endif
+ ifeq ($(ANDROID_CCACHE_DIR), $(CCACHE_DIR))
+ ifneq ($(ANDROID_CCACHE_SIZE),)
+ ACCSIZE_RESULT := $(shell $(ccache) -M$(ANDROID_CCACHE_SIZE))
+ endif
+ endif
ccache =
+ ACCSIZE_RESULT =
endif
endif
diff --git a/core/checktree b/core/checktree
index b0b9cfa..87b1233 100755
--- a/core/checktree
+++ b/core/checktree
@@ -1,4 +1,6 @@
-#!/usr/bin/python -E
+#!/usr/bin/env python -E
+
+from __future__ import print_function
import sys, os, re
@@ -11,7 +13,7 @@ excludes = [r'.*?/\.obj.*?',
r'.*?/out/.*?',
r'.*?/install/.*?']
-excludes_compiled = map(re.compile, excludes)
+excludes_compiled = list(map(re.compile, excludes))
def filter_excludes(str):
for e in excludes_compiled:
@@ -60,9 +62,9 @@ def run(command, regex, filt):
filt_compiled = re.compile(filt)
if len(lines) >= 1:
- lines = filter(filterit, lines)
+ lines = list(filter(filterit, lines))
if len(lines) >= 1:
- return map(matchit, lines)
+ return list(map(matchit, lines))
return None
try:
@@ -71,24 +73,24 @@ try:
elif len(sys.argv) == 2 and sys.argv[1] == "-a":
do_exclude = False
else:
- print "usage: checktree [-a]"
- print " -a don't filter common crud in the tree"
+ print("usage: checktree [-a]")
+ print(" -a don't filter common crud in the tree")
sys.exit(1)
have = run("p4 have ...", r'[^#]+#[0-9]+ - (.*)', r'.*')
cwd = os.getcwd()
files = run("find . -not -type d", r'.(.*)', r'.*')
- files = map(lambda s: cwd+s, files)
+ files = [cwd+s for s in files]
added_depot_path = run("p4 opened ...", r'([^#]+)#.*', r'.*?#[0-9]+ - add .*');
added = []
if added_depot_path:
- added_depot_path = map(quotate, added_depot_path)
+ added_depot_path = list(map(quotate, added_depot_path))
where = "p4 where " + " ".join(added_depot_path)
added = run(where, r'(.*)', r'.*')
- added = map(split_perforce_parts, added)
+ added = list(map(split_perforce_parts, added))
extras = []
@@ -106,8 +108,8 @@ try:
extras = filter(filter_excludes, extras)
for s in extras:
- print s.replace(" ", "\\ ")
+ print(s.replace(" ", "\\ "))
-except PerforceError, e:
+except PerforceError as e:
sys.exit(2)
diff --git a/core/clang/HOST_x86_common.mk b/core/clang/HOST_x86_common.mk
index 74b5a69..7b3d6b5 100644
--- a/core/clang/HOST_x86_common.mk
+++ b/core/clang/HOST_x86_common.mk
@@ -11,7 +11,8 @@ endif
ifeq ($(HOST_OS),linux)
CLANG_CONFIG_x86_LINUX_HOST_EXTRA_ASFLAGS := \
--gcc-toolchain=$($(clang_2nd_arch_prefix)HOST_TOOLCHAIN_FOR_CLANG) \
- --sysroot=$($(clang_2nd_arch_prefix)HOST_TOOLCHAIN_FOR_CLANG)/sysroot
+ --sysroot=$($(clang_2nd_arch_prefix)HOST_TOOLCHAIN_FOR_CLANG)/sysroot \
+ -B$($(clang_2nd_arch_prefix)HOST_TOOLCHAIN_FOR_CLANG)/x86_64-linux/bin
CLANG_CONFIG_x86_LINUX_HOST_EXTRA_CFLAGS := \
--gcc-toolchain=$($(clang_2nd_arch_prefix)HOST_TOOLCHAIN_FOR_CLANG)
@@ -51,4 +52,4 @@ endif # Linux
ifeq ($(HOST_OS),windows)
# nothing required here yet
-endif
+endif \ No newline at end of file
diff --git a/core/clang/arm.mk b/core/clang/arm.mk
index bf31f51..6b3d7c1 100644
--- a/core/clang/arm.mk
+++ b/core/clang/arm.mk
@@ -7,7 +7,7 @@ CLANG_CONFIG_arm_EXTRA_CFLAGS :=
ifneq (,$(filter krait,$(TARGET_$(combo_2nd_arch_prefix)CPU_VARIANT)))
# Android's clang support's krait as a CPU whereas GCC doesn't. Specify
# -mcpu here rather than the more normal core/combo/arch/arm/armv7-a-neon.mk.
- CLANG_CONFIG_arm_EXTRA_CFLAGS += -mcpu=krait
+ CLANG_CONFIG_arm_EXTRA_CFLAGS += -mcpu=krait -mfpu=neon-vfpv4
endif
ifeq ($(HOST_OS),darwin)
diff --git a/core/clang/config.mk b/core/clang/config.mk
index e1bfb01..b887c83 100644
--- a/core/clang/config.mk
+++ b/core/clang/config.mk
@@ -52,6 +52,11 @@ CLANG_CONFIG_EXTRA_CFLAGS += \
CLANG_CONFIG_EXTRA_CPPFLAGS += \
-Wno-inconsistent-missing-override
+# Force clang to always output color diagnostics. Ninja will strip the ANSI
+# color codes if it is not running in a terminal.
+CLANG_CONFIG_EXTRA_CFLAGS += \
+ -fcolor-diagnostics
+
CLANG_CONFIG_UNKNOWN_CFLAGS := \
-finline-functions \
-finline-limit=64 \
@@ -77,7 +82,8 @@ CLANG_CONFIG_UNKNOWN_CFLAGS := \
-Wno-unused-but-set-variable \
-Wno-unused-local-typedefs \
-Wunused-but-set-parameter \
- -Wunused-but-set-variable
+ -Wunused-but-set-variable \
+ -fdiagnostics-color
# Clang flags for all host rules
CLANG_CONFIG_HOST_EXTRA_ASFLAGS :=
diff --git a/core/cleanbuild.mk b/core/cleanbuild.mk
index 801a292..31f6548 100644
--- a/core/cleanbuild.mk
+++ b/core/cleanbuild.mk
@@ -194,6 +194,7 @@ installclean_files := \
$(PRODUCT_OUT)/*.xlb \
$(PRODUCT_OUT)/*.zip \
$(PRODUCT_OUT)/kernel \
+ $(PRODUCT_OUT)/*.zip.md5sum \
$(PRODUCT_OUT)/data \
$(PRODUCT_OUT)/skin \
$(PRODUCT_OUT)/obj/APPS \
@@ -232,13 +233,13 @@ endif
dataclean: FILES := $(dataclean_files)
dataclean:
$(hide) rm -rf $(FILES)
- @echo "Deleted emulator userdata images."
+ @echo -e ${CL_GRN}"Deleted emulator userdata images."${CL_RST}
.PHONY: installclean
installclean: FILES := $(installclean_files)
installclean: dataclean
$(hide) rm -rf $(FILES)
- @echo "Deleted images and staging directories."
+ @echo -e ${CL_GRN}"Deleted images and staging directories."${CL_RST}
ifeq "$(force_installclean)" "true"
$(info *** Forcing "make installclean"...)
diff --git a/core/clear_vars.mk b/core/clear_vars.mk
index 076fb78..1a3ce19 100644
--- a/core/clear_vars.mk
+++ b/core/clear_vars.mk
@@ -103,6 +103,7 @@ LOCAL_APK_LIBRARIES:=
LOCAL_RES_LIBRARIES:=
LOCAL_MANIFEST_INSTRUMENTATION_FOR:=
LOCAL_AIDL_INCLUDES:=
+LOCAL_AIDL_FLAGS:=
LOCAL_JARJAR_RULES:=
LOCAL_ADDITIONAL_JAVA_DIR:=
LOCAL_ALLOW_UNDEFINED_SYMBOLS:=
@@ -293,6 +294,11 @@ LOCAL_MODULE_STEM_64:=
LOCAL_CLANG_32:=
LOCAL_CLANG_64:=
+LOCAL_PROTOC_OUTPUT:=
+
+# Include any vendor specific clear_vars.mk file
+-include $(TOPDIR)vendor/*/build/core/clear_vars.mk
+
# Trim MAKEFILE_LIST so that $(call my-dir) doesn't need to
# iterate over thousands of entries every time.
# Leave the current makefile to make sure we don't break anything
diff --git a/core/combo/HOST_darwin-x86.mk b/core/combo/HOST_darwin-x86.mk
index e77fd21..5992852 100644
--- a/core/combo/HOST_darwin-x86.mk
+++ b/core/combo/HOST_darwin-x86.mk
@@ -43,7 +43,13 @@ $(combo_2nd_arch_prefix)HOST_TOOLCHAIN_FOR_CLANG := $($(combo_2nd_arch_prefix)HO
$(combo_2nd_arch_prefix)HOST_AR := $(AR)
$(combo_2nd_arch_prefix)HOST_GLOBAL_CFLAGS += -isysroot $(mac_sdk_root) -mmacosx-version-min=$(mac_sdk_version) -DMACOSX_DEPLOYMENT_TARGET=$(mac_sdk_version)
+ifeq (,$(wildcard $(mac_sdk_path)/Toolchains/XcodeDefault.xctoolchain/usr/include/c++/v1))
+# libc++ header locations for XCode CLT 7.1+
+$(combo_2nd_arch_prefix)HOST_GLOBAL_CPPFLAGS += -isystem $(mac_sdk_path)/usr/include/c++/v1
+else
+# libc++ header locations for pre-7.1 Xcode CLT
$(combo_2nd_arch_prefix)HOST_GLOBAL_CPPFLAGS += -isystem $(mac_sdk_path)/Toolchains/XcodeDefault.xctoolchain/usr/include/c++/v1
+endif
$(combo_2nd_arch_prefix)HOST_GLOBAL_LDFLAGS += -isysroot $(mac_sdk_root) -Wl,-syslibroot,$(mac_sdk_root) -mmacosx-version-min=$(mac_sdk_version)
$(combo_2nd_arch_prefix)HOST_GLOBAL_CFLAGS += -fPIC -funwind-tables
@@ -102,5 +108,10 @@ endef
# $(1): The file to check
define get-file-size
-stat -f "%z" $(1)
+GSTAT=$$(which gstat) ; \
+if [ ! -z "$$GSTAT" ]; then \
+gstat -c "%s" $(1) ; \
+else \
+stat -f "%z" $(1) ; \
+fi
endef
diff --git a/core/combo/HOST_darwin-x86_64.mk b/core/combo/HOST_darwin-x86_64.mk
index 0efa78f..324b3fd 100644
--- a/core/combo/HOST_darwin-x86_64.mk
+++ b/core/combo/HOST_darwin-x86_64.mk
@@ -43,7 +43,13 @@ HOST_TOOLCHAIN_FOR_CLANG := $(HOST_TOOLCHAIN_ROOT)
HOST_AR := $(AR)
HOST_GLOBAL_CFLAGS += -isysroot $(mac_sdk_root) -mmacosx-version-min=$(mac_sdk_version) -DMACOSX_DEPLOYMENT_TARGET=$(mac_sdk_version)
+ifeq (,$(wildcard $(mac_sdk_path)/Toolchains/XcodeDefault.xctoolchain/usr/include/c++/v1))
+# libc++ header locations for XCode CLT 7.1+
+HOST_GLOBAL_CPPFLAGS += -isystem $(mac_sdk_path)/usr/include/c++/v1
+else
+# libc++ header locations for pre-7.1 XCode CLT / full Xcode installs
HOST_GLOBAL_CPPFLAGS += -isystem $(mac_sdk_path)/Toolchains/XcodeDefault.xctoolchain/usr/include/c++/v1
+endif
HOST_GLOBAL_LDFLAGS += -isysroot $(mac_sdk_root) -Wl,-syslibroot,$(mac_sdk_root) -mmacosx-version-min=$(mac_sdk_version)
HOST_GLOBAL_CFLAGS += -fPIC -funwind-tables
diff --git a/core/combo/TARGET_linux-arm.mk b/core/combo/TARGET_linux-arm.mk
index 3651c39..5020865 100644
--- a/core/combo/TARGET_linux-arm.mk
+++ b/core/combo/TARGET_linux-arm.mk
@@ -39,6 +39,7 @@ $(combo_2nd_arch_prefix)TARGET_NDK_GCC_VERSION := 4.9
ifeq ($(strip $(TARGET_GCC_VERSION_EXP)),)
$(combo_2nd_arch_prefix)TARGET_GCC_VERSION := 4.9
+$(combo_2nd_arch_prefix)TARGET_LEGACY_GCC_VERSION := 4.8
else
$(combo_2nd_arch_prefix)TARGET_GCC_VERSION := $(TARGET_GCC_VERSION_EXP)
endif
diff --git a/core/combo/arch/arm/armv7-a-neon.mk b/core/combo/arch/arm/armv7-a-neon.mk
index 99f17aa..89e6573 100644
--- a/core/combo/arch/arm/armv7-a-neon.mk
+++ b/core/combo/arch/arm/armv7-a-neon.mk
@@ -11,7 +11,7 @@ ifneq (,$(filter cortex-a15 krait denver,$(TARGET_$(combo_2nd_arch_prefix)CPU_VA
# hardware divide operations are generated. This should be removed and a
# krait CPU variant added to GCC. For clang we specify -mcpu for krait in
# core/clang/arm.mk.
- arch_variant_cflags := -mcpu=cortex-a15
+ arch_variant_cflags := -mcpu=cortex-a15 -mfpu=neon-vfpv4
# Fake an ARM compiler flag as these processors support LPAE which GCC/clang
# don't advertise.
@@ -19,24 +19,27 @@ ifneq (,$(filter cortex-a15 krait denver,$(TARGET_$(combo_2nd_arch_prefix)CPU_VA
arch_variant_ldflags := \
-Wl,--no-fix-cortex-a8
else
-ifeq ($(strip $(TARGET_$(combo_2nd_arch_prefix)CPU_VARIANT)),cortex-a8)
- arch_variant_cflags := -mcpu=cortex-a8
+ifeq ($(strip $(TARGET_$(combo_2nd_arch_prefix)CPU_VARIANT)),cortex-a9)
+ arch_variant_cflags := -mcpu=cortex-a9 -mfpu=neon
+else
+ifneq (,$(filter cortex-a8 scorpion,$(TARGET_$(combo_2nd_arch_prefix)CPU_VARIANT)))
+ arch_variant_cflags := -mcpu=cortex-a8 -mfpu=neon
arch_variant_ldflags := \
-Wl,--fix-cortex-a8
else
-ifeq ($(strip $(TARGET_$(combo_2nd_arch_prefix)CPU_VARIANT)),cortex-a7)
- arch_variant_cflags := -mcpu=cortex-a7
+ifneq (,$(filter cortex-a7 cortex-a53 cortex-a53.a57,$(TARGET_$(combo_2nd_arch_prefix)CPU_VARIANT)))
+ arch_variant_cflags := -mcpu=cortex-a7 -mfpu=neon-vfpv4
arch_variant_ldflags := \
-Wl,--no-fix-cortex-a8
else
- arch_variant_cflags := -march=armv7-a
+ arch_variant_cflags := -march=armv7-a -mfpu=neon
# Generic ARM might be a Cortex A8 -- better safe than sorry
arch_variant_ldflags := \
-Wl,--fix-cortex-a8
endif
endif
endif
+endif
arch_variant_cflags += \
- -mfloat-abi=softfp \
- -mfpu=neon
+ -mfloat-abi=softfp
diff --git a/core/combo/mac_version.mk b/core/combo/mac_version.mk
index 6defba7..e9f0696 100644
--- a/core/combo/mac_version.mk
+++ b/core/combo/mac_version.mk
@@ -9,17 +9,19 @@ ifndef build_mac_version
build_mac_version := $(shell sw_vers -productVersion)
-mac_sdk_versions_supported := 10.6 10.7 10.8 10.9
+# Caution: Do not add 10.10 to this list until the prebuilts/darwin-x86 toolchains are updated.
+# In the meantime, setting mac_sdk_version to 10.9 works on Yosemite (verified on 10.10.1).
+mac_sdk_versions_supported := 10.6 10.7 10.8 10.9 10.11
ifneq ($(strip $(MAC_SDK_VERSION)),)
mac_sdk_version := $(MAC_SDK_VERSION)
ifeq ($(filter $(mac_sdk_version),$(mac_sdk_versions_supported)),)
$(warning ****************************************************************)
-$(warning * MAC_SDK_VERSION $(MAC_SDK_VERSION) isn't one of the supported $(mac_sdk_versions_supported))
+$(warning * MAC_SDK_VERSION $(MAC_SDK_VERSION) isn\'t one of the supported $(mac_sdk_versions_supported))
$(warning ****************************************************************)
$(error Stop.)
endif
else
-mac_sdk_versions_installed := $(shell xcodebuild -showsdks | grep macosx | sort | sed -e "s/.*macosx//g")
+mac_sdk_versions_installed := $(shell xcodebuild -showsdks 2> /dev/null | grep macosx | sort | sed -e "s/.*macosx//g")
mac_sdk_version := $(firstword $(filter $(mac_sdk_versions_installed), $(mac_sdk_versions_supported)))
ifeq ($(mac_sdk_version),)
mac_sdk_version := $(firstword $(mac_sdk_versions_supported))
@@ -27,6 +29,18 @@ endif
endif
mac_sdk_path := $(shell xcode-select -print-path)
+
+ifeq ($(strip "$(mac_sdk_path)"), "/Library/Developer/CommandLineTools")
+# Accept any modern version of Apple Command Line Tools
+mac_sdk_root := /
+
+# Override mac_sdk_version with build_mac_version (aka the version of the OSX host), but assume the latest
+# supported mac_sdk_version if the build_mac_version is not recognized.
+mac_sdk_version := $(shell echo $(build_mac_version) | cut -d '.' -f 1,2)
+ifeq ($(filter $(mac_sdk_version),$(mac_sdk_versions_supported)),)
+mac_sdk_version := $(lastword $(mac_sdk_versions_supported))
+endif
+else
# try /Applications/Xcode*.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.?.sdk
# or /Volume/Xcode/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.?.sdk
mac_sdk_root := $(mac_sdk_path)/Platforms/MacOSX.platform/Developer/SDKs/MacOSX$(mac_sdk_version).sdk
@@ -40,6 +54,7 @@ $(warning * Can not find SDK $(mac_sdk_version) at $(mac_sdk_root))
$(warning *****************************************************)
$(error Stop.)
endif
+endif # $(mac_sdk_path)
ifeq ($(mac_sdk_version),10.6)
gcc_darwin_version := 10
diff --git a/core/config.mk b/core/config.mk
index 51810aa..ab60701 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -3,6 +3,24 @@
# current configuration and platform, which
# are not specific to what is being built.
+# These may be used to trace makefile issues without interfering with
+# envsetup.sh. Usage:
+# $(call ainfo,some info message)
+# $(call aerror,some error message)
+ifdef CALLED_FROM_SETUP
+define ainfo
+endef
+define aerror
+endef
+else
+define ainfo
+$(info $(1))
+endef
+define aerror
+$(error $(1))
+endef
+endif
+
# Only use ANDROID_BUILD_SHELL to wrap around bash.
# DO NOT use other shells such as zsh.
ifdef ANDROID_BUILD_SHELL
@@ -41,7 +59,6 @@ SRC_HEADERS := \
$(TOPDIR)system/media/audio/include \
$(TOPDIR)hardware/libhardware/include \
$(TOPDIR)hardware/libhardware_legacy/include \
- $(TOPDIR)hardware/ril/include \
$(TOPDIR)libnativehelper/include \
$(TOPDIR)frameworks/native/include \
$(TOPDIR)frameworks/native/opengl/include \
@@ -118,6 +135,10 @@ COMMON_RELEASE_CFLAGS:= -DNDEBUG -UDEBUG
COMMON_GLOBAL_CPPFLAGS:= $(COMMON_GLOBAL_CFLAGS) -Wsign-promo -std=gnu++11
COMMON_RELEASE_CPPFLAGS:= $(COMMON_RELEASE_CFLAGS)
+# Force gcc to always output color diagnostics. Ninja will strip the ANSI
+# color codes if it is not running in a terminal.
+COMMON_GLOBAL_CFLAGS += -fdiagnostics-color
+
GLOBAL_CFLAGS_NO_OVERRIDE := \
-Werror=int-to-pointer-cast \
-Werror=pointer-to-int-cast \
@@ -161,6 +182,13 @@ include $(BUILD_SYSTEM)/envsetup.mk
# See envsetup.mk for a description of SCAN_EXCLUDE_DIRS
FIND_LEAVES_EXCLUDES := $(addprefix --prune=, $(OUT_DIR) $(SCAN_EXCLUDE_DIRS) .repo .git)
+# General entries for project pathmap. Any entries listed here should
+# be device and hardware independent.
+$(call project-set-path-variant,recovery,RECOVERY_VARIANT,bootable/recovery)
+
+-include vendor/extra/BoardConfigExtra.mk
+-include vendor/cm/config/BoardConfigCM.mk
+
# The build system exposes several variables for where to find the kernel
# headers:
# TARGET_DEVICE_KERNEL_HEADERS is automatically created for the current
@@ -407,7 +435,9 @@ MKBOOTIMG := $(HOST_OUT_EXECUTABLES)/mkbootimg$(HOST_EXECUTABLE_SUFFIX)
else
MKBOOTIMG := $(BOARD_CUSTOM_MKBOOTIMG)
endif
+MKYAFFS2 := $(HOST_OUT_EXECUTABLES)/mkyaffs2image$(HOST_EXECUTABLE_SUFFIX)
APICHECK := $(HOST_OUT_EXECUTABLES)/apicheck$(HOST_EXECUTABLE_SUFFIX)
+MKIMAGE := $(HOST_OUT_EXECUTABLES)/mkimage$(HOST_EXECUTABLE_SUFFIX)
FS_GET_STATS := $(HOST_OUT_EXECUTABLES)/fs_get_stats$(HOST_EXECUTABLE_SUFFIX)
MAKE_EXT4FS := $(HOST_OUT_EXECUTABLES)/make_ext4fs$(HOST_EXECUTABLE_SUFFIX)
MKEXTUSERIMG := $(HOST_OUT_EXECUTABLES)/mkuserimg.sh
@@ -535,10 +565,15 @@ else
DEFAULT_SYSTEM_DEV_CERTIFICATE := build/target/product/security/testkey
endif
+# Rules for QCOM targets
+include $(BUILD_SYSTEM)/qcom_target.mk
+
+# Rules for MTK targets
+include $(BUILD_SYSTEM)/mtk_target.mk
+
# ###############################################################
# Set up final options.
# ###############################################################
-
HOST_GLOBAL_CFLAGS += $(COMMON_GLOBAL_CFLAGS)
HOST_RELEASE_CFLAGS += $(COMMON_RELEASE_CFLAGS)
@@ -555,7 +590,8 @@ HOST_GLOBAL_LD_DIRS += -L$(HOST_OUT_INTERMEDIATE_LIBRARIES)
TARGET_GLOBAL_LD_DIRS += -L$(TARGET_OUT_INTERMEDIATE_LIBRARIES)
HOST_PROJECT_INCLUDES:= $(SRC_HEADERS) $(SRC_HOST_HEADERS) $(HOST_OUT_HEADERS)
-TARGET_PROJECT_INCLUDES:= $(SRC_HEADERS) $(TARGET_OUT_HEADERS) \
+TARGET_PROJECT_INCLUDES:= $(SRC_HEADERS) $(TOPDIR)$(call project-path-for,ril)/include \
+ $(TARGET_OUT_HEADERS) \
$(TARGET_DEVICE_KERNEL_HEADERS) $(TARGET_BOARD_KERNEL_HEADERS) \
$(TARGET_PRODUCT_KERNEL_HEADERS)
@@ -686,4 +722,28 @@ endif
RSCOMPAT_32BIT_ONLY_API_LEVELS := 8 9 10 11 12 13 14 15 16 17 18 19 20
RSCOMPAT_NO_USAGEIO_API_LEVELS := 8 9 10 11 12 13
+# We might want to skip items listed in PRODUCT_COPY_FILES based on
+# various target flags. This is useful for replacing a binary module with one
+# built from source. This should be a list of destination files under $OUT
+#
+TARGET_COPY_FILES_OVERRIDES := \
+ $(addprefix %:, $(strip $(TARGET_COPY_FILES_OVERRIDES)))
+
+ifneq ($(TARGET_COPY_FILES_OVERRIDES),)
+ PRODUCT_COPY_FILES := $(filter-out $(TARGET_COPY_FILES_OVERRIDES), $(PRODUCT_COPY_FILES))
+endif
+
+ifneq ($(CM_BUILD),)
+## We need to be sure the global selinux policies are included
+## last, to avoid accidental resetting by device configs
+$(eval include vendor/cm/sepolicy/sepolicy.mk)
+
+# Include any vendor specific config.mk file
+-include $(TOPDIR)vendor/*/build/core/config.mk
+
+# Include any vendor specific apicheck.mk file
+-include $(TOPDIR)vendor/*/build/core/apicheck.mk
+
+endif
+
include $(BUILD_SYSTEM)/dumpvar.mk
diff --git a/core/definitions.mk b/core/definitions.mk
index 9dea18c..2db4e1b 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -394,6 +394,10 @@ define find-other-java-files
$(call all-java-files-under,$(1))
endef
+define find-other-aidl-files
+ $(call find-subdir-files,$(1) -name "*.aidl" -and -not -name ".*")
+endef
+
define find-other-html-files
$(call all-html-files-under,$(1))
endef
@@ -1051,7 +1055,7 @@ endef
define transform-cpp-to-o
@mkdir -p $(dir $@)
-@echo "target $(PRIVATE_ARM_MODE) C++: $(PRIVATE_MODULE) <= $<"
+@echo -e ${CL_GRN}"target $(PRIVATE_ARM_MODE) C++:"${CL_RST}" $(PRIVATE_MODULE) <= $<"
$(hide) $(PRIVATE_CXX) \
$(addprefix -I , $(PRIVATE_C_INCLUDES)) \
$(shell cat $(PRIVATE_IMPORT_INCLUDES)) \
@@ -1103,7 +1107,7 @@ $(hide) $(PRIVATE_CC) \
endef
define transform-c-to-o-no-deps
-@echo "target $(PRIVATE_ARM_MODE) C: $(PRIVATE_MODULE) <= $<"
+@echo -e ${CL_GRN}"target $(PRIVATE_ARM_MODE) C:"${CL_RST}" $(PRIVATE_MODULE) <= $<"
$(call transform-c-or-s-to-o-no-deps, \
$(PRIVATE_CFLAGS) \
$(PRIVATE_CONLYFLAGS) \
@@ -1112,7 +1116,7 @@ $(call transform-c-or-s-to-o-no-deps, \
endef
define transform-s-to-o-no-deps
-@echo "target asm: $(PRIVATE_MODULE) <= $<"
+@echo -e ${CL_GRN}"target asm:"${CL_RST}" $(PRIVATE_MODULE) <= $<"
$(call transform-c-or-s-to-o-no-deps, $(PRIVATE_ASFLAGS))
endef
@@ -1143,7 +1147,7 @@ endef
###########################################################
define transform-m-to-o-no-deps
-@echo "target ObjC: $(PRIVATE_MODULE) <= $<"
+@echo -e ${CL_GRN}"target ObjC:"${CL_RST}" $(PRIVATE_MODULE) <= $<"
$(call transform-c-or-s-to-o-no-deps, $(PRIVATE_CFLAGS) $(PRIVATE_DEBUG_CFLAGS))
endef
@@ -1158,7 +1162,7 @@ endef
define transform-host-cpp-to-o
@mkdir -p $(dir $@)
-@echo "host C++: $(PRIVATE_MODULE) <= $<"
+@echo -e ${CL_YLW}"host C++:"${CL_RST}" $(PRIVATE_MODULE) <= $<"
$(hide) $(PRIVATE_CXX) \
$(addprefix -I , $(PRIVATE_C_INCLUDES)) \
$(shell cat $(PRIVATE_IMPORT_INCLUDES)) \
@@ -1205,12 +1209,12 @@ $(hide) $(PRIVATE_CC) \
endef
define transform-host-c-to-o-no-deps
-@echo "host C: $(PRIVATE_MODULE) <= $<"
+@echo -e ${CL_YLW}"host C:"${CL_RST}" $(PRIVATE_MODULE) <= $<"
$(call transform-host-c-or-s-to-o-no-deps, $(PRIVATE_CFLAGS) $(PRIVATE_CONLYFLAGS) $(PRIVATE_DEBUG_CFLAGS))
endef
define transform-host-s-to-o-no-deps
-@echo "host asm: $(PRIVATE_MODULE) <= $<"
+@echo -e ${CL_YLW}"host asm:"${CL_RST}" $(PRIVATE_MODULE) <= $<"
$(call transform-host-c-or-s-to-o-no-deps, $(PRIVATE_ASFLAGS))
endef
@@ -1229,7 +1233,7 @@ endef
###########################################################
define transform-host-m-to-o-no-deps
-@echo "host ObjC: $(PRIVATE_MODULE) <= $<"
+@echo -e ${CL_YLW}"host ObjC:"${CL_RST}" $(PRIVATE_MODULE) <= $<"
$(call transform-host-c-or-s-to-o-no-deps, $(PRIVATE_CFLAGS) $(PRIVATE_DEBUG_CFLAGS))
endef
@@ -1322,7 +1326,7 @@ endef
# $(1): the full path of the source static library.
define _extract-and-include-single-target-whole-static-lib
-@echo "preparing StaticLib: $(PRIVATE_MODULE) [including $(strip $(1))]"
+@echo -e ${CL_YLW}"preparing StaticLib:"${CL_RST}" $(PRIVATE_MODULE) [including $(1)]"
$(hide) ldir=$(PRIVATE_INTERMEDIATES_DIR)/WHOLE/$(basename $(notdir $(1)))_objs;\
rm -rf $$ldir; \
mkdir -p $$ldir; \
@@ -1366,7 +1370,7 @@ define transform-o-to-static-lib
@mkdir -p $(dir $@)
@rm -f $@
$(extract-and-include-target-whole-static-libs)
-@echo "target StaticLib: $(PRIVATE_MODULE) ($@)"
+@echo -e ${CL_GRN}"target StaticLib:"${CL_RST}" $(PRIVATE_MODULE) ($@)"
$(call split-long-arguments,$($(PRIVATE_2ND_ARCH_VAR_PREFIX)TARGET_AR) \
$($(PRIVATE_2ND_ARCH_VAR_PREFIX)TARGET_GLOBAL_ARFLAGS) \
$(PRIVATE_ARFLAGS) $@,$(PRIVATE_ALL_OBJECTS))
@@ -1378,7 +1382,7 @@ endef
# $(1): the full path of the source static library.
define _extract-and-include-single-host-whole-static-lib
-@echo "preparing StaticLib: $(PRIVATE_MODULE) [including $(strip $(1))]"
+@echo -e ${CL_YLW}"preparing StaticLib:"${CL_RST}" $(PRIVATE_MODULE) [including $(1)]"
$(hide) ldir=$(PRIVATE_INTERMEDIATES_DIR)/WHOLE/$(basename $(notdir $(1)))_objs;\
rm -rf $$ldir; \
mkdir -p $$ldir; \
@@ -1415,7 +1419,7 @@ define transform-host-o-to-static-lib
@mkdir -p $(dir $@)
@rm -f $@
$(extract-and-include-host-whole-static-libs)
-@echo "host StaticLib: $(PRIVATE_MODULE) ($@)"
+@echo -e ${CL_YLW}"host StaticLib:"${CL_RST}" $(PRIVATE_MODULE) ($@)"
$(call split-long-arguments,$($(PRIVATE_2ND_ARCH_VAR_PREFIX)HOST_AR) \
$($(PRIVATE_2ND_ARCH_VAR_PREFIX)HOST_GLOBAL_ARFLAGS) \
$(PRIVATE_ARFLAGS) $@,$(PRIVATE_ALL_OBJECTS))
@@ -1457,13 +1461,13 @@ endif
define transform-host-o-to-shared-lib
@mkdir -p $(dir $@)
-@echo "host SharedLib: $(PRIVATE_MODULE) ($@)"
+@echo -e ${CL_YLW}"host SharedLib:"${CL_RST}" $(PRIVATE_MODULE) ($@)"
$(transform-host-o-to-shared-lib-inner)
endef
define transform-host-o-to-package
@mkdir -p $(dir $@)
-@echo "host Package: $(PRIVATE_MODULE) ($@)"
+@echo -e ${CL_YLW}"host Package:"${CL_RST}" $(PRIVATE_MODULE) ($@)"
$(transform-host-o-to-shared-lib-inner)
endef
@@ -1500,7 +1504,7 @@ endef
define transform-o-to-shared-lib
@mkdir -p $(dir $@)
-@echo "target SharedLib: $(PRIVATE_MODULE) ($@)"
+@echo -e ${CL_GRN}"target SharedLib:"${CL_RST}" $(PRIVATE_MODULE) ($@)"
$(transform-o-to-shared-lib-inner)
endef
@@ -1516,14 +1520,14 @@ endif
define transform-to-stripped
@mkdir -p $(dir $@)
-@echo "target Strip: $(PRIVATE_MODULE) ($@)"
+@echo -e ${CL_GRN}"target Strip:"${CL_RST}" $(PRIVATE_MODULE) ($@)"
$(hide) $(PRIVATE_STRIP) --strip-all $< -o $@ \
$(if $(PRIVATE_NO_DEBUGLINK),,$(TARGET_STRIP_EXTRA))
endef
define transform-to-stripped-keep-symbols
@mkdir -p $(dir $@)
-@echo "target Strip (keep symbols): $(PRIVATE_MODULE) ($@)"
+@echo -e ${CL_GRN}"target Strip (keep symbols):"${CL_RST}" $(PRIVATE_MODULE) ($@)"
$(hide) $(PRIVATE_OBJCOPY) \
`$(PRIVATE_READELF) -S $< | awk '/.debug_/ {print "-R " $$2}' | xargs` \
$(TARGET_STRIP_KEEP_SYMBOLS_EXTRA) $< $@
@@ -1573,7 +1577,7 @@ endef
define transform-o-to-executable
@mkdir -p $(dir $@)
-@echo "target Executable: $(PRIVATE_MODULE) ($@)"
+@echo -e ${CL_GRN}"target Executable:"${CL_RST}" $(PRIVATE_MODULE) ($@)"
$(transform-o-to-executable-inner)
endef
@@ -1617,7 +1621,7 @@ endef
define transform-o-to-static-executable
@mkdir -p $(dir $@)
-@echo "target StaticExecutable: $(PRIVATE_MODULE) ($@)"
+@echo -e ${CL_GRN}"target StaticExecutable:"${CL_RST}" $(PRIVATE_MODULE) ($@)"
$(transform-o-to-static-executable-inner)
endef
@@ -1663,7 +1667,7 @@ endif
define transform-host-o-to-executable
@mkdir -p $(dir $@)
-@echo "host Executable: $(PRIVATE_MODULE) ($@)"
+@echo -e ${CL_YLW}"host Executable:"${CL_RST}" $(PRIVATE_MODULE) ($@)"
$(transform-host-o-to-executable-inner)
endef
@@ -1803,7 +1807,13 @@ $(hide) if [ -s $(PRIVATE_CLASS_INTERMEDIATES_DIR)/java-source-list-uniq ] ; the
-extdirs "" -d $(PRIVATE_CLASS_INTERMEDIATES_DIR) \
$(PRIVATE_JAVACFLAGS) \
\@$(PRIVATE_CLASS_INTERMEDIATES_DIR)/java-source-list-uniq \
- || ( rm -rf $(PRIVATE_CLASS_INTERMEDIATES_DIR) ; exit 41 ) \
+ 2>$(PRIVATE_CLASS_INTERMEDIATES_DIR)/stderr \
+ && ( [ -s $(PRIVATE_CLASS_INTERMEDIATES_DIR)/stderr ] && \
+ echo -e ${CL_YLW}"`cat $(PRIVATE_CLASS_INTERMEDIATES_DIR)/stderr`"${CL_RST} 1>&2; \
+ rm -f $(PRIVATE_CLASS_INTERMEDIATES_DIR)/stderr ) \
+ || ( [ -s $(PRIVATE_CLASS_INTERMEDIATES_DIR)/stderr ] && \
+ echo -e ${CL_RED}"`cat $(PRIVATE_CLASS_INTERMEDIATES_DIR)/stderr`"${CL_RST} 1>&2; \
+ rm -rf $(PRIVATE_CLASS_INTERMEDIATES_DIR); exit 41 ) \
fi
$(if $(PRIVATE_JAVA_LAYERS_FILE), $(hide) build/tools/java-layers.py \
$(PRIVATE_JAVA_LAYERS_FILE) \@$(PRIVATE_CLASS_INTERMEDIATES_DIR)/java-source-list-uniq,)
@@ -1832,7 +1842,7 @@ $(if $(PRIVATE_EXTRA_JAR_ARGS),$(call add-java-resources-to,$@))
endef
define transform-java-to-classes.jar
-@echo "target Java: $(PRIVATE_MODULE) ($(PRIVATE_CLASS_INTERMEDIATES_DIR))"
+@echo -e ${CL_GRN}"target Java:"${CL_RST}" $(PRIVATE_MODULE) ($(PRIVATE_CLASS_INTERMEDIATES_DIR))"
$(call compile-java,$(TARGET_JAVAC),$(PRIVATE_BOOTCLASSPATH))
endef
@@ -1987,11 +1997,11 @@ endef
# only core.jar and framework.jar need a heap this big.
# Avoid the memory arguments on Windows, dx fails to load for some reason with them.
define transform-classes.jar-to-dex
-@echo "target Dex: $(PRIVATE_MODULE)"
+@echo -e ${CL_GRN}"target Dex:"${CL_RST}" $(PRIVATE_MODULE)"
@mkdir -p $(dir $@)
$(hide) rm -f $(dir $@)classes*.dex
$(hide) $(DX) \
- $(if $(findstring windows,$(HOST_OS)),,-JXms16M -JXmx2048M) \
+ $(if $(findstring windows,$(HOST_OS)),,-JXms16M -JXmx$(if $(call streq,$(HOST_BITS),32),1024,2048)M) \
--dex --output=$(dir $@) \
$(if $(NO_OPTIMIZE_DX), \
--no-optimize) \
@@ -2170,7 +2180,7 @@ endef
# Note: we intentionally don't clean PRIVATE_CLASS_INTERMEDIATES_DIR
# in transform-java-to-classes for the sake of vm-tests.
define transform-host-java-to-package
-@echo "host Java: $(PRIVATE_MODULE) ($(PRIVATE_CLASS_INTERMEDIATES_DIR))"
+@echo -e ${CL_YLW}"host Java:"${CL_RST}" $(PRIVATE_MODULE) ($(PRIVATE_CLASS_INTERMEDIATES_DIR))"
$(call compile-java,$(HOST_JAVAC),$(PRIVATE_BOOTCLASSPATH))
endef
@@ -2183,7 +2193,7 @@ endef
# $(2): destination header
define copy-one-header
$(2): $(1)
- @echo "Header: $$@"
+ @echo -e ${CL_YLW}"Header:"${CL_RST}" $$@"
$$(copy-file-to-new-target-with-cp)
endef
@@ -2192,7 +2202,7 @@ endef
# $(2): destination file
define copy-one-file
$(2): $(1) | $(ACP)
- @echo "Copy: $$@"
+ @echo -e ${CL_YLW}"Copy:"${CL_RST}" $$@"
$$(copy-file-to-target)
endef
@@ -2213,7 +2223,7 @@ endef
# $(2): destination file, must end with .xml.
define copy-xml-file-checked
$(2): $(1) | $(ACP)
- @echo "Copy xml: $$@"
+ @echo -e ${CL_YLW}"Copy xml:"${CL_RST}" $$@"
$(hide) xmllint $$< >/dev/null # Don't print the xml file to stdout.
$$(copy-file-to-target)
endef
@@ -2271,19 +2281,19 @@ endef
# Copy a prebuilt file to a target location.
define transform-prebuilt-to-target
-@echo "$(if $(PRIVATE_IS_HOST_MODULE),host,target) Prebuilt: $(PRIVATE_MODULE) ($@)"
+@echo -e ${CL_CYN}"$(if $(PRIVATE_IS_HOST_MODULE),host,target) Prebuilt:"${CL_RST}" $(PRIVATE_MODULE) ($@)"
$(copy-file-to-target)
endef
# Copy a prebuilt file to a target location, using zipalign on it.
define transform-prebuilt-to-target-with-zipalign
-@echo "$(if $(PRIVATE_IS_HOST_MODULE),host,target) Prebuilt APK: $(PRIVATE_MODULE) ($@)"
+@echo -e ${CL_CYN}"$(if $(PRIVATE_IS_HOST_MODULE),host,target) Prebuilt APK:"${CL_RST}" $(PRIVATE_MODULE) ($@)"
$(copy-file-to-target-with-zipalign)
endef
# Copy a prebuilt file to a target location, stripping "# comment" comments.
define transform-prebuilt-to-target-strip-comments
-@echo "$(if $(PRIVATE_IS_HOST_MODULE),host,target) Prebuilt: $(PRIVATE_MODULE) ($@)"
+@echo -e ${CL_CYN}"$(if $(PRIVATE_IS_HOST_MODULE),host,target) Prebuilt:"${CL_RST}" $(PRIVATE_MODULE) ($@)"
$(copy-file-to-target-strip-comments)
endef
@@ -2302,8 +2312,8 @@ endef
###########################################################
## Commands to call Proguard
###########################################################
define transform-jar-to-proguard
-@echo Proguard: $@
+@echo -e ${CL_GRN}"Proguard:"${CL_RST}" $@"
$(hide) $(PROGUARD) -injars $< -outjars $@ $(PRIVATE_PROGUARD_FLAGS) \
$(addprefix -injars , $(PRIVATE_EXTRA_INPUT_JAR))
endef
@@ -2313,7 +2324,7 @@ endef
###########################################################
define transform-generated-source
-@echo "target Generated: $(PRIVATE_MODULE) <= $<"
+@echo -e ${CL_GRN}"target Generated:"${CL_RST}" $(PRIVATE_MODULE) <= $<"
@mkdir -p $(dir $@)
$(hide) $(PRIVATE_CUSTOM_TOOL)
endef
diff --git a/core/dex_preopt_odex_install.mk b/core/dex_preopt_odex_install.mk
index 73c8146..e6b4f3c 100644
--- a/core/dex_preopt_odex_install.mk
+++ b/core/dex_preopt_odex_install.mk
@@ -108,7 +108,7 @@ $(built_odex): PRIVATE_DEX_PREOPT_FLAGS := $(LOCAL_DEX_PREOPT_FLAGS)
$(installed_odex) : $(dir $(LOCAL_INSTALLED_MODULE))%$(notdir $(word 1,$(installed_odex))) \
: $(dir $(LOCAL_BUILT_MODULE))%$(notdir $(word 1,$(built_odex))) \
| $(ACP)
- @echo "Install: $@"
+ @echo -e ${CL_CYN}"Install: $@"${CL_RST}
$(copy-file-to-target)
endif
diff --git a/core/distdir.mk b/core/distdir.mk
index 51ec46e..829951e 100644
--- a/core/distdir.mk
+++ b/core/distdir.mk
@@ -37,7 +37,7 @@ ifdef dist_goal
define copy-one-dist-file
$(3): $(2)
$(2): $(1)
- @echo "Dist: $$@"
+ @echo -e ${CL_YLW}"Dist:"${CL_RST}" $$@"
$$(copy-file-to-new-target-with-cp)
endef
diff --git a/core/droiddoc.mk b/core/droiddoc.mk
index 41f135c..04161f9 100644
--- a/core/droiddoc.mk
+++ b/core/droiddoc.mk
@@ -162,7 +162,7 @@ $(full_target): \
$(full_java_lib_deps) \
$(LOCAL_MODULE_MAKEFILE) \
$(LOCAL_ADDITIONAL_DEPENDENCIES)
- @echo Docs droiddoc: $(PRIVATE_OUT_DIR)
+ @echo -e ${CL_YLW}"Docs droiddoc:"${CL_RST}" $(PRIVATE_OUT_DIR)"
$(hide) mkdir -p $(dir $@)
$(call prepare-doc-source-list,$(PRIVATE_SRC_LIST_FILE),$(PRIVATE_JAVA_FILES), \
$(PRIVATE_SOURCE_INTERMEDIATES_DIR) $(PRIVATE_ADDITIONAL_JAVA_DIR))
@@ -197,7 +197,7 @@ else
##
##
$(full_target): $(full_src_files) $(full_java_lib_deps)
- @echo Docs javadoc: $(PRIVATE_OUT_DIR)
+ @echo -e ${CL_YLW}"Docs javadoc:"${CL_RST}" $(PRIVATE_OUT_DIR)"
@mkdir -p $(dir $@)
$(call prepare-doc-source-list,$(PRIVATE_SRC_LIST_FILE),$(PRIVATE_JAVA_FILES), \
$(PRIVATE_SOURCE_INTERMEDIATES_DIR) $(PRIVATE_ADDITIONAL_JAVA_DIR))
@@ -237,7 +237,7 @@ ifeq ($(strip $(LOCAL_UNINSTALLABLE_MODULE)),)
out_zip := $(OUT_DOCS)/$(LOCAL_MODULE)-docs.zip
$(out_zip): PRIVATE_DOCS_DIR := $(out_dir)
$(out_zip): $(full_target)
- @echo Package docs: $@
+ @echo -e ${CL_YLW}"Package docs:"${CL_RST}" $@"
@rm -f $@
@mkdir -p $(dir $@)
$(hide) ( F=$$(pwd)/$@ ; cd $(PRIVATE_DOCS_DIR) && zip -rq $$F * )
diff --git a/core/dumpvar.mk b/core/dumpvar.mk
index 510bc7d..47ba476 100644
--- a/core/dumpvar.mk
+++ b/core/dumpvar.mk
@@ -67,6 +67,7 @@ HOST_OS_EXTRA:=$(shell python -c "import platform; print(platform.platform())")
$(info ============================================)
$(info PLATFORM_VERSION_CODENAME=$(PLATFORM_VERSION_CODENAME))
$(info PLATFORM_VERSION=$(PLATFORM_VERSION))
+$(info CM_VERSION=$(CM_VERSION))
$(info TARGET_PRODUCT=$(TARGET_PRODUCT))
$(info TARGET_BUILD_VARIANT=$(TARGET_BUILD_VARIANT))
$(info TARGET_BUILD_TYPE=$(TARGET_BUILD_TYPE))
@@ -83,5 +84,9 @@ $(info HOST_OS_EXTRA=$(HOST_OS_EXTRA))
$(info HOST_BUILD_TYPE=$(HOST_BUILD_TYPE))
$(info BUILD_ID=$(BUILD_ID))
$(info OUT_DIR=$(OUT_DIR))
+ifeq ($(CYNGN_TARGET),true)
+$(info CYNGN_TARGET=$(CYNGN_TARGET))
+$(info CYNGN_FEATURES=$(CYNGN_FEATURES))
+endif
$(info ============================================)
endif
diff --git a/core/dynamic_binary.mk b/core/dynamic_binary.mk
index 38c0cbe..a4224cd 100644
--- a/core/dynamic_binary.mk
+++ b/core/dynamic_binary.mk
@@ -57,6 +57,14 @@ ifeq ($(LOCAL_MODULE_CLASS),EXECUTABLES)
my_pack_module_relocations := false
endif
+# Likewise for recovery and utility executables
+ifeq ($(LOCAL_MODULE_CLASS),RECOVERY_EXECUTABLES)
+ my_pack_module_relocations := false
+endif
+ifeq ($(LOCAL_MODULE_CLASS),UTILITY_EXECUTABLES)
+ my_pack_module_relocations := false
+endif
+
# TODO (dimitry): Relocation packer is not yet available for darwin
ifneq ($(HOST_OS),linux)
my_pack_module_relocations := false
@@ -83,7 +91,7 @@ endif
symbolic_input := $(relocation_packer_output)
symbolic_output := $(my_unstripped_path)/$(my_installed_module_stem)
$(symbolic_output) : $(symbolic_input) | $(ACP)
- @echo "target Symbolic: $(PRIVATE_MODULE) ($@)"
+ @echo -e ${CL_GRN}"target Symbolic:"${CL_RST}" $(PRIVATE_MODULE) ($@)"
$(copy-file-to-target)
@@ -132,11 +140,11 @@ else
# use cp(1) instead.
ifneq ($(LOCAL_ACP_UNAVAILABLE),true)
$(strip_output): $(strip_input) | $(ACP)
- @echo "target Unstripped: $(PRIVATE_MODULE) ($@)"
+ @echo -e ${CL_GRN}"target Unstripped:"${CL_RST}" $(PRIVATE_MODULE) ($@)"
$(copy-file-to-target)
else
$(strip_output): $(strip_input)
- @echo "target Unstripped: $(PRIVATE_MODULE) ($@)"
+ @echo -e ${CL_GRN}"target Unstripped:"${CL_RST}" $(PRIVATE_MODULE) ($@)"
$(copy-file-to-target-with-cp)
endif
endif # my_strip_module
diff --git a/core/envsetup.mk b/core/envsetup.mk
index bf04455..6476934 100644
--- a/core/envsetup.mk
+++ b/core/envsetup.mk
@@ -178,8 +178,8 @@ board_config_mk :=
# Now we can substitute with the real value of TARGET_COPY_OUT_VENDOR
ifeq ($(TARGET_COPY_OUT_VENDOR),$(_vendor_path_placeholder))
TARGET_COPY_OUT_VENDOR := system/vendor
-else ifeq ($(filter vendor system/vendor,$(TARGET_COPY_OUT_VENDOR)),)
-$(error TARGET_COPY_OUT_VENDOR must be either 'vendor' or 'system/vendor', seeing '$(TARGET_COPY_OUT_VENDOR)'.)
+else ifeq ($(filter vendor system/vendor system,$(TARGET_COPY_OUT_VENDOR)),)
+$(error TARGET_COPY_OUT_VENDOR must be either 'vendor', 'system/vendor' or 'system', seeing '$(TARGET_COPY_OUT_VENDOR)'.)
endif
PRODUCT_COPY_FILES := $(subst $(_vendor_path_placeholder),$(TARGET_COPY_OUT_VENDOR),$(PRODUCT_COPY_FILES))
###########################################
@@ -207,8 +207,12 @@ endif
ifeq (,$(strip $(OUT_DIR)))
ifeq (,$(strip $(OUT_DIR_COMMON_BASE)))
+ifneq ($(TOPDIR),)
OUT_DIR := $(TOPDIR)out
else
+OUT_DIR := $(CURDIR)/out
+endif
+else
OUT_DIR := $(OUT_DIR_COMMON_BASE)/$(notdir $(PWD))
endif
endif
diff --git a/core/generate_extra_images.mk b/core/generate_extra_images.mk
new file mode 100644
index 0000000..f7fb0c5
--- /dev/null
+++ b/core/generate_extra_images.mk
@@ -0,0 +1,118 @@
+# This makefile is used to generate extra images for QCOM targets
+# persist, device tree & NAND images required for different QCOM targets.
+
+# These variables are required to make sure that the required
+# files/targets are available before generating NAND images.
+# This file is included from device/qcom/<TARGET>/AndroidBoard.mk
+# and gets parsed before build/core/Makefile, which has these
+# variables defined. build/core/Makefile will overwrite these
+# variables again.
+INSTALLED_BOOTIMAGE_TARGET := $(PRODUCT_OUT)/boot.img
+INSTALLED_RECOVERYIMAGE_TARGET := $(PRODUCT_OUT)/recovery.img
+INSTALLED_USERDATAIMAGE_TARGET := $(PRODUCT_OUT)/userdata.img
+
+#----------------------------------------------------------------------
+# Generate secure boot & recovery image
+#----------------------------------------------------------------------
+ifeq ($(TARGET_BOOTIMG_SIGNED),true)
+INSTALLED_SEC_BOOTIMAGE_TARGET := $(PRODUCT_OUT)/boot.img.secure
+INSTALLED_SEC_RECOVERYIMAGE_TARGET := $(PRODUCT_OUT)/recovery.img.secure
+
+ifneq ($(BUILD_TINY_ANDROID),true)
+intermediates := $(call intermediates-dir-for,PACKAGING,recovery_patch)
+RECOVERY_FROM_BOOT_PATCH := $(intermediates)/recovery_from_boot.p
+endif
+
+ifndef TARGET_SHA_TYPE
+ TARGET_SHA_TYPE := sha256
+endif
+
+define build-sec-image
+ $(hide) mv -f $(1) $(1).nonsecure
+ $(hide) openssl dgst -$(TARGET_SHA_TYPE) -binary $(1).nonsecure > $(1).$(TARGET_SHA_TYPE)
+ $(hide) openssl rsautl -sign -in $(1).$(TARGET_SHA_TYPE) -inkey $(PRODUCT_PRIVATE_KEY) -out $(1).sig
+ $(hide) dd if=/dev/zero of=$(1).sig.padded bs=$(BOARD_KERNEL_PAGESIZE) count=1
+ $(hide) dd if=$(1).sig of=$(1).sig.padded conv=notrunc
+ $(hide) cat $(1).nonsecure $(1).sig.padded > $(1).secure
+ $(hide) rm -rf $(1).$(TARGET_SHA_TYPE) $(1).sig $(1).sig.padded
+ $(hide) mv -f $(1).secure $(1)
+endef
+
+$(INSTALLED_SEC_BOOTIMAGE_TARGET): $(INSTALLED_BOOTIMAGE_TARGET) $(RECOVERY_FROM_BOOT_PATCH)
+ $(hide) $(call build-sec-image,$(INSTALLED_BOOTIMAGE_TARGET))
+
+ALL_DEFAULT_INSTALLED_MODULES += $(INSTALLED_SEC_BOOTIMAGE_TARGET)
+ALL_MODULES.$(LOCAL_MODULE).INSTALLED += $(INSTALLED_SEC_BOOTIMAGE_TARGET)
+
+$(INSTALLED_SEC_RECOVERYIMAGE_TARGET): $(INSTALLED_RECOVERYIMAGE_TARGET) $(RECOVERY_FROM_BOOT_PATCH)
+ $(hide) $(call build-sec-image,$(INSTALLED_RECOVERYIMAGE_TARGET))
+
+ifneq ($(BUILD_TINY_ANDROID),true)
+ALL_DEFAULT_INSTALLED_MODULES += $(INSTALLED_SEC_RECOVERYIMAGE_TARGET)
+ALL_MODULES.$(LOCAL_MODULE).INSTALLED += $(INSTALLED_SEC_RECOVERYIMAGE_TARGET)
+endif # !BUILD_TINY_ANDROID
+endif # TARGET_BOOTIMG_SIGNED
+
+#----------------------------------------------------------------------
+# Generate persist image (persist.img)
+#----------------------------------------------------------------------
+TARGET_OUT_PERSIST := $(PRODUCT_OUT)/persist
+
+INTERNAL_PERSISTIMAGE_FILES := \
+ $(filter $(TARGET_OUT_PERSIST)/%,$(ALL_DEFAULT_INSTALLED_MODULES))
+
+INSTALLED_PERSISTIMAGE_TARGET := $(PRODUCT_OUT)/persist.img
+
+define build-persistimage-target
+ $(call pretty,"Target persist fs image: $(INSTALLED_PERSISTIMAGE_TARGET)")
+ @mkdir -p $(TARGET_OUT_PERSIST)
+ $(hide) $(MKEXTUSERIMG) -s $(TARGET_OUT_PERSIST) $@ ext4 persist $(BOARD_PERSISTIMAGE_PARTITION_SIZE)
+ $(hide) chmod a+r $@
+ $(hide) $(call assert-max-image-size,$@,$(BOARD_PERSISTIMAGE_PARTITION_SIZE),yaffs)
+endef
+
+$(INSTALLED_PERSISTIMAGE_TARGET): $(MKEXTUSERIMG) $(MAKE_EXT4FS) $(INTERNAL_PERSISTIMAGE_FILES)
+ $(build-persistimage-target)
+
+ALL_DEFAULT_INSTALLED_MODULES += $(INSTALLED_PERSISTIMAGE_TARGET)
+ALL_MODULES.$(LOCAL_MODULE).INSTALLED += $(INSTALLED_PERSISTIMAGE_TARGET)
+
+#----------------------------------------------------------------------
+# Generate extra userdata images (for variants with multiple mmc sizes)
+#----------------------------------------------------------------------
+ifneq ($(BOARD_USERDATAEXTRAIMAGE_PARTITION_SIZE),)
+
+ifndef BOARD_USERDATAEXTRAIMAGE_PARTITION_NAME
+ BOARD_USERDATAEXTRAIMAGE_PARTITION_NAME := extra
+endif
+
+BUILT_USERDATAEXTRAIMAGE_TARGET := $(PRODUCT_OUT)/userdata_$(BOARD_USERDATAEXTRAIMAGE_PARTITION_NAME).img
+
+define build-userdataextraimage-target
+ $(call pretty,"Target EXTRA userdata fs image: $(INSTALLED_USERDATAEXTRAIMAGE_TARGET)")
+ @mkdir -p $(TARGET_OUT_DATA)
+ $(hide) $(MKEXTUSERIMG) -s $(TARGET_OUT_DATA) $@ ext4 data $(BOARD_USERDATAEXTRAIMAGE_PARTITION_SIZE)
+ $(hide) chmod a+r $@
+ $(hide) $(call assert-max-image-size,$@,$(BOARD_USERDATAEXTRAIMAGE_PARTITION_SIZE),yaffs)
+endef
+
+INSTALLED_USERDATAEXTRAIMAGE_TARGET := $(BUILT_USERDATAEXTRAIMAGE_TARGET)
+$(INSTALLED_USERDATAEXTRAIMAGE_TARGET): $(INSTALLED_USERDATAIMAGE_TARGET)
+ $(build-userdataextraimage-target)
+
+ALL_DEFAULT_INSTALLED_MODULES += $(INSTALLED_USERDATAEXTRAIMAGE_TARGET)
+ALL_MODULES.$(LOCAL_MODULE).INSTALLED += $(INSTALLED_USERDATAEXTRAIMAGE_TARGET)
+
+endif
+
+.PHONY: aboot
+aboot: $(INSTALLED_BOOTLOADER_MODULE)
+
+.PHONY: sec_bootimage
+sec_bootimage: $(INSTALLED_BOOTIMAGE_TARGET) $(INSTALLED_SEC_BOOTIMAGE_TARGET)
+
+.PHONY: sec_recoveryimage
+sec_recoveryimage: $(INSTALLED_RECOVERYIMAGE_TARGET) $(INSTALLED_SEC_RECOVERYIMAGE_TARGET)
+
+.PHONY: persistimage
+persistimage: $(INSTALLED_PERSISTIMAGE_TARGET)
diff --git a/core/host_java_library.mk b/core/host_java_library.mk
index 47189d7..2a2fc7d 100644
--- a/core/host_java_library.mk
+++ b/core/host_java_library.mk
@@ -61,7 +61,7 @@ $(full_classes_emma_jar) : $(full_classes_compiled_jar) | $(EMMA_JAR)
$(transform-classes.jar-to-emma)
$(built_javalib_jar) : $(full_classes_emma_jar)
- @echo Copying: $@
+ @echo -e ${CL_YLW}"Copying:"${CL_RST}" $@"
$(hide) $(ACP) -fp $< $@
else # LOCAL_EMMA_INSTRUMENT
@@ -70,7 +70,6 @@ full_classes_compiled_jar := $(built_javalib_jar)
endif # LOCAL_EMMA_INSTRUMENT
$(full_classes_compiled_jar): PRIVATE_JAVAC_DEBUG_FLAGS := -g
-
# The layers file allows you to enforce a layering between java packages.
# Run build/tools/java-layers.py for more details.
layers_file := $(addprefix $(LOCAL_PATH)/, $(LOCAL_JAVA_LAYERS_FILE))
diff --git a/core/java.mk b/core/java.mk
index bac5ca7..f99e6ef 100644
--- a/core/java.mk
+++ b/core/java.mk
@@ -358,7 +358,7 @@ ifdef full_classes_jar
# PRIVATE_ vars to be preserved.
$(full_classes_stubs_jar): PRIVATE_SOURCE_FILE := $(full_classes_jar)
$(full_classes_stubs_jar) : $(full_classes_jar) | $(ACP)
- @echo Copying $(PRIVATE_SOURCE_FILE)
+ @echo -e ${CL_GRN}"Copying"${CL_RST}" $(PRIVATE_SOURCE_FILE)"
$(hide) $(ACP) -fp $(PRIVATE_SOURCE_FILE) $@
ALL_MODULES.$(LOCAL_MODULE).STUBS := $(full_classes_stubs_jar)
@@ -399,11 +399,11 @@ $(full_classes_compiled_jar): PRIVATE_JAVAC_DEBUG_FLAGS := -g
ifneq ($(strip $(LOCAL_JARJAR_RULES)),)
$(full_classes_jarjar_jar): PRIVATE_JARJAR_RULES := $(LOCAL_JARJAR_RULES)
$(full_classes_jarjar_jar): $(full_classes_compiled_jar) $(LOCAL_JARJAR_RULES) | $(JARJAR)
- @echo JarJar: $@
+ @echo -e ${CL_GRN}"JarJar:"${CL_RST}" $@"
$(hide) java -jar $(JARJAR) process $(PRIVATE_JARJAR_RULES) $< $@
else
$(full_classes_jarjar_jar): $(full_classes_compiled_jar) | $(ACP)
- @echo Copying: $@
+ @echo -e ${CL_GRN}"Copying:"${CL_RST}" $@"
$(hide) $(ACP) -fp $< $@
endif
@@ -426,13 +426,13 @@ $(full_classes_emma_jar): $(full_classes_jarjar_jar) | $(EMMA_JAR)
else
$(full_classes_emma_jar): $(full_classes_jarjar_jar) | $(ACP)
- @echo Copying: $@
+ @echo -e ${CL_GRN}"Copying:"${CL_RST}" $@"
$(copy-file-to-target)
endif
# Keep a copy of the jar just before proguard processing.
$(full_classes_jar): $(full_classes_emma_jar) | $(ACP)
- @echo Copying: $@
+ @echo -e ${CL_GRN}"Copying:"${CL_RST}" $@"
$(hide) $(ACP) -fp $< $@
# Run proguard if necessary, otherwise just copy the file.
@@ -561,7 +561,7 @@ $(built_dex_intermediate): $(full_classes_proguard_jar) $(DX)
endif # LOCAL_JACK_ENABLED is disabled
$(built_dex): $(built_dex_intermediate) | $(ACP)
- @echo Copying: $@
+ @echo -e ${CL_GRN}"Copying:"${CL_RST}" $@"
$(hide) mkdir -p $(dir $@)
$(hide) rm -f $(dir $@)/classes*.dex
$(hide) $(ACP) -fp $(dir $<)/classes*.dex $(dir $@)
diff --git a/core/java_library.mk b/core/java_library.mk
index 5a2d19b..a954d87 100644
--- a/core/java_library.mk
+++ b/core/java_library.mk
@@ -66,7 +66,7 @@ $(common_javalib.jar) : $(full_classes_proguard_jar)
else
$(common_javalib.jar) : $(full_classes_jar)
endif
- @echo "target Static Jar: $(PRIVATE_MODULE) ($@)"
+ @echo -e ${CL_GRN}"target Static Jar:"${CL_RST}" $(PRIVATE_MODULE) ($@)"
$(copy-file-to-target)
ifdef LOCAL_JACK_ENABLED
@@ -82,7 +82,7 @@ $(common_javalib.jar): PRIVATE_DEX_FILE := $(built_dex)
$(common_javalib.jar): PRIVATE_SOURCE_ARCHIVE := $(full_classes_jarjar_jar)
$(common_javalib.jar): PRIVATE_DONT_DELETE_JAR_DIRS := $(LOCAL_DONT_DELETE_JAR_DIRS)
$(common_javalib.jar) : $(built_dex) $(java_resource_sources)
- @echo "target Jar: $(PRIVATE_MODULE) ($@)"
+ @echo -e ${CL_GRN}"target Jar:"${CL_RST}" $(PRIVATE_MODULE) ($@)"
ifdef LOCAL_JACK_ENABLED
$(create-empty-package)
else
@@ -105,7 +105,7 @@ else # ! boot jar
$(built_odex): PRIVATE_MODULE := $(LOCAL_MODULE)
# Use pattern rule - we may have multiple built odex files.
$(built_odex) : $(dir $(LOCAL_BUILT_MODULE))% : $(common_javalib.jar)
- @echo "Dexpreopt Jar: $(PRIVATE_MODULE) ($@)"
+ @echo -e ${CL_GRN}"Dexpreopt Jar:"${CL_RST}" $(PRIVATE_MODULE) ($@)"
$(call dexpreopt-one-file,$<,$@)
$(LOCAL_BUILT_MODULE) : $(common_javalib.jar) | $(ACP)
diff --git a/core/main.mk b/core/main.mk
index a6f829a..6560375 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -96,16 +96,26 @@ include $(BUILD_SYSTEM)/config.mk
# CTS-specific config.
-include cts/build/config.mk
+# CMTS-specific config.
+-include vendor/cmts/build/config.mk
+
# This allows us to force a clean build - included after the config.mk
# environment setup is done, but before we generate any dependencies. This
# file does the rm -rf inline so the deps which are all done below will
# be generated correctly
include $(BUILD_SYSTEM)/cleanbuild.mk
+# Bring in Qualcomm helper macros
+include $(BUILD_SYSTEM)/qcom_utils.mk
+
+# Bring in Mediatek helper macros too
+include $(BUILD_SYSTEM)/mtk_utils.mk
+
# Include the google-specific config
-include vendor/google/build/config.mk
VERSION_CHECK_SEQUENCE_NUMBER := 5
+
-include $(OUT_DIR)/versions_checked.mk
ifneq ($(VERSION_CHECK_SEQUENCE_NUMBER),$(VERSIONS_CHECKED))
@@ -143,19 +153,11 @@ endif
java_version_str := $(shell unset _JAVA_OPTIONS && java -version 2>&1)
javac_version_str := $(shell unset _JAVA_OPTIONS && javac -version 2>&1)
-# Check for the correct version of java, should be 1.7 by
-# default, and 1.8 if EXPERIMENTAL_USE_JAVA8 is set
-ifneq ($(EXPERIMENTAL_USE_JAVA8),)
-required_version := "1.8.x"
-required_javac_version := "1.8"
-java_version := $(shell echo '$(java_version_str)' | grep 'openjdk .*[ "]1\.8[\. "$$]')
-javac_version := $(shell echo '$(javac_version_str)' | grep '[ "]1\.8[\. "$$]')
-else # default
-required_version := "1.7.x"
-required_javac_version := "1.7"
-java_version := $(shell echo '$(java_version_str)' | grep '^java .*[ "]1\.7[\. "$$]')
-javac_version := $(shell echo '$(javac_version_str)' | grep '[ "]1\.7[\. "$$]')
-endif # if EXPERIMENTAL_USE_JAVA8
+# Check for the correct version of java, should be 1.7 or 1.8
+required_version := "1.7.x or 1.8.x"
+required_javac_version := "1.7 or 1.8"
+java_version := $(shell echo '$(java_version_str)' | grep '[ "]1\.[78][\. "$$]')
+javac_version := $(shell echo '$(javac_version_str)' | grep '[ "]1\.[78][\. "$$]')
ifeq ($(strip $(java_version)),)
$(info ************************************************************)
@@ -171,39 +173,6 @@ $(info ************************************************************)
$(error stop)
endif
-# Check for the current JDK.
-#
-# For Java 1.7, we require OpenJDK on linux and Oracle JDK on Mac OS.
-requires_openjdk := false
-ifeq ($(HOST_OS), linux)
-requires_openjdk := true
-endif
-
-
-# Check for the current jdk
-ifeq ($(requires_openjdk), true)
-# The user asked for java7 openjdk, so check that the host
-# java version is really openjdk
-ifeq ($(shell echo '$(java_version_str)' | grep -i openjdk),)
-$(info ************************************************************)
-$(info You asked for an OpenJDK 7 build but your version is)
-$(info $(java_version_str).)
-$(info ************************************************************)
-$(error stop)
-endif # java version is not OpenJdk
-else # if requires_openjdk
-ifneq ($(shell echo '$(java_version_str)' | grep -i openjdk),)
-$(info ************************************************************)
-$(info You are attempting to build with an unsupported JDK.)
-$(info $(space))
-$(info You use OpenJDK but only Sun/Oracle JDK is supported.)
-$(info Please follow the machine setup instructions at)
-$(info $(space)$(space)$(space)$(space)https://source.android.com/source/download.html)
-$(info ************************************************************)
-$(error stop)
-endif # java version is not Sun Oracle JDK
-endif # if requires_openjdk
-
# Check for the correct version of javac
ifeq ($(strip $(javac_version)),)
$(info ************************************************************)
@@ -295,7 +264,7 @@ endif
# Add build properties for ART. These define system properties used by installd
# to pass flags to dex2oat.
-ADDITIONAL_BUILD_PROPERTIES += persist.sys.dalvik.vm.lib.2=libart
+ADDITIONAL_BUILD_PROPERTIES += persist.sys.dalvik.vm.lib.2=libart.so
ADDITIONAL_BUILD_PROPERTIES += dalvik.vm.isa.$(TARGET_ARCH).variant=$(DEX2OAT_TARGET_CPU_VARIANT)
ifneq ($(DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES),)
ADDITIONAL_BUILD_PROPERTIES += dalvik.vm.isa.$(TARGET_ARCH).features=$(DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES)
@@ -469,7 +438,12 @@ endif
ifneq ($(ONE_SHOT_MAKEFILE),)
# We've probably been invoked by the "mm" shell function
# with a subdirectory's makefile.
+
+# No Makefiles to include if we are performing a mms/short-circuit build. Only
+# the targets mentioned by main.mk and tasks/* are built (kernel, boot.img etc)
+ifneq ($(ONE_SHOT_MAKEFILE),__none__)
include $(ONE_SHOT_MAKEFILE)
+endif
# Change CUSTOM_MODULES to include only modules that were
# defined by this makefile; this will install all of those
# modules as a side-effect. Do this after including ONE_SHOT_MAKEFILE
@@ -500,7 +474,7 @@ ifneq ($(dont_bother),true)
subdir_makefiles := \
$(shell build/tools/findleaves.py $(FIND_LEAVES_EXCLUDES) $(subdirs) Android.mk)
-$(foreach mk, $(subdir_makefiles), $(info including $(mk) ...)$(eval include $(mk)))
+$(foreach mk, $(subdir_makefiles), $(eval include $(mk)))
endif # dont_bother
@@ -1021,7 +995,7 @@ $(foreach module,$(sample_MODULES),$(eval $(call \
sample_ADDITIONAL_INSTALLED := \
$(filter-out $(modules_to_install) $(modules_to_check) $(ALL_PREBUILT),$(sample_MODULES))
samplecode: $(sample_APKS_COLLECTION)
- @echo "Collect sample code apks: $^"
+ @echo -e ${CL_GRN}"Collect sample code apks:"${CL_RST}" $^"
# remove apks that are not intended to be installed.
rm -f $(sample_ADDITIONAL_INSTALLED)
endif # samplecode in $(MAKECMDGOALS)
@@ -1032,7 +1006,7 @@ findbugs: $(INTERNAL_FINDBUGS_HTML_TARGET) $(INTERNAL_FINDBUGS_XML_TARGET)
.PHONY: clean
clean:
@rm -rf $(OUT_DIR)/*
- @echo "Entire build directory removed."
+ @echo -e ${CL_GRN}"Entire build directory removed."${CL_RST}
.PHONY: clobber
clobber: clean
@@ -1042,7 +1016,7 @@ clobber: clean
#xxx scrape this from ALL_MODULE_NAME_TAGS
.PHONY: modules
modules:
- @echo "Available sub-modules:"
+ @echo -e ${CL_GRN}"Available sub-modules:"${CL_RST}
@echo "$(call module-names-for-tag-list,$(ALL_MODULE_TAGS))" | \
tr -s ' ' '\n' | sort -u | $(COLUMN)
diff --git a/core/mtk_target.mk b/core/mtk_target.mk
new file mode 100644
index 0000000..7c3ba1f
--- /dev/null
+++ b/core/mtk_target.mk
@@ -0,0 +1,13 @@
+ifeq ($(BOARD_USES_MTK_HARDWARE),true)
+ mtk_flags := -DMTK_HARDWARE
+
+ TARGET_GLOBAL_CFLAGS += $(mtk_flags)
+ TARGET_GLOBAL_CPPFLAGS += $(mtk_flags)
+ CLANG_TARGET_GLOBAL_CFLAGS += $(mtk_flags)
+ CLANG_TARGET_GLOBAL_CPPFLAGS += $(mtk_flags)
+
+ 2ND_TARGET_GLOBAL_CFLAGS += $(mtk_flags)
+ 2ND_TARGET_GLOBAL_CPPFLAGS += $(mtk_flags)
+ 2ND_CLANG_TARGET_GLOBAL_CFLAGS += $(mtk_flags)
+ 2ND_CLANG_TARGET_GLOBAL_CPPFLAGS += $(mtk_flags)
+endif
diff --git a/core/mtk_utils.mk b/core/mtk_utils.mk
new file mode 100755
index 0000000..48fd660
--- /dev/null
+++ b/core/mtk_utils.mk
@@ -0,0 +1,5 @@
+# Board platforms lists to be used for
+# TARGET_BOARD_PLATFORM specific featurization
+MTK_BOARD_PLATFORMS := mt6592
+MTK_BOARD_PLATFORMS += mt6582
+MTK_BOARD_PLATFORMS += mt6572
diff --git a/core/notice_files.mk b/core/notice_files.mk
index 43a5435..184d62c 100644
--- a/core/notice_files.mk
+++ b/core/notice_files.mk
@@ -62,7 +62,7 @@ installed_notice_file := $($(my_prefix)OUT_NOTICE_FILES)/src/$(module_installed_
$(installed_notice_file): PRIVATE_INSTALLED_MODULE := $(module_installed_filename)
$(installed_notice_file): $(notice_file)
- @echo Notice file: $< -- $@
+ @echo -e ${CL_CYN}"Notice file:"${CL_RST}" $< -- $@"
$(hide) mkdir -p $(dir $@)
$(hide) cat $< > $@
diff --git a/core/package_internal.mk b/core/package_internal.mk
index 23648c1..27fe9b4 100644
--- a/core/package_internal.mk
+++ b/core/package_internal.mk
@@ -320,11 +320,24 @@ framework_res_package_export_deps := $(framework_res_package_export)
else # LOCAL_SDK_RES_VERSION
framework_res_package_export := \
$(call intermediates-dir-for,APPS,framework-res,,COMMON)/package-export.apk
+
+# Avoid possible circular dependency with our platform-res
+ifneq ($(LOCAL_IGNORE_SUBDIR), true)
+cm_plat_res_package_export := \
+ $(call intermediates-dir-for,APPS,org.cyanogenmod.platform-res,,COMMON)/package-export.apk
+endif # LOCAL_IGNORE_SUBDIR
+
# We can't depend directly on the export.apk file; it won't get its
# PRIVATE_ vars set up correctly if we do. Instead, depend on the
# corresponding R.stamp file, which lists the export.apk as a dependency.
framework_res_package_export_deps := \
$(dir $(framework_res_package_export))src/R.stamp
+
+ifneq ($(LOCAL_IGNORE_SUBDIR), true)
+cm_plat_res_package_export_deps := \
+ $(dir $(cm_plat_res_package_export))src/R.stamp
+endif # LOCAL_IGNORE_SUBDIR
+
endif # LOCAL_SDK_RES_VERSION
all_library_res_package_exports := \
$(framework_res_package_export) \
@@ -336,6 +349,13 @@ all_library_res_package_export_deps := \
$(foreach lib,$(LOCAL_RES_LIBRARIES),\
$(call intermediates-dir-for,APPS,$(lib),,COMMON)/src/R.stamp)
+ifneq ($(LOCAL_IGNORE_SUBDIR), true)
+all_library_res_package_exports += \
+ $(cm_plat_res_package_export)
+all_library_res_package_export_deps += \
+ $(cm_plat_res_package_export_deps)
+endif # LOCAL_IGNORE_SUBDIR
+
$(resource_export_package) $(R_file_stamp) $(LOCAL_BUILT_MODULE): $(all_library_res_package_export_deps)
$(LOCAL_INTERMEDIATE_TARGETS): \
PRIVATE_AAPT_INCLUDES := $(all_library_res_package_exports)
diff --git a/core/pathmap.mk b/core/pathmap.mk
index b300ff5..a23aafc 100644
--- a/core/pathmap.mk
+++ b/core/pathmap.mk
@@ -41,7 +41,6 @@ pathmap_INCL := \
libstdc++:bionic/libstdc++/include \
mkbootimg:system/core/mkbootimg \
opengl-tests-includes:frameworks/native/opengl/tests/include \
- recovery:bootable/recovery \
system-core:system/core/include \
audio:system/media/audio/include \
audio-effects:system/media/audio_effects/include \
@@ -63,6 +62,36 @@ define include-path-for
$(foreach n,$(1),$(patsubst $(n):%,%,$(filter $(n):%,$(pathmap_INCL))))
endef
+# Enter project path into pathmap
+#
+# $(1): name
+# $(2): path
+#
+define project-set-path
+$(eval pathmap_PROJ += $(1):$(2))
+endef
+
+# Enter variant project path into pathmap
+#
+# $(1): name
+# $(2): variable to check
+# $(3): base path
+#
+define project-set-path-variant
+ $(call project-set-path,$(1),$(strip \
+ $(if $($(2)), \
+ $(3)-$($(2)), \
+ $(3))))
+endef
+
+# Returns the path to the requested module's include directory,
+# relative to the root of the source tree.
+#
+# $(1): a list of modules (or other named entities) to find the projects for
+define project-path-for
+$(foreach n,$(1),$(patsubst $(n):%,%,$(filter $(n):%,$(pathmap_PROJ))))
+endef
+
#
# Many modules expect to be able to say "#include <jni.h>",
# so make it easy for them to find the correct path.
diff --git a/core/pdk_config.mk b/core/pdk_config.mk
index 262b50e..148c370 100644
--- a/core/pdk_config.mk
+++ b/core/pdk_config.mk
@@ -116,7 +116,7 @@ endif
endif
$(_pdk_fusion_stamp) : $(PDK_FUSION_PLATFORM_ZIP)
- @echo "Unzip $(dir $@) <- $<"
+ @echo -e ${CL_YLW}"Unzip"${CL_RST}" $(dir $@) <- $<"
$(hide) rm -rf $(dir $@) && mkdir -p $(dir $@)
$(hide) unzip -qo $< -d $(dir $@)
$(call split-long-arguments,-touch,$(_pdk_fusion_files))
diff --git a/core/prebuilt_internal.mk b/core/prebuilt_internal.mk
index 0406353..2a4b716 100644
--- a/core/prebuilt_internal.mk
+++ b/core/prebuilt_internal.mk
@@ -190,6 +190,11 @@ ifeq ($(DONT_DEXPREOPT_PREBUILTS),true)
LOCAL_DEX_PREOPT := false
endif
+# Disable dex-preopt of specific prebuilts to save space, if requested.
+ifneq ($(filter $(DEXPREOPT_BLACKLIST),$(LOCAL_MODULE)),)
+LOCAL_DEX_PREOPT := false
+endif
+
#######################################
# defines built_odex along with rule to install odex
include $(BUILD_SYSTEM)/dex_preopt_odex_install.mk
@@ -211,7 +216,9 @@ $(built_module): PRIVATE_EMBEDDED_JNI_LIBS := $(embedded_prebuilt_jni_libs)
$(built_module) : $(my_prebuilt_src_file) | $(ACP) $(ZIPALIGN) $(SIGNAPK_JAR)
$(transform-prebuilt-to-target)
+ifneq ($(LOCAL_MODULE_PATH),$(TARGET_OUT_VENDOR)/bundled-app)
$(uncompress-shared-libs)
+endif
ifneq ($(LOCAL_CERTIFICATE),PRESIGNED)
@# Only strip out files if we can re-sign the package.
ifdef LOCAL_DEX_PREOPT
@@ -253,7 +260,7 @@ $(built_apk_splits) : $(built_module_path)/%.apk : $(my_src_dir)/%.apk | $(ACP)
# Rules to install the split apks.
$(installed_apk_splits) : $(my_module_path)/%.apk : $(built_module_path)/%.apk | $(ACP)
- @echo "Install: $@"
+ @echo -e ${CL_CYN}"Install:"${CL_RST}" $@"
$(copy-file-to-new-target)
# Register the additional built and installed files.
diff --git a/core/product.mk b/core/product.mk
index e97cba4..cced554 100644
--- a/core/product.mk
+++ b/core/product.mk
@@ -65,6 +65,8 @@ endef
#
_product_var_list := \
+ PRODUCT_BOOTANIMATION \
+ PRODUCT_BUILD_PROP_OVERRIDES \
PRODUCT_NAME \
PRODUCT_MODEL \
PRODUCT_LOCALES \
@@ -82,6 +84,7 @@ _product_var_list := \
PRODUCT_DEFAULT_PROPERTY_OVERRIDES \
PRODUCT_CHARACTERISTICS \
PRODUCT_COPY_FILES \
+ PRODUCT_COPY_FILES_OVERRIDES \
PRODUCT_OTA_PUBLIC_KEYS \
PRODUCT_EXTRA_RECOVERY_KEYS \
PRODUCT_PACKAGE_OVERLAYS \
@@ -125,23 +128,65 @@ $(foreach p,$(PRODUCTS),$(call dump-product,$(p)))
endef
#
+# Internal function. Appends inherited product variables to an existing one.
+#
+# $(1): Product variable to operate on
+# $(2): Value to append
+#
+define inherit-product_append-var
+ $(eval $(1) := $($(1)) $(INHERIT_TAG)$(strip $(2)))
+endef
+
+#
+# Internal function. Prepends inherited product variables to an existing one.
+#
+# $(1): Product variable to operate on
+# $(2): Value to prepend
+#
+define inherit-product_prepend-var
+ $(eval $(1) := $(INHERIT_TAG)$(strip $(2)) $($(1)))
+endef
+
+#
+# Internal function. Tracks visited nodes during inheritance resolution.
+#
+# $(1): Product being inherited
+#
+define inherit-product_track-node
+ $(eval inherit_var := \
+ PRODUCTS.$(strip $(word 1,$(_include_stack))).INHERITS_FROM) \
+ $(eval $(inherit_var) := $(sort $($(inherit_var)) $(strip $(1)))) \
+ $(eval inherit_var:=) \
+ $(eval ALL_PRODUCTS := $(sort $(ALL_PRODUCTS) $(word 1,$(_include_stack))))
+endef
+
+#
# $(1): product to inherit
#
# Does three things:
-# 1. Inherits all of the variables from $1.
+# 1. Inherits all of the variables from $1, prioritizing existing settings.
# 2. Records the inheritance in the .INHERITS_FROM variable
# 3. Records that we've visited this node, in ALL_PRODUCTS
#
define inherit-product
$(foreach v,$(_product_var_list), \
- $(eval $(v) := $($(v)) $(INHERIT_TAG)$(strip $(1)))) \
- $(eval inherit_var := \
- PRODUCTS.$(strip $(word 1,$(_include_stack))).INHERITS_FROM) \
- $(eval $(inherit_var) := $(sort $($(inherit_var)) $(strip $(1)))) \
- $(eval inherit_var:=) \
- $(eval ALL_PRODUCTS := $(sort $(ALL_PRODUCTS) $(word 1,$(_include_stack))))
+ $(call inherit-product_append-var,$(v),$(1))) \
+ $(call inherit-product_track-node,$(1))
endef
+#
+# $(1): product to inherit
+#
+# Does three things:
+# 1. Inherits all of the variables from $1, prioritizing inherited settings.
+# 2. Records the inheritance in the .INHERITS_FROM variable
+# 3. Records that we've visited this node, in ALL_PRODUCTS
+#
+define prepend-product
+ $(foreach v,$(_product_var_list), \
+ $(call inherit-product_prepend-var,$(v),$(1))) \
+ $(call inherit-product_track-node,$(1))
+endef
#
# Do inherit-product only if $(1) exists
@@ -151,6 +196,13 @@ define inherit-product-if-exists
endef
#
+# Do inherit-product-prepend only if $(1) exists
+#
+define prepend-product-if-exists
+ $(if $(wildcard $(1)),$(call prepend-product,$(1)),)
+endef
+
+#
# $(1): product makefile list
#
#TODO: check to make sure that products have all the necessary vars defined
@@ -275,6 +327,10 @@ _product_stash_var_list += \
GLOBAL_CFLAGS_NO_OVERRIDE \
GLOBAL_CPPFLAGS_NO_OVERRIDE \
+_product_stash_var_list += \
+ TARGET_SKIP_DEFAULT_LOCALE \
+ TARGET_SKIP_PRODUCT_DEVICE \
+
#
# Stash values of the variables in _product_stash_var_list.
# $(1): Renamed prefix
diff --git a/core/product_config.mk b/core/product_config.mk
index 5240ae7..259d983 100644
--- a/core/product_config.mk
+++ b/core/product_config.mk
@@ -117,7 +117,7 @@ ifdef product_goals
# which really means TARGET_PRODUCT=dream make installclean.
ifneq ($(filter-out $(INTERNAL_VALID_VARIANTS),$(TARGET_BUILD_VARIANT)),)
MAKECMDGOALS := $(MAKECMDGOALS) $(TARGET_BUILD_VARIANT)
- TARGET_BUILD_VARIANT := eng
+ TARGET_BUILD_VARIANT := userdebug
default_goal_substitution :=
else
default_goal_substitution := $(DEFAULT_GOAL)
@@ -179,16 +179,22 @@ include $(BUILD_SYSTEM)/node_fns.mk
include $(BUILD_SYSTEM)/product.mk
include $(BUILD_SYSTEM)/device.mk
-ifneq ($(strip $(TARGET_BUILD_APPS)),)
-# An unbundled app build needs only the core product makefiles.
-all_product_configs := $(call get-product-makefiles,\
- $(SRC_TARGET_DIR)/product/AndroidProducts.mk)
+# A CM build needs only the CM product makefiles.
+ifneq ($(CM_BUILD),)
+ all_product_configs := $(shell find device -path "*/$(CM_BUILD)/cm.mk")
else
-# Read in all of the product definitions specified by the AndroidProducts.mk
-# files in the tree.
-all_product_configs := $(get-all-product-makefiles)
-endif
+ ifneq ($(strip $(TARGET_BUILD_APPS)),)
+ # An unbundled app build needs only the core product makefiles.
+ all_product_configs := $(call get-product-makefiles,\
+ $(SRC_TARGET_DIR)/product/AndroidProducts.mk)
+ else
+ # Read in all of the product definitions specified by the AndroidProducts.mk
+ # files in the tree.
+ all_product_configs := $(get-all-product-makefiles)
+ endif # TARGET_BUILD_APPS
+endif # CM_BUILD
+ifeq ($(CM_BUILD),)
# Find the product config makefile for the current product.
# all_product_configs consists items like:
# <product_name>:<path_to_the_product_makefile>
@@ -207,12 +213,18 @@ $(foreach f, $(all_product_configs),\
$(eval all_product_makefiles += $(f))\
$(if $(filter $(TARGET_PRODUCT),$(basename $(notdir $(f)))),\
$(eval current_product_makefile += $(f)),)))
+
_cpm_words :=
_cpm_word1 :=
_cpm_word2 :=
+else
+ current_product_makefile := $(strip $(all_product_configs))
+ all_product_makefiles := $(strip $(all_product_configs))
+endif
current_product_makefile := $(strip $(current_product_makefile))
all_product_makefiles := $(strip $(all_product_makefiles))
+
ifneq (,$(filter product-graph dump-products, $(MAKECMDGOALS)))
# Import all product makefiles.
$(call import-products, $(all_product_makefiles))
@@ -331,6 +343,28 @@ endif
# The optional :<owner> is used to indicate the owner of a vendor file.
PRODUCT_COPY_FILES := \
$(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_COPY_FILES))
+_boot_animation := $(strip $(lastword $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_BOOTANIMATION)))
+ifneq ($(_boot_animation),)
+PRODUCT_COPY_FILES += \
+ $(_boot_animation):system/media/bootanimation.zip
+endif
+_boot_animation :=
+
+# We might want to skip items listed in PRODUCT_COPY_FILES for
+# various reasons. This is useful for replacing a binary module with one
+# built from source. This should be a list of destination files under $OUT
+PRODUCT_COPY_FILES_OVERRIDES := \
+ $(addprefix %:, $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_COPY_FILES_OVERRIDES)))
+
+ifneq ($(PRODUCT_COPY_FILES_OVERRIDES),)
+ PRODUCT_COPY_FILES := $(filter-out $(PRODUCT_COPY_FILES_OVERRIDES), $(PRODUCT_COPY_FILES))
+endif
+
+.PHONY: listcopies
+listcopies:
+ @echo "Copy files: $(PRODUCT_COPY_FILES)"
+ @echo "Overrides: $(PRODUCT_COPY_FILES_OVERRIDES)"
+
# A list of property assignments, like "key = value", with zero or more
# whitespace characters on either side of the '='.
@@ -343,6 +377,9 @@ PRODUCT_PROPERTY_OVERRIDES := \
PRODUCT_DEFAULT_PROPERTY_OVERRIDES := \
$(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_DEFAULT_PROPERTY_OVERRIDES))
+PRODUCT_BUILD_PROP_OVERRIDES := \
+ $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_BUILD_PROP_OVERRIDES))
+
# Should we use the default resources or add any product specific overlays
PRODUCT_PACKAGE_OVERLAYS := \
$(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_PACKAGE_OVERLAYS))
diff --git a/core/qcom_target.mk b/core/qcom_target.mk
new file mode 100644
index 0000000..2dde2d9
--- /dev/null
+++ b/core/qcom_target.mk
@@ -0,0 +1,128 @@
+# Target-specific configuration
+
+# Populate the qcom hardware variants in the project pathmap.
+define ril-set-path-variant
+$(call project-set-path-variant,ril,TARGET_RIL_VARIANT,hardware/$(1))
+endef
+define wlan-set-path-variant
+$(call project-set-path-variant,wlan,TARGET_WLAN_VARIANT,hardware/qcom/$(1))
+endef
+define bt-vendor-set-path-variant
+$(call project-set-path-variant,bt-vendor,TARGET_BT_VENDOR_VARIANT,hardware/qcom/$(1))
+endef
+
+# Set device-specific HALs into project pathmap
+define set-device-specific-path
+$(if $(USE_DEVICE_SPECIFIC_$(1)), \
+ $(if $(DEVICE_SPECIFIC_$(1)_PATH), \
+ $(eval path := $(DEVICE_SPECIFIC_$(1)_PATH)), \
+ $(eval path := $(TARGET_DEVICE_DIR)/$(2))), \
+ $(eval path := $(3))) \
+$(call project-set-path,qcom-$(2),$(strip $(path)))
+endef
+
+ifeq ($(BOARD_USES_QCOM_HARDWARE),true)
+
+ qcom_flags := -DQCOM_HARDWARE
+ qcom_flags += -DQCOM_BSP
+ qcom_flags += -DQTI_BSP
+
+ TARGET_USES_QCOM_BSP := true
+
+ # Tell HALs that we're compiling an AOSP build with an in-line kernel
+ TARGET_COMPILE_WITH_MSM_KERNEL := true
+
+ ifneq ($(filter msm7x27a msm7x30 msm8660 msm8960,$(TARGET_BOARD_PLATFORM)),)
+ # Enable legacy graphics functions
+ qcom_flags += -DQCOM_BSP_LEGACY
+ # Enable legacy audio functions
+ ifeq ($(BOARD_USES_LEGACY_ALSA_AUDIO),true)
+ USE_CUSTOM_AUDIO_POLICY := 1
+ qcom_flags += -DLEGACY_ALSA_AUDIO
+ endif
+ endif
+
+ TARGET_GLOBAL_CFLAGS += $(qcom_flags)
+ TARGET_GLOBAL_CPPFLAGS += $(qcom_flags)
+ CLANG_TARGET_GLOBAL_CFLAGS += $(qcom_flags)
+ CLANG_TARGET_GLOBAL_CPPFLAGS += $(qcom_flags)
+
+ # Multiarch needs these too..
+ 2ND_TARGET_GLOBAL_CFLAGS += $(qcom_flags)
+ 2ND_TARGET_GLOBAL_CPPFLAGS += $(qcom_flags)
+ 2ND_CLANG_TARGET_GLOBAL_CFLAGS += $(qcom_flags)
+ 2ND_CLANG_TARGET_GLOBAL_CPPFLAGS += $(qcom_flags)
+
+ ifeq ($(QCOM_HARDWARE_VARIANT),)
+ ifneq ($(filter msm8610 msm8226 msm8974,$(TARGET_BOARD_PLATFORM)),)
+ QCOM_HARDWARE_VARIANT := msm8974
+ else
+ ifneq ($(filter msm8909 msm8916,$(TARGET_BOARD_PLATFORM)),)
+ QCOM_HARDWARE_VARIANT := msm8916
+ else
+ ifneq ($(filter msm8953 msm8937,$(TARGET_BOARD_PLATFORM)),)
+ QCOM_HARDWARE_VARIANT := msm8937
+ else
+ ifneq ($(filter msm8992 msm8994,$(TARGET_BOARD_PLATFORM)),)
+ QCOM_HARDWARE_VARIANT := msm8994
+ else
+ QCOM_HARDWARE_VARIANT := $(TARGET_BOARD_PLATFORM)
+ endif
+ endif
+ endif
+ endif
+ endif
+
+# HACK: check to see if build uses standard QC HAL paths by checking for CM path structure
+AOSP_VARIANT_MAKEFILE := $(wildcard hardware/qcom/audio/default/Android.mk)
+ifeq ("$(AOSP_VARIANT_MAKEFILE)","")
+$(call project-set-path,qcom-audio,hardware/qcom/audio)
+$(call project-set-path,qcom-display,hardware/qcom/display)
+$(call project-set-path,qcom-media,hardware/qcom/media)
+$(call set-device-specific-path,CAMERA,camera,hardware/qcom/camera)
+$(call set-device-specific-path,GPS,gps,hardware/qcom/gps)
+$(call set-device-specific-path,SENSORS,sensors,hardware/qcom/sensors)
+$(call set-device-specific-path,LOC_API,loc-api,vendor/qcom/opensource/location)
+$(call set-device-specific-path,DATASERVICES,dataservices,vendor/qcom/opensource/dataservices)
+$(call project-set-path,ril,hardware/ril)
+$(call project-set-path,wlan,hardware/qcom/wlan)
+$(call project-set-path,bt-vendor,hardware/qcom/bt)
+else
+$(call project-set-path,qcom-audio,hardware/qcom/audio-caf/$(QCOM_HARDWARE_VARIANT))
+
+ifeq ($(SONY_BF64_KERNEL_VARIANT),true)
+$(call project-set-path,qcom-display,hardware/qcom/display-caf/sony)
+$(call project-set-path,qcom-media,hardware/qcom/media-caf/sony)
+else
+$(call project-set-path,qcom-display,hardware/qcom/display-caf/$(QCOM_HARDWARE_VARIANT))
+$(call project-set-path,qcom-media,hardware/qcom/media-caf/$(QCOM_HARDWARE_VARIANT))
+endif
+
+$(call set-device-specific-path,CAMERA,camera,hardware/qcom/camera)
+$(call set-device-specific-path,GPS,gps,hardware/qcom/gps)
+$(call set-device-specific-path,SENSORS,sensors,hardware/qcom/sensors)
+$(call set-device-specific-path,LOC_API,loc-api,vendor/qcom/opensource/location)
+$(call set-device-specific-path,DATASERVICES,dataservices,vendor/qcom/opensource/dataservices)
+
+$(call ril-set-path-variant,ril)
+$(call wlan-set-path-variant,wlan-caf)
+$(call bt-vendor-set-path-variant,bt-caf)
+endif # AOSP_VARIANT_MAKEFILE
+
+else
+
+$(call project-set-path,qcom-audio,hardware/qcom/audio/default)
+$(call project-set-path,qcom-display,hardware/qcom/display/$(TARGET_BOARD_PLATFORM))
+$(call project-set-path,qcom-media,hardware/qcom/media/default)
+
+$(call project-set-path,qcom-camera,hardware/qcom/camera)
+$(call project-set-path,qcom-gps,hardware/qcom/gps)
+$(call project-set-path,qcom-sensors,hardware/qcom/sensors)
+$(call project-set-path,qcom-loc-api,vendor/qcom/opensource/location)
+$(call project-set-path,qcom-dataservices,$(TARGET_DEVICE_DIR)/dataservices)
+
+$(call ril-set-path-variant,ril)
+$(call wlan-set-path-variant,wlan)
+$(call bt-vendor-set-path-variant,bt)
+
+endif
diff --git a/core/qcom_utils.mk b/core/qcom_utils.mk
new file mode 100755
index 0000000..50e0b4e
--- /dev/null
+++ b/core/qcom_utils.mk
@@ -0,0 +1,230 @@
+# Board platforms lists to be used for
+# TARGET_BOARD_PLATFORM specific featurization
+QCOM_BOARD_PLATFORMS += msm7x27a
+QCOM_BOARD_PLATFORMS += msm7x30
+QCOM_BOARD_PLATFORMS += msm8226
+QCOM_BOARD_PLATFORMS += msm8610
+QCOM_BOARD_PLATFORMS += msm8660
+QCOM_BOARD_PLATFORMS += msm8909
+QCOM_BOARD_PLATFORMS += msm8916
+QCOM_BOARD_PLATFORMS += msm8960
+QCOM_BOARD_PLATFORMS += msm8974
+QCOM_BOARD_PLATFORMS += mpq8092
+QCOM_BOARD_PLATFORMS += msm8937
+QCOM_BOARD_PLATFORMS += msm8952
+QCOM_BOARD_PLATFORMS += msm8953
+QCOM_BOARD_PLATFORMS += msm8992
+QCOM_BOARD_PLATFORMS += msm8994
+QCOM_BOARD_PLATFORMS += msm8996
+QCOM_BOARD_PLATFORMS += msm_bronze
+QCOM_BOARD_PLATFORMS += apq8084
+
+MSM7K_BOARD_PLATFORMS := msm7x30
+MSM7K_BOARD_PLATFORMS += msm7x27
+MSM7K_BOARD_PLATFORMS += msm7x27a
+MSM7K_BOARD_PLATFORMS += msm7k
+
+QSD8K_BOARD_PLATFORMS := qsd8k
+
+
+# vars for use by utils
+empty :=
+space := $(empty) $(empty)
+colon := $(empty):$(empty)
+underscore := $(empty)_$(empty)
+
+# $(call match-word,w1,w2)
+# checks if w1 == w2
+# How it works
+# if (w1-w2 not empty or w2-w1 not empty) then not_match else match
+#
+# returns true or empty
+#$(warning :$(1): :$(2): :$(subst $(1),,$(2)):)
+#$(warning :$(2): :$(1): :$(subst $(2),,$(1)):)
+#
+define match-word
+$(strip \
+ $(if $(or $(subst $(1),$(empty),$(2)),$(subst $(2),$(empty),$(1))),,true) \
+)
+endef
+
+# $(call find-word-in-list,w,wlist)
+# finds an exact match of word w in word list wlist
+#
+# How it works
+# fill wlist spaces with colon
+# wrap w with colon
+# search word w in list wl, if found match m, return stripped word w
+#
+# returns stripped word or empty
+define find-word-in-list
+$(strip \
+ $(eval wl:= $(colon)$(subst $(space),$(colon),$(strip $(2)))$(colon)) \
+ $(eval w:= $(colon)$(strip $(1))$(colon)) \
+ $(eval m:= $(findstring $(w),$(wl))) \
+ $(if $(m),$(1),) \
+)
+endef
+
+# $(call match-word-in-list,w,wlist)
+# does an exact match of word w in word list wlist
+# How it works
+# if the input word is not empty
+# return output of an exact match of word w in wordlist wlist
+# else
+# return empty
+# returns true or empty
+define match-word-in-list
+$(strip \
+ $(if $(strip $(1)), \
+ $(call match-word,$(call find-word-in-list,$(1),$(2)),$(strip $(1))), \
+ ) \
+)
+endef
+
+# $(call match-prefix,p,delim,w/wlist)
+# matches prefix p in wlist using delimiter delim
+#
+# How it works
+# trim the words in wlist w
+# if find-word-in-list returns not empty
+# return true
+# else
+# return empty
+#
+define match-prefix
+$(strip \
+ $(eval w := $(strip $(1)$(strip $(2)))) \
+ $(eval text := $(patsubst $(w)%,$(1),$(3))) \
+ $(if $(call match-word-in-list,$(1),$(text)),true,) \
+)
+endef
+
+# ----
+# The following utilities are meant for board platform specific
+# featurisation
+
+# $(call get-vendor-board-platforms,v)
+# returns list of board platforms for vendor v
+define get-vendor-board-platforms
+$(if $(call match-word,$(BOARD_USES_$(1)_HARDWARE),true),$($(1)_BOARD_PLATFORMS))
+endef
+
+# $(call is-board-platform,bp)
+# returns true or empty
+define is-board-platform
+$(call match-word,$(1),$(TARGET_BOARD_PLATFORM))
+endef
+
+# $(call is-not-board-platform,bp)
+# returns true or empty
+define is-not-board-platform
+$(if $(call match-word,$(1),$(TARGET_BOARD_PLATFORM)),,true)
+endef
+
+# $(call is-board-platform-in-list,bpl)
+# returns true or empty
+define is-board-platform-in-list
+$(call match-word-in-list,$(TARGET_BOARD_PLATFORM),$(1))
+endef
+
+# $(call is-vendor-board-platform,vendor)
+# returns true or empty
+define is-vendor-board-platform
+$(strip \
+ $(call match-word-in-list,$(TARGET_BOARD_PLATFORM),\
+ $(call get-vendor-board-platforms,$(1)) \
+ ) \
+)
+endef
+
+# $(call is-chipset-in-board-platform,chipset)
+# does a prefix match of chipset in TARGET_BOARD_PLATFORM
+# uses underscore as a delimiter
+#
+# returns true or empty
+define is-chipset-in-board-platform
+$(call match-prefix,$(1),$(underscore),$(TARGET_BOARD_PLATFORM))
+endef
+
+# $(call is-chipset-prefix-in-board-platform,prefix)
+# does a chipset prefix match in TARGET_BOARD_PLATFORM
+# assumes '_' and 'a' as the delimiter to the chipset prefix
+#
+# How it works
+# if ($(prefix)_ or $(prefix)a match in board platform)
+# return true
+# else
+# return empty
+#
+define is-chipset-prefix-in-board-platform
+$(strip \
+ $(eval delim_a := $(empty)a$(empty)) \
+ $(if \
+ $(or \
+ $(call match-prefix,$(1),$(delim_a),$(TARGET_BOARD_PLATFORM)), \
+ $(call match-prefix,$(1),$(underscore),$(TARGET_BOARD_PLATFORM)), \
+ ), \
+ true, \
+ ) \
+)
+endef
+
+#----
+# The following utilities are meant for Android Code Name
+# specific featurisation
+#
+# refer http://source.android.com/source/build-numbers.html
+# for code names and associated sdk versions
+CUPCAKE_SDK_VERSIONS := 3
+DONUT_SDK_VERSIONS := 4
+ECLAIR_SDK_VERSIONS := 5 6 7
+FROYO_SDK_VERSIONS := 8
+GINGERBREAD_SDK_VERSIONS := 9 10
+HONEYCOMB_SDK_VERSIONS := 11 12 13
+ICECREAM_SANDWICH_SDK_VERSIONS := 14 15
+JELLY_BEAN_SDK_VERSIONS := 16 17 18
+
+# $(call is-platform-sdk-version-at-least,version)
+# version is a numeric SDK_VERSION defined above
+define is-platform-sdk-version-at-least
+$(strip \
+ $(if $(filter 1,$(shell echo "$$(( $(PLATFORM_SDK_VERSION) >= $(1) ))" )), \
+ true, \
+ ) \
+)
+endef
+
+# $(call is-android-codename,codename)
+# codename is one of CUPCAKE,DONUT,ECLAIR,FROYO,GINGERBREAD,HONEYCOMB,ICECREAM_SANDWICH,JELLY_BEAN
+# please refer to the $(codename)_SDK_VERSIONS lists declared above
+define is-android-codename
+$(strip \
+ $(if \
+ $(call match-word-in-list,$(PLATFORM_SDK_VERSION),$($(1)_SDK_VERSIONS)), \
+ true, \
+ ) \
+)
+endef
+
+# $(call is-android-codename-in-list,cnlist)
+# cnlist is combination/list of android codenames
+define is-android-codename-in-list
+$(strip \
+ $(eval acn := $(empty)) \
+ $(foreach \
+ i,$(1),\
+ $(eval acn += \
+ $(if \
+ $(call \
+ match-word-in-list,\
+ $(PLATFORM_SDK_VERSION),\
+ $($(i)_SDK_VERSIONS)\
+ ),\
+ true,\
+ )\
+ )\
+ ) \
+ $(if $(strip $(acn)),true,) \
+)
+endef
diff --git a/core/static_java_library.mk b/core/static_java_library.mk
index 9b7b46a..99f4455 100644
--- a/core/static_java_library.mk
+++ b/core/static_java_library.mk
@@ -122,7 +122,7 @@ $(R_file_stamp): PRIVATE_MANIFEST_PACKAGE_NAME :=
$(R_file_stamp): PRIVATE_MANIFEST_INSTRUMENTATION_FOR :=
$(R_file_stamp) : $(all_resources) $(full_android_manifest) $(AAPT) $(framework_res_package_export_deps)
- @echo "target R.java/Manifest.java: $(PRIVATE_MODULE) ($@)"
+ @echo -e ${CL_YLW}"target R.java/Manifest.java:"${CL_RST}" $(PRIVATE_MODULE) ($@)"
$(create-resource-java-files)
$(hide) find $(PRIVATE_SOURCE_INTERMEDIATES_DIR) -name R.java | xargs cat > $@
@@ -140,6 +140,7 @@ $(built_aar): PRIVATE_ANDROID_MANIFEST := $(full_android_manifest)
$(built_aar): PRIVATE_CLASSES_JAR := $(LOCAL_BUILT_MODULE)
$(built_aar): PRIVATE_RESOURCE_DIR := $(LOCAL_RESOURCE_DIR)
$(built_aar): PRIVATE_R_TXT := $(LOCAL_INTERMEDIATE_SOURCE_DIR)/R.txt
+$(built_aar): PRIVATE_CONSUMER_PROGUARD_FILE := $(LOCAL_CONSUMER_PROGUARD_FILE)
$(built_aar) : $(LOCAL_BUILT_MODULE)
@echo "target AAR: $(PRIVATE_MODULE) ($@)"
$(hide) rm -rf $(dir $@)aar && mkdir -p $(dir $@)aar/res
@@ -148,6 +149,9 @@ $(built_aar) : $(LOCAL_BUILT_MODULE)
# Note: Use "cp -n" to honor the resource overlay rules, if multiple res dirs exist.
$(hide) $(foreach res,$(PRIVATE_RESOURCE_DIR),cp -Rfn $(res)/* $(dir $@)aar/res;)
$(hide) cp $(PRIVATE_R_TXT) $(dir $@)aar/R.txt
+ $(hide) if [ ! -z "$(PRIVATE_CONSUMER_PROGUARD_FILE)" ]; then \
+ echo "Including '$(PRIVATE_CONSUMER_PROGUARD_FILE)'"; \
+ cp $(PRIVATE_CONSUMER_PROGUARD_FILE) $(dir $@)aar/proguard.txt; fi
$(hide) jar -cMf $@ \
-C $(dir $@)aar .
diff --git a/core/tasks/apicheck.mk b/core/tasks/apicheck.mk
index 683a075..f109527 100644
--- a/core/tasks/apicheck.mk
+++ b/core/tasks/apicheck.mk
@@ -76,9 +76,9 @@ $(eval $(call check-api, \
.PHONY: update-public-api
update-public-api: $(INTERNAL_PLATFORM_API_FILE) | $(ACP)
- @echo Copying current.txt
+ @echo -e ${CL_GRN}"Copying current.txt"${CL_RST}
$(hide) $(ACP) $(INTERNAL_PLATFORM_API_FILE) frameworks/base/api/current.txt
- @echo Copying removed.txt
+ @echo -e ${CL_GRN}"Copying removed.txt"${CL_RST}
$(hide) $(ACP) $(INTERNAL_PLATFORM_REMOVED_API_FILE) frameworks/base/api/removed.txt
update-api : update-public-api
diff --git a/core/tasks/boot_jars_package_check.mk b/core/tasks/boot_jars_package_check.mk
index 188c267..28f2b82 100644
--- a/core/tasks/boot_jars_package_check.mk
+++ b/core/tasks/boot_jars_package_check.mk
@@ -16,6 +16,7 @@
# Rules to check if classes in the boot jars are from the whitelisted packages.
#
+ifneq ($(SKIP_BOOT_JARS_CHECK),)
ifneq ($(SKIP_BOOT_JARS_CHECK),true)
ifneq ($(TARGET_BUILD_PDK),true)
ifdef PRODUCT_BOOT_JARS
@@ -44,3 +45,4 @@ droidcore : check-boot-jars
endif # PRODUCT_BOOT_JARS
endif # TARGET_BUILD_PDK not true
endif # SKIP_BOOT_JARS_CHECK not true
+endif # SKIP_BOOT_JARS_CHECK not defined
diff --git a/core/tasks/check_boot_jars/check_boot_jars.py b/core/tasks/check_boot_jars/check_boot_jars.py
index 5a0ec40..cd0bd75 100755
--- a/core/tasks/check_boot_jars/check_boot_jars.py
+++ b/core/tasks/check_boot_jars/check_boot_jars.py
@@ -1,5 +1,7 @@
#!/usr/bin/env python
+from __future__ import print_function
+
"""
Check boot jars.
@@ -55,15 +57,15 @@ def CheckJar(jar):
package_name = package_name.replace('/', '.')
# Skip class without a package name
if package_name and not whitelist_re.match(package_name):
- print >> sys.stderr, ('Error: %s: unknown package name of class file %s'
- % (jar, f))
+ print('Error: %s: unknown package name of class file %s'
+ % (jar, f), file=sys.stderr)
return False
return True
def main(argv):
if len(argv) < 2:
- print __doc__
+ print(__doc__)
return 1
if not LoadWhitelist(argv[0]):
diff --git a/core/tasks/collect_gpl_sources.mk b/core/tasks/collect_gpl_sources.mk
index 30ba62b..fc03f48 100644
--- a/core/tasks/collect_gpl_sources.mk
+++ b/core/tasks/collect_gpl_sources.mk
@@ -17,7 +17,7 @@ gpl_source_tgz := $(call intermediates-dir-for,PACKAGING,gpl_source,HOST,COMMON)
# FORCE since we can't know whether any of the sources changed
$(gpl_source_tgz): PRIVATE_PATHS := $(sort $(patsubst %/, %, $(dir $(ALL_GPL_MODULE_LICENSE_FILES))))
$(gpl_source_tgz) : $(ALL_GPL_MODULE_LICENSE_FILES) FORCE
- @echo Package gpl sources: $@
+ @echo -e ${CL_GRN}"Package gpl sources:"${CL_RST}" $@"
@rm -rf $(dir $@) && mkdir -p $(dir $@)
$(hide) tar cfz $@ --exclude ".git*" $(PRIVATE_PATHS)
diff --git a/core/tasks/dt_image.mk b/core/tasks/dt_image.mk
new file mode 100644
index 0000000..60d45cc
--- /dev/null
+++ b/core/tasks/dt_image.mk
@@ -0,0 +1,48 @@
+#----------------------------------------------------------------------
+# Generate device tree image (dt.img)
+#----------------------------------------------------------------------
+ifeq ($(strip $(BOARD_CUSTOM_BOOTIMG_MK)),)
+ifeq ($(strip $(BOARD_KERNEL_SEPARATED_DT)),true)
+ifneq ($(strip $(BOARD_KERNEL_PREBUILT_DT)),true)
+ifeq ($(strip $(BUILD_TINY_ANDROID)),true)
+include device/qcom/common/dtbtool/Android.mk
+endif
+
+ifeq ($(strip $(TARGET_CUSTOM_DTBTOOL)),)
+DTBTOOL_NAME := dtbToolCM
+else
+DTBTOOL_NAME := $(TARGET_CUSTOM_DTBTOOL)
+endif
+
+DTBTOOL := $(HOST_OUT_EXECUTABLES)/$(DTBTOOL_NAME)$(HOST_EXECUTABLE_SUFFIX)
+
+INSTALLED_DTIMAGE_TARGET := $(PRODUCT_OUT)/dt.img
+
+ifeq ($(strip $(TARGET_CUSTOM_DTBTOOL)),)
+# dtbToolCM will search subdirectories
+possible_dtb_dirs = $(KERNEL_OUT)/arch/$(KERNEL_ARCH)/boot/
+else
+# Most specific paths must come first in possible_dtb_dirs
+possible_dtb_dirs = $(KERNEL_OUT)/arch/$(KERNEL_ARCH)/boot/dts/ $(KERNEL_OUT)/arch/$(KERNEL_ARCH)/boot/
+endif
+dtb_dir = $(firstword $(wildcard $(possible_dtb_dirs)))
+
+define build-dtimage-target
+ $(call pretty,"Target dt image: $@")
+ $(hide) $(DTBTOOL) $(BOARD_DTBTOOL_ARGS) -o $@ -s $(BOARD_KERNEL_PAGESIZE) -p $(KERNEL_OUT)/scripts/dtc/ $(dtb_dir)
+ $(hide) chmod a+r $@
+endef
+
+$(INSTALLED_DTIMAGE_TARGET): $(DTBTOOL) $(INSTALLED_KERNEL_TARGET)
+ $(build-dtimage-target)
+ @echo -e ${CL_CYN}"Made DT image: $@"${CL_RST}
+
+ALL_DEFAULT_INSTALLED_MODULES += $(INSTALLED_DTIMAGE_TARGET)
+ALL_MODULES.$(LOCAL_MODULE).INSTALLED += $(INSTALLED_DTIMAGE_TARGET)
+
+.PHONY: dtimage
+dtimage: $(INSTALLED_DTIMAGE_TARGET)
+
+endif
+endif
+endif
diff --git a/core/tasks/kernel.mk b/core/tasks/kernel.mk
new file mode 100644
index 0000000..eedf6ca
--- /dev/null
+++ b/core/tasks/kernel.mk
@@ -0,0 +1,367 @@
+# Copyright (C) 2012 The CyanogenMod Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Android makefile to build kernel as a part of Android Build
+#
+# Configuration
+# =============
+#
+# These config vars are usually set in BoardConfig.mk:
+#
+# TARGET_KERNEL_SOURCE = Kernel source dir, optional, defaults
+# to kernel/$(TARGET_DEVICE_DIR)
+# TARGET_KERNEL_CONFIG = Kernel defconfig
+# TARGET_KERNEL_VARIANT_CONFIG = Variant defconfig, optional
+# TARGET_KERNEL_SELINUX_CONFIG = SELinux defconfig, optional
+# TARGET_KERNEL_ADDITIONAL_CONFIG = Additional defconfig, optional
+# TARGET_KERNEL_ARCH = Kernel Arch
+# TARGET_KERNEL_HEADER_ARCH = Optional Arch for kernel headers if
+# different from TARGET_KERNEL_ARCH
+# TARGET_USES_UNCOMPRESSED_KERNEL = 'true' if Kernel is uncompressed,
+# optional, defaults to false
+# TARGET_KERNEL_CROSS_COMPILE_PREFIX = Compiler prefix (e.g. aarch64-linux-android-)
+# defaults to arm-eabi-
+#
+# BOARD_KERNEL_IMAGE_NAME = Built image name, optional,
+# defaults to Image.gz on arm64
+# defaults to Image if TARGET_USES_UNCOMPRESSED_KERNEL
+# defaults to zImage otherwise
+#
+# KERNEL_TOOLCHAIN_PREFIX = Overrides TARGET_KERNEL_CROSS_COMPILE_PREFIX,
+# Set this var in shell to override
+# toolchain specified in BoardConfig.mk
+# KERNEL_TOOLCHAIN = Path to toolchain, if unset, assumes
+# TARGET_KERNEL_CROSS_COMPILE_PREFIX
+# is in PATH
+# USE_CCACHE = Enable ccache (global Android flag)
+#
+# NEED_KERNEL_MODULE_ROOT = Optional, if true, install kernel
+# modules in root instead of system
+
+
+TARGET_AUTO_KDIR := $(shell echo $(TARGET_DEVICE_DIR) | sed -e 's/^device/kernel/g')
+
+## Externally influenced variables
+# kernel location - optional, defaults to kernel/<vendor>/<device>
+TARGET_KERNEL_SOURCE ?= $(TARGET_AUTO_KDIR)
+KERNEL_SRC := $(TARGET_KERNEL_SOURCE)
+# kernel configuration - mandatory
+KERNEL_DEFCONFIG := $(TARGET_KERNEL_CONFIG)
+VARIANT_DEFCONFIG := $(TARGET_KERNEL_VARIANT_CONFIG)
+SELINUX_DEFCONFIG := $(TARGET_KERNEL_SELINUX_CONFIG)
+
+## Internal variables
+KERNEL_OUT := $(TARGET_OUT_INTERMEDIATES)/KERNEL_OBJ
+KERNEL_CONFIG := $(KERNEL_OUT)/.config
+KERNEL_OUT_STAMP := $(KERNEL_OUT)/.mkdir_stamp
+
+TARGET_KERNEL_ARCH := $(strip $(TARGET_KERNEL_ARCH))
+ifeq ($(TARGET_KERNEL_ARCH),)
+KERNEL_ARCH := $(TARGET_ARCH)
+else
+KERNEL_ARCH := $(TARGET_KERNEL_ARCH)
+endif
+
+ifeq ($(KERNEL_ARCH),x86_64)
+KERNEL_DEFCONFIG_ARCH := x86
+else
+KERNEL_DEFCONFIG_ARCH := $(KERNEL_ARCH)
+endif
+KERNEL_DEFCONFIG_SRC := $(KERNEL_SRC)/arch/$(KERNEL_DEFCONFIG_ARCH)/configs/$(KERNEL_DEFCONFIG)
+
+TARGET_KERNEL_HEADER_ARCH := $(strip $(TARGET_KERNEL_HEADER_ARCH))
+ifeq ($(TARGET_KERNEL_HEADER_ARCH),)
+KERNEL_HEADER_ARCH := $(KERNEL_ARCH)
+else
+KERNEL_HEADER_ARCH := $(TARGET_KERNEL_HEADER_ARCH)
+endif
+
+KERNEL_HEADER_DEFCONFIG := $(strip $(KERNEL_HEADER_DEFCONFIG))
+ifeq ($(KERNEL_HEADER_DEFCONFIG),)
+KERNEL_HEADER_DEFCONFIG := $(KERNEL_DEFCONFIG)
+endif
+
+
+ifneq ($(BOARD_KERNEL_IMAGE_NAME),)
+ TARGET_PREBUILT_INT_KERNEL_TYPE := $(BOARD_KERNEL_IMAGE_NAME)
+else
+ ifeq ($(TARGET_USES_UNCOMPRESSED_KERNEL),true)
+ TARGET_PREBUILT_INT_KERNEL_TYPE := Image
+ else
+ ifeq ($(KERNEL_ARCH),arm64)
+ TARGET_PREBUILT_INT_KERNEL_TYPE := Image.gz
+ else
+ TARGET_PREBUILT_INT_KERNEL_TYPE := zImage
+ endif
+ endif
+endif
+
+TARGET_PREBUILT_INT_KERNEL := $(KERNEL_OUT)/arch/$(KERNEL_ARCH)/boot/$(TARGET_PREBUILT_INT_KERNEL_TYPE)
+
+# Clear this first to prevent accidental poisoning from env
+MAKE_FLAGS :=
+
+ifeq ($(KERNEL_ARCH),arm64)
+ # Avoid "unsupported RELA relocation: 311" errors (R_AARCH64_ADR_GOT_PAGE)
+ MAKE_FLAGS += CFLAGS_MODULE="-fno-pic"
+ ifeq ($(TARGET_ARCH),arm)
+ KERNEL_CONFIG_OVERRIDE := CONFIG_ANDROID_BINDER_IPC_32BIT=y
+ endif
+endif
+
+ifneq ($(TARGET_KERNEL_ADDITIONAL_CONFIG),)
+KERNEL_ADDITIONAL_CONFIG := $(TARGET_KERNEL_ADDITIONAL_CONFIG)
+KERNEL_ADDITIONAL_CONFIG_SRC := $(KERNEL_SRC)/arch/$(KERNEL_ARCH)/configs/$(KERNEL_ADDITIONAL_CONFIG)
+ ifeq ("$(wildcard $(KERNEL_ADDITIONAL_CONFIG_SRC))","")
+ $(warning TARGET_KERNEL_ADDITIONAL_CONFIG '$(TARGET_KERNEL_ADDITIONAL_CONFIG)' doesn't exist)
+ KERNEL_ADDITIONAL_CONFIG_SRC := /dev/null
+ endif
+else
+ KERNEL_ADDITIONAL_CONFIG_SRC := /dev/null
+endif
+
+## To be discontinued in a future version. Notify builder about target
+## kernel format requirement
+ifeq ($(BOARD_KERNEL_IMAGE_NAME),)
+ifeq ($(BOARD_USES_UBOOT),true)
+ $(error "Please set BOARD_KERNEL_IMAGE_NAME to uImage")
+else ifeq ($(BOARD_USES_UNCOMPRESSED_BOOT),true)
+ $(error "Please set BOARD_KERNEL_IMAGE_NAME to Image")
+endif
+endif
+
+ifeq "$(wildcard $(KERNEL_SRC) )" ""
+ ifneq ($(TARGET_PREBUILT_KERNEL),)
+ HAS_PREBUILT_KERNEL := true
+ NEEDS_KERNEL_COPY := true
+ else
+ $(foreach cf,$(PRODUCT_COPY_FILES), \
+ $(eval _src := $(call word-colon,1,$(cf))) \
+ $(eval _dest := $(call word-colon,2,$(cf))) \
+    $(if $(filter kernel,$(_dest)), \
+ $(eval HAS_PREBUILT_KERNEL := true)))
+ endif
+
+ ifneq ($(HAS_PREBUILT_KERNEL),)
+ $(warning ***************************************************************)
+ $(warning * Using prebuilt kernel binary instead of source *)
+ $(warning * THIS IS DEPRECATED, AND WILL BE DISCONTINUED *)
+ $(warning * Please configure your device to download the kernel *)
+ $(warning * source repository to $(KERNEL_SRC))
+ $(warning * See http://wiki.cyanogenmod.org/w/Doc:_integrated_kernel_building)
+ $(warning * for more information *)
+ $(warning ***************************************************************)
+ FULL_KERNEL_BUILD := false
+ KERNEL_BIN := $(TARGET_PREBUILT_KERNEL)
+ else
+ $(warning ***************************************************************)
+ $(warning * *)
+ $(warning * No kernel source found, and no fallback prebuilt defined. *)
+ $(warning * Please make sure your device is properly configured to *)
+ $(warning * download the kernel repository to $(KERNEL_SRC))
+ $(warning * and add the TARGET_KERNEL_CONFIG variable to BoardConfig.mk *)
+ $(warning * *)
+ $(warning * As an alternative, define the TARGET_PREBUILT_KERNEL *)
+ $(warning * variable with the path to the prebuilt binary kernel image *)
+ $(warning * in your BoardConfig.mk file *)
+ $(warning * *)
+ $(warning ***************************************************************)
+ $(error "NO KERNEL")
+ endif
+else
+ NEEDS_KERNEL_COPY := true
+ ifeq ($(TARGET_KERNEL_CONFIG),)
+ $(warning **********************************************************)
+ $(warning * Kernel source found, but no configuration was defined *)
+ $(warning * Please add the TARGET_KERNEL_CONFIG variable to your *)
+ $(warning * BoardConfig.mk file *)
+ $(warning **********************************************************)
+ # $(error "NO KERNEL CONFIG")
+ else
+ #$(info Kernel source found, building it)
+ FULL_KERNEL_BUILD := true
+ KERNEL_BIN := $(TARGET_PREBUILT_INT_KERNEL)
+ endif
+endif
+
+ifeq ($(FULL_KERNEL_BUILD),true)
+
+KERNEL_HEADERS_INSTALL := $(KERNEL_OUT)/usr
+KERNEL_HEADERS_INSTALL_STAMP := $(KERNEL_OUT)/.headers_install_stamp
+
+ifeq ($(NEED_KERNEL_MODULE_ROOT),true)
+KERNEL_MODULES_INSTALL := root
+KERNEL_MODULES_OUT := $(TARGET_ROOT_OUT)/lib/modules
+else
+KERNEL_MODULES_INSTALL := system
+KERNEL_MODULES_OUT := $(TARGET_OUT)/lib/modules
+endif
+
+TARGET_KERNEL_CROSS_COMPILE_PREFIX := $(strip $(TARGET_KERNEL_CROSS_COMPILE_PREFIX))
+ifeq ($(TARGET_KERNEL_CROSS_COMPILE_PREFIX),)
+KERNEL_TOOLCHAIN_PREFIX ?= arm-eabi-
+else
+KERNEL_TOOLCHAIN_PREFIX ?= $(TARGET_KERNEL_CROSS_COMPILE_PREFIX)
+endif
+
+ifeq ($(KERNEL_TOOLCHAIN),)
+KERNEL_TOOLCHAIN_PATH := $(KERNEL_TOOLCHAIN_PREFIX)
+else
+ifneq ($(KERNEL_TOOLCHAIN_PREFIX),)
+KERNEL_TOOLCHAIN_PATH := $(KERNEL_TOOLCHAIN)/$(KERNEL_TOOLCHAIN_PREFIX)
+endif
+endif
+
+ifneq ($(USE_CCACHE),)
+ ccache := $(ANDROID_BUILD_TOP)/prebuilts/misc/$(HOST_PREBUILT_TAG)/ccache/ccache
+ # Check that the executable is here.
+ ccache := $(strip $(wildcard $(ccache)))
+endif
+
+KERNEL_CROSS_COMPILE := CROSS_COMPILE="$(ccache) $(KERNEL_TOOLCHAIN_PATH)"
+ccache =
+
+define mv-modules
+ mdpath=`find $(KERNEL_MODULES_OUT) -type f -name modules.order`;\
+ if [ "$$mdpath" != "" ];then\
+ mpath=`dirname $$mdpath`;\
+    ko=`find $$mpath/kernel -type f -name '*.ko'`;\
+ for i in $$ko; do $(KERNEL_TOOLCHAIN_PATH)strip --strip-unneeded $$i;\
+ mv $$i $(KERNEL_MODULES_OUT)/; done;\
+ fi
+endef
+
+define clean-module-folder
+ mdpath=`find $(KERNEL_MODULES_OUT) -type f -name modules.order`;\
+ if [ "$$mdpath" != "" ];then\
+ mpath=`dirname $$mdpath`; rm -rf $$mpath;\
+ fi
+endef
+
+ifeq ($(HOST_OS),darwin)
+ MAKE_FLAGS += C_INCLUDE_PATH=$(ANDROID_BUILD_TOP)/external/elfutils/src/libelf/
+endif
+
+ifeq ($(TARGET_KERNEL_MODULES),)
+ TARGET_KERNEL_MODULES := no-external-modules
+endif
+
+$(KERNEL_OUT_STAMP):
+ $(hide) mkdir -p $(KERNEL_OUT)
+ $(hide) rm -rf $(KERNEL_MODULES_OUT)
+ $(hide) mkdir -p $(KERNEL_MODULES_OUT)
+ $(hide) touch $@
+
+KERNEL_ADDITIONAL_CONFIG_OUT := $(KERNEL_OUT)/.additional_config
+
+.PHONY: force_additional_config
+$(KERNEL_ADDITIONAL_CONFIG_OUT): force_additional_config
+ $(hide) cmp -s $(KERNEL_ADDITIONAL_CONFIG_SRC) $@ || cp $(KERNEL_ADDITIONAL_CONFIG_SRC) $@;
+
+$(KERNEL_CONFIG): $(KERNEL_OUT_STAMP) $(KERNEL_DEFCONFIG_SRC) $(KERNEL_ADDITIONAL_CONFIG_OUT)
+ @echo -e ${CL_GRN}"Building Kernel Config"${CL_RST}
+ $(MAKE) $(MAKE_FLAGS) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) VARIANT_DEFCONFIG=$(VARIANT_DEFCONFIG) SELINUX_DEFCONFIG=$(SELINUX_DEFCONFIG) $(KERNEL_DEFCONFIG)
+ $(hide) if [ ! -z "$(KERNEL_CONFIG_OVERRIDE)" ]; then \
+ echo "Overriding kernel config with '$(KERNEL_CONFIG_OVERRIDE)'"; \
+ echo $(KERNEL_CONFIG_OVERRIDE) >> $(KERNEL_OUT)/.config; \
+ $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) oldconfig; fi
+ $(hide) if [ ! -z "$(KERNEL_ADDITIONAL_CONFIG)" ]; then \
+ echo "Using additional config '$(KERNEL_ADDITIONAL_CONFIG)'"; \
+ $(KERNEL_SRC)/scripts/kconfig/merge_config.sh -m -O $(KERNEL_OUT) $(KERNEL_OUT)/.config $(KERNEL_SRC)/arch/$(KERNEL_ARCH)/configs/$(KERNEL_ADDITIONAL_CONFIG); \
+ $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) KCONFIG_ALLCONFIG=$(KERNEL_OUT)/.config alldefconfig; fi
+
+TARGET_KERNEL_BINARIES: $(KERNEL_OUT_STAMP) $(KERNEL_CONFIG) $(KERNEL_HEADERS_INSTALL_STAMP)
+ @echo -e ${CL_GRN}"Building Kernel"${CL_RST}
+ $(MAKE) $(MAKE_FLAGS) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) $(TARGET_PREBUILT_INT_KERNEL_TYPE)
+ $(hide) if grep -q 'CONFIG_OF=y' $(KERNEL_CONFIG) ; \
+ then \
+ echo -e ${CL_GRN}"Building DTBs"${CL_RST} ; \
+ $(MAKE) $(MAKE_FLAGS) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) dtbs ; \
+ else \
+ echo "DTBs not enabled" ; \
+ fi ;
+ $(hide) if grep -q 'CONFIG_MODULES=y' $(KERNEL_CONFIG) ; \
+ then \
+ echo -e ${CL_GRN}"Building Kernel Modules"${CL_RST} ; \
+ $(MAKE) $(MAKE_FLAGS) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) modules && \
+ $(MAKE) $(MAKE_FLAGS) -C $(KERNEL_SRC) O=$(KERNEL_OUT) INSTALL_MOD_PATH=../../$(KERNEL_MODULES_INSTALL) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) modules_install && \
+ $(mv-modules) && \
+ $(clean-module-folder) ; \
+ else \
+ echo "Kernel Modules not enabled" ; \
+ fi ;
+
+
+$(TARGET_KERNEL_MODULES): TARGET_KERNEL_BINARIES
+
+$(TARGET_PREBUILT_INT_KERNEL): $(TARGET_KERNEL_MODULES)
+ $(mv-modules)
+ $(clean-module-folder)
+
+$(KERNEL_HEADERS_INSTALL_STAMP): $(KERNEL_OUT_STAMP) $(KERNEL_CONFIG)
+ @echo -e ${CL_GRN}"Building Kernel Headers"${CL_RST}
+ $(hide) if [ ! -z "$(KERNEL_HEADER_DEFCONFIG)" ]; then \
+ rm -f ../$(KERNEL_CONFIG); \
+ $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_HEADER_ARCH) $(KERNEL_CROSS_COMPILE) VARIANT_DEFCONFIG=$(VARIANT_DEFCONFIG) SELINUX_DEFCONFIG=$(SELINUX_DEFCONFIG) $(KERNEL_HEADER_DEFCONFIG); \
+ $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_HEADER_ARCH) $(KERNEL_CROSS_COMPILE) headers_install; fi
+ $(hide) if [ "$(KERNEL_HEADER_DEFCONFIG)" != "$(KERNEL_DEFCONFIG)" ]; then \
+ echo "Used a different defconfig for header generation"; \
+ rm -f ../$(KERNEL_CONFIG); \
+ $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) VARIANT_DEFCONFIG=$(VARIANT_DEFCONFIG) SELINUX_DEFCONFIG=$(SELINUX_DEFCONFIG) $(KERNEL_DEFCONFIG); fi
+ $(hide) if [ ! -z "$(KERNEL_CONFIG_OVERRIDE)" ]; then \
+ echo "Overriding kernel config with '$(KERNEL_CONFIG_OVERRIDE)'"; \
+ echo $(KERNEL_CONFIG_OVERRIDE) >> $(KERNEL_OUT)/.config; \
+ $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) oldconfig; fi
+ $(hide) if [ ! -z "$(KERNEL_ADDITIONAL_CONFIG)" ]; then \
+ echo "Using additional config '$(KERNEL_ADDITIONAL_CONFIG)'"; \
+ $(KERNEL_SRC)/scripts/kconfig/merge_config.sh -m -O $(KERNEL_OUT) $(KERNEL_OUT)/.config $(KERNEL_SRC)/arch/$(KERNEL_ARCH)/configs/$(KERNEL_ADDITIONAL_CONFIG); \
+ $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) KCONFIG_ALLCONFIG=$(KERNEL_OUT)/.config alldefconfig; fi
+ $(hide) touch $@
+
+# provide this rule because there are dependencies on this throughout the repo
+$(KERNEL_HEADERS_INSTALL): $(KERNEL_HEADERS_INSTALL_STAMP)
+
+kerneltags: $(KERNEL_OUT_STAMP) $(KERNEL_CONFIG)
+ $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) tags
+
+kernelconfig: KERNELCONFIG_MODE := menuconfig
+kernelxconfig: KERNELCONFIG_MODE := xconfig
+kernelxconfig kernelconfig: $(KERNEL_OUT_STAMP)
+ $(MAKE) $(MAKE_FLAGS) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) $(KERNEL_DEFCONFIG)
+ env KCONFIG_NOTIMESTAMP=true \
+ $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) $(KERNELCONFIG_MODE)
+ env KCONFIG_NOTIMESTAMP=true \
+ $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) savedefconfig
+ cp $(KERNEL_OUT)/defconfig $(KERNEL_DEFCONFIG_SRC)
+
+alldefconfig: $(KERNEL_OUT_STAMP)
+ env KCONFIG_NOTIMESTAMP=true \
+ $(MAKE) -C $(KERNEL_SRC) O=$(KERNEL_OUT) ARCH=$(KERNEL_ARCH) $(KERNEL_CROSS_COMPILE) alldefconfig
+
+endif # FULL_KERNEL_BUILD
+
+## Install it
+
+ifeq ($(NEEDS_KERNEL_COPY),true)
+file := $(INSTALLED_KERNEL_TARGET)
+ALL_PREBUILT += $(file)
+$(file) : $(KERNEL_BIN) | $(ACP)
+ $(transform-prebuilt-to-target)
+
+ALL_PREBUILT += $(INSTALLED_KERNEL_TARGET)
+endif
+
+.PHONY: kernel
+kernel: $(INSTALLED_KERNEL_TARGET)
diff --git a/core/tasks/oem_image.mk b/core/tasks/oem_image.mk
index 32d56a7..8a06670 100644
--- a/core/tasks/oem_image.mk
+++ b/core/tasks/oem_image.mk
@@ -15,7 +15,16 @@
#
# We build oem.img only if it's asked for.
+skip_oem_image := true
ifneq ($(filter $(MAKECMDGOALS),oem_image),)
+ skip_oem_image := false
+endif
+
+ifneq ($(BOARD_OEMIMAGE_FILE_SYSTEM_TYPE),)
+ skip_oem_image := false
+endif
+
+ifneq ($(skip_oem_image),true)
ifndef BOARD_OEMIMAGE_PARTITION_SIZE
$(error BOARD_OEMIMAGE_PARTITION_SIZE is not set.)
endif
@@ -43,4 +52,4 @@ $(INSTALLED_OEMIMAGE_TARGET) : $(INTERNAL_USERIMAGES_DEPS) $(INTERNAL_OEMIMAGE_F
oem_image : $(INSTALLED_OEMIMAGE_TARGET)
$(call dist-for-goals, oem_image, $(INSTALLED_OEMIMAGE_TARGET))
-endif # oem_image in $(MAKECMDGOALS)
+endif
diff --git a/core/tasks/product-graph.mk b/core/tasks/product-graph.mk
index db2cf71..38f1936 100644
--- a/core/tasks/product-graph.mk
+++ b/core/tasks/product-graph.mk
@@ -70,7 +70,7 @@ $(products_graph): PRIVATE_PRODUCTS := $(really_all_products)
$(products_graph): PRIVATE_PRODUCTS_FILTER := $(products_list)
$(products_graph): $(this_makefile)
- @echo Product graph DOT: $@ for $(PRIVATE_PRODUCTS_FILTER)
+ @echo -e ${CL_GRN}"Product graph DOT:"${CL_RST}" $@ for $(PRIVATE_PRODUCTS_FILTER)"
$(hide) echo 'digraph {' > $@.in
$(hide) echo 'graph [ ratio=.5 ];' >> $@.in
$(hide) $(foreach p,$(PRIVATE_PRODUCTS), \
@@ -89,7 +89,7 @@ endef
# $(1) product file
define transform-product-debug
$(OUT_DIR)/products/$(strip $(1)).txt: $(this_makefile)
- @echo Product debug info file: $$@
+ @echo -e ${CL_GRN}"Product debug info file:"${CL_RST}" $$@"
$(hide) rm -f $$@
$(hide) mkdir -p $$(dir $$@)
$(hide) echo 'FILE=$(strip $(1))' >> $$@
@@ -105,6 +105,7 @@ $(OUT_DIR)/products/$(strip $(1)).txt: $(this_makefile)
$(hide) echo 'PRODUCT_DEFAULT_PROPERTY_OVERRIDES=$$(PRODUCTS.$(strip $(1)).PRODUCT_DEFAULT_PROPERTY_OVERRIDES)' >> $$@
$(hide) echo 'PRODUCT_CHARACTERISTICS=$$(PRODUCTS.$(strip $(1)).PRODUCT_CHARACTERISTICS)' >> $$@
$(hide) echo 'PRODUCT_COPY_FILES=$$(PRODUCTS.$(strip $(1)).PRODUCT_COPY_FILES)' >> $$@
+ $(hide) echo 'PRODUCT_COPY_FILES_OVERRIDES=$$(PRODUCTS.$(strip $(1)).PRODUCT_COPY_FILES_OVERRIDES)' >> $$@
$(hide) echo 'PRODUCT_OTA_PUBLIC_KEYS=$$(PRODUCTS.$(strip $(1)).PRODUCT_OTA_PUBLIC_KEYS)' >> $$@
$(hide) echo 'PRODUCT_EXTRA_RECOVERY_KEYS=$$(PRODUCTS.$(strip $(1)).PRODUCT_EXTRA_RECOVERY_KEYS)' >> $$@
$(hide) echo 'PRODUCT_PACKAGE_OVERLAYS=$$(PRODUCTS.$(strip $(1)).PRODUCT_PACKAGE_OVERLAYS)' >> $$@
@@ -122,7 +123,7 @@ $(call product-debug-filename, $(p)): \
$(OUT_DIR)/products/$(strip $(1)).txt \
build/tools/product_debug.py \
$(this_makefile)
- @echo Product debug html file: $$@
+ @echo -e ${CL_GRN}"Product debug html file:"${CL_RST}" $$@"
$(hide) mkdir -p $$(dir $$@)
$(hide) cat $$< | build/tools/product_debug.py > $$@
endef
@@ -134,11 +135,11 @@ $(foreach p,$(really_all_products), \
)
$(products_pdf): $(products_graph)
- @echo Product graph PDF: $@
+ @echo -e ${CL_GRN}"Product graph PDF:"${CL_RST}" $@"
dot -Tpdf -Nshape=box -o $@ $<
$(products_svg): $(products_graph) $(product_debug_files)
- @echo Product graph SVG: $@
+ @echo -e ${CL_GRN}"Product graph SVG:"${CL_RST}" $@"
dot -Tsvg -Nshape=box -o $@ $<
product-graph: $(products_pdf) $(products_svg)
diff --git a/core/tasks/sdk-addon.mk b/core/tasks/sdk-addon.mk
index 5ac9b7d..620c50c 100644
--- a/core/tasks/sdk-addon.mk
+++ b/core/tasks/sdk-addon.mk
@@ -105,7 +105,7 @@ $(full_target): PRIVATE_DOCS_DIRS := $(addprefix $(OUT_DOCS)/, $(doc_modules))
$(full_target): PRIVATE_STAGING_DIR := $(call append-path,$(staging),$(addon_dir_leaf))
$(full_target): $(sdk_addon_deps) | $(ACP)
- @echo Packaging SDK Addon: $@
+ @echo -e ${CL_GRN}"Packaging SDK Addon:"${CL_RST}" $@"
$(hide) mkdir -p $(PRIVATE_STAGING_DIR)/docs
$(hide) for d in $(PRIVATE_DOCS_DIRS); do \
$(ACP) -r $$d $(PRIVATE_STAGING_DIR)/docs ;\
diff --git a/envsetup.sh b/envsetup.sh
index 6ad3a9e..03a6424 100644
--- a/envsetup.sh
+++ b/envsetup.sh
@@ -4,12 +4,18 @@ Invoke ". build/envsetup.sh" from your shell to add the following functions to y
- lunch: lunch <product_name>-<build_variant>
- tapas: tapas [<App1> <App2> ...] [arm|x86|mips|armv5|arm64|x86_64|mips64] [eng|userdebug|user]
- croot: Changes directory to the top of the tree.
+- cout: Changes directory to out.
- m: Makes from the top of the tree.
- mm: Builds all of the modules in the current directory, but not their dependencies.
- mmm: Builds all of the modules in the supplied directories, but not their dependencies.
To limit the modules being built use the syntax: mmm dir/:target1,target2.
- mma: Builds all of the modules in the current directory, and their dependencies.
+- mmp: Builds all of the modules in the current directory and pushes them to the device.
+- mmap: Builds all of the modules in the current directory, and its dependencies, then pushes the package to the device.
+- mmmp: Builds all of the modules in the supplied directories and pushes them to the device.
- mmma: Builds all of the modules in the supplied directories, and their dependencies.
+- mms: Short circuit builder. Quickly re-build the kernel, rootfs, boot and system images
+ without deep dependencies. Requires the full build to have run before.
- cgrep: Greps on all local C/C++ files.
- ggrep: Greps on all local Gradle files.
- jgrep: Greps on all local Java files.
@@ -18,6 +24,19 @@ Invoke ". build/envsetup.sh" from your shell to add the following functions to y
- sepgrep: Greps on all local sepolicy files.
- sgrep: Greps on all local source files.
- godir: Go to the directory containing a file.
+- cmremote: Add git remote for CM Gerrit Review
+- cmgerrit: A Git wrapper that fetches/pushes patch from/to CM Gerrit Review
+- cmrebase: Rebase a Gerrit change and push it again
+- aospremote: Add git remote for matching AOSP repository
+- cafremote: Add git remote for matching CodeAurora repository.
+- mka: Builds using SCHED_BATCH on all processors
+- mkap: Builds the module(s) using mka and pushes them to the device.
+- cmka: Cleans and builds using mka.
+- repolastsync: Prints date and time of last repo sync.
+- reposync: Parallel repo sync using ionice and SCHED_BATCH
+- repopick: Utility to fetch changes from Gerrit.
+- installboot: Installs a boot.img to the connected device.
+- installrecovery: Installs a recovery.img to the connected device.
Environemnt options:
- SANITIZE_HOST: Set to 'true' to use ASAN for all host modules. Note that
@@ -27,12 +46,9 @@ Environemnt options:
Look at the source to view more functions. The complete list is:
EOF
T=$(gettop)
- local A
- A=""
for i in `cat $T/build/envsetup.sh | sed -n "/^[ \t]*function /s/function \([a-z_]*\).*/\1/p" | sort | uniq`; do
- A="$A $i"
- done
- echo $A
+ echo "$i"
+ done | column
}
# Get the value of a build variable as an absolute path.
@@ -67,6 +83,15 @@ function check_product()
echo "Couldn't locate the top of the tree. Try setting TOP." >&2
return
fi
+
+ if (echo -n $1 | grep -q -e "^cm_") ; then
+ CM_BUILD=$(echo -n $1 | sed -e 's/^cm_//g')
+ export BUILD_NUMBER=$((date +%s%N ; echo $CM_BUILD; hostname) | openssl sha1 | sed -e 's/.*=//g; s/ //g' | cut -c1-10)
+ else
+ CM_BUILD=
+ fi
+ export CM_BUILD
+
TARGET_PRODUCT=$1 \
TARGET_BUILD_VARIANT= \
TARGET_BUILD_TYPE= \
@@ -129,6 +154,7 @@ function setpaths()
# defined in core/config.mk
targetgccversion=$(get_build_var TARGET_GCC_VERSION)
targetgccversion2=$(get_build_var 2ND_TARGET_GCC_VERSION)
+ targetlegacygccversion=$(get_build_var TARGET_LEGACY_GCC_VERSION)
export TARGET_GCC_VERSION=$targetgccversion
# The gcc toolchain does not exists for windows/cygwin. In this case, do not reference it.
@@ -164,7 +190,7 @@ function setpaths()
case $ARCH in
arm)
# Legacy toolchain configuration used for ARM kernel compilation
- toolchaindir=arm/arm-eabi-$targetgccversion/bin
+ toolchaindir=arm/arm-eabi-$targetlegacygccversion/bin
if [ -d "$gccprebuiltdir/$toolchaindir" ]; then
export ARM_EABI_TOOLCHAIN="$gccprebuiltdir/$toolchaindir"
ANDROID_KERNEL_TOOLCHAIN_PATH="$ARM_EABI_TOOLCHAIN":
@@ -213,6 +239,10 @@ function setpaths()
unset ANDROID_HOST_OUT
export ANDROID_HOST_OUT=$(get_abs_build_var HOST_OUT)
+ if [ -n "$ANDROID_CCACHE_DIR" ]; then
+ export CCACHE_DIR=$ANDROID_CCACHE_DIR
+ fi
+
# needed for building linux on MacOS
# TODO: fix the path
#export HOST_EXTRACFLAGS="-I "$T/system/kernel_headers/host_include
@@ -235,7 +265,6 @@ function set_stuff_for_environment()
setpaths
set_sequence_number
- export ANDROID_BUILD_TOP=$(gettop)
# With this environment variable new GCC can apply colors to warnings/errors
export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01'
export ASAN_OPTIONS=detect_leaks=0
@@ -253,35 +282,42 @@ function settitle()
local product=$TARGET_PRODUCT
local variant=$TARGET_BUILD_VARIANT
local apps=$TARGET_BUILD_APPS
+ if [ -z "$PROMPT_COMMAND" ]; then
+ # No prompts
+ PROMPT_COMMAND="echo -ne \"\033]0;${USER}@${HOSTNAME}: ${PWD}\007\""
+ elif [ -z "$(echo $PROMPT_COMMAND | grep '033]0;')" ]; then
+ # Prompts exist, but no hardstatus
+ PROMPT_COMMAND="echo -ne \"\033]0;${USER}@${HOSTNAME}: ${PWD}\007\";${PROMPT_COMMAND}"
+ fi
+ if [ ! -z "$ANDROID_PROMPT_PREFIX" ]; then
+ PROMPT_COMMAND="$(echo $PROMPT_COMMAND | sed -e 's/$ANDROID_PROMPT_PREFIX //g')"
+ fi
+
if [ -z "$apps" ]; then
- export PROMPT_COMMAND="echo -ne \"\033]0;[${arch}-${product}-${variant}] ${USER}@${HOSTNAME}: ${PWD}\007\""
+ ANDROID_PROMPT_PREFIX="[${arch}-${product}-${variant}]"
else
- export PROMPT_COMMAND="echo -ne \"\033]0;[$arch $apps $variant] ${USER}@${HOSTNAME}: ${PWD}\007\""
+ ANDROID_PROMPT_PREFIX="[$arch $apps $variant]"
fi
+ export ANDROID_PROMPT_PREFIX
+
+ # Inject build data into hardstatus
+ export PROMPT_COMMAND="$(echo $PROMPT_COMMAND | sed -e 's/\\033]0;\(.*\)\\007/\\033]0;$ANDROID_PROMPT_PREFIX \1\\007/g')"
fi
}
-function addcompletions()
+function check_bash_version()
{
- local T dir f
-
# Keep us from trying to run in something that isn't bash.
if [ -z "${BASH_VERSION}" ]; then
- return
+ return 1
fi
# Keep us from trying to run in bash that's too old.
- if [ ${BASH_VERSINFO[0]} -lt 3 ]; then
- return
+ if [ "${BASH_VERSINFO[0]}" -lt 4 ] ; then
+ return 2
fi
- dir="sdk/bash_completion"
- if [ -d ${dir} ]; then
- for f in `/bin/ls ${dir}/[a-z]*.bash 2> /dev/null`; do
- echo "including $f"
- . $f
- done
- fi
+ return 0
}
function choosetype()
@@ -472,23 +508,82 @@ function print_lunch_menu()
local uname=$(uname)
echo
echo "You're building on" $uname
+ if [ "$(uname)" = "Darwin" ] ; then
+ echo " (ohai, koush!)"
+ fi
echo
- echo "Lunch menu... pick a combo:"
+ if [ "z${CM_DEVICES_ONLY}" != "z" ]; then
+ echo "Breakfast menu... pick a combo:"
+ else
+ echo "Lunch menu... pick a combo:"
+ fi
local i=1
local choice
for choice in ${LUNCH_MENU_CHOICES[@]}
do
- echo " $i. $choice"
+ echo " $i. $choice "
i=$(($i+1))
- done
+ done | column
+
+ if [ "z${CM_DEVICES_ONLY}" != "z" ]; then
+ echo "... and don't forget the bacon!"
+ fi
echo
}
+function brunch()
+{
+ breakfast $*
+ if [ $? -eq 0 ]; then
+ mka bacon
+ else
+ echo "No such item in brunch menu. Try 'breakfast'"
+ return 1
+ fi
+ return $?
+}
+
+function breakfast()
+{
+ target=$1
+ local variant=$2
+ CM_DEVICES_ONLY="true"
+ unset LUNCH_MENU_CHOICES
+ add_lunch_combo full-eng
+ for f in `/bin/ls vendor/cm/vendorsetup.sh 2> /dev/null`
+ do
+ echo "including $f"
+ . $f
+ done
+ unset f
+
+ if [ $# -eq 0 ]; then
+ # No arguments, so let's have the full menu
+ lunch
+ else
+ echo "z$target" | grep -q "-"
+ if [ $? -eq 0 ]; then
+ # A buildtype was specified, assume a full device name
+ lunch $target
+ else
+ # This is probably just the CM model name
+ if [ -z "$variant" ]; then
+ variant="userdebug"
+ fi
+ lunch cm_$target-$variant
+ fi
+ fi
+ return $?
+}
+
+alias bib=breakfast
+
function lunch()
{
local answer
+ LUNCH_MENU_CHOICES=($(for l in ${LUNCH_MENU_CHOICES[@]}; do echo "$l"; done | sort))
if [ "$1" ] ; then
answer=$1
@@ -527,6 +622,17 @@ function lunch()
check_product $product
if [ $? -ne 0 ]
then
+ # if we can't find a product, try to grab it off the CM github
+ T=$(gettop)
+ pushd $T > /dev/null
+ build/tools/roomservice.py $product
+ popd > /dev/null
+ check_product $product
+ else
+ build/tools/roomservice.py $product true
+ fi
+ if [ $? -ne 0 ]
+ then
echo
echo "** Don't have a product spec for: '$product'"
echo "** Do you have the right repo manifest?"
@@ -555,6 +661,8 @@ function lunch()
echo
+ fixup_common_out_dir
+
set_stuff_for_environment
printconfig
}
@@ -570,7 +678,7 @@ function _lunch()
COMPREPLY=( $(compgen -W "${LUNCH_MENU_CHOICES[*]}" -- ${cur}) )
return 0
}
-complete -F _lunch lunch
+complete -F _lunch lunch 2>/dev/null
# Configures the build to build unbundled apps.
# Run tapas with one or more app names (from LOCAL_PACKAGE_NAME)
@@ -623,6 +731,57 @@ function tapas()
printconfig
}
+function eat()
+{
+ if [ "$OUT" ] ; then
+ MODVERSION=$(get_build_var CM_VERSION)
+ ZIPFILE=cm-$MODVERSION.zip
+ ZIPPATH=$OUT/$ZIPFILE
+ if [ ! -f $ZIPPATH ] ; then
+ echo "Nothing to eat"
+ return 1
+ fi
+ adb start-server # Prevent unexpected starting server message from adb get-state in the next line
+ if [ $(adb get-state) != device -a $(adb shell test -e /sbin/recovery 2> /dev/null; echo $?) != 0 ] ; then
+ echo "No device is online. Waiting for one..."
+ echo "Please connect USB and/or enable USB debugging"
+ until [ $(adb get-state) = device -o $(adb shell test -e /sbin/recovery 2> /dev/null; echo $?) = 0 ];do
+ sleep 1
+ done
+ echo "Device Found.."
+ fi
+ if (adb shell getprop ro.cm.device | grep -q "$CM_BUILD");
+ then
+ # if adbd isn't root we can't write to /cache/recovery/
+ adb root
+ sleep 1
+ adb wait-for-device
+ cat << EOF > /tmp/command
+--sideload_auto_reboot
+EOF
+ if adb push /tmp/command /cache/recovery/ ; then
+ echo "Rebooting into recovery for sideload installation"
+ adb reboot recovery
+ adb wait-for-sideload
+ adb sideload $ZIPPATH
+ fi
+ rm /tmp/command
+ else
+ echo "The connected device does not appear to be $CM_BUILD, run away!"
+ return 1
+ fi
+ return $?
+ else
+ echo "Nothing to eat"
+ fi
+}
+
+function omnom
+{
+ brunch $*
+ eat
+}
+
function gettop
{
local TOPFILE=build/core/envsetup.mk
@@ -706,7 +865,6 @@ function mm()
local M=$(findmakefile)
local MODULES=
local GET_INSTALL_PATH=
- local ARGS=
# Remove the path to top as the makefilepath needs to be relative
local M=`echo $M|sed 's:'$T'/::'`
if [ ! "$T" ]; then
@@ -723,12 +881,12 @@ function mm()
done
if [ -n "$GET_INSTALL_PATH" ]; then
MODULES=
- ARGS=GET-INSTALL-PATH
+ # set all args to 'GET-INSTALL-PATH'
+ set -- GET-INSTALL-PATH
else
MODULES=all_modules
- ARGS=$@
fi
- ONE_SHOT_MAKEFILE=$M $DRV make -C $T -f build/core/main.mk $MODULES $ARGS
+ ONE_SHOT_MAKEFILE=$M $DRV make -C $T -f build/core/main.mk $MODULES "$@"
fi
fi
}
@@ -743,8 +901,15 @@ function mmm()
local ARGS=
local DIR TO_CHOP
local GET_INSTALL_PATH=
- local DASH_ARGS=$(echo "$@" | awk -v RS=" " -v ORS=" " '/^-.*$/')
- local DIRS=$(echo "$@" | awk -v RS=" " -v ORS=" " '/^[^-].*$/')
+
+ if [ "$(__detect_shell)" = "zsh" ]; then
+ set -lA DASH_ARGS $(echo "$@" | awk -v RS=" " -v ORS=" " '/^-.*$/')
+ set -lA DIRS $(echo "$@" | awk -v RS=" " -v ORS=" " '/^[^-].*$/')
+ else
+ local DASH_ARGS=$(echo "$@" | awk -v RS=" " -v ORS=" " '/^-.*$/')
+ local DIRS=$(echo "$@" | awk -v RS=" " -v ORS=" " '/^[^-].*$/')
+ fi
+
for DIR in $DIRS ; do
MODULES=`echo $DIR | sed -n -e 's/.*:\(.*$\)/\1/p' | sed 's/,/ /'`
if [ "$MODULES" = "" ]; then
@@ -766,7 +931,12 @@ function mmm()
case $DIR in
showcommands | snod | dist | incrementaljavac | *=*) ARGS="$ARGS $DIR";;
GET-INSTALL-PATH) GET_INSTALL_PATH=$DIR;;
- *) echo "No Android.mk in $DIR."; return 1;;
+ *) if [ -d $DIR ]; then
+ echo "No Android.mk in $DIR.";
+ else
+ echo "Couldn't locate the directory $DIR";
+ fi
+ return 1;;
esac
fi
done
@@ -802,8 +972,13 @@ function mmma()
local T=$(gettop)
local DRV=$(getdriver $T)
if [ "$T" ]; then
- local DASH_ARGS=$(echo "$@" | awk -v RS=" " -v ORS=" " '/^-.*$/')
- local DIRS=$(echo "$@" | awk -v RS=" " -v ORS=" " '/^[^-].*$/')
+ if [ "$(__detect_shell)" = "zsh" ]; then
+ set -lA DASH_ARGS $(echo "$@" | awk -v RS=" " -v ORS=" " '/^-.*$/')
+ set -lA DIRS $(echo "$@" | awk -v RS=" " -v ORS=" " '/^[^-].*$/')
+ else
+ local DASH_ARGS=$(echo "$@" | awk -v RS=" " -v ORS=" " '/^-.*$/')
+ local DIRS=$(echo "$@" | awk -v RS=" " -v ORS=" " '/^[^-].*$/')
+ fi
local MY_PWD=`PWD= /bin/pwd`
if [ "$MY_PWD" = "$T" ]; then
MY_PWD=
@@ -844,6 +1019,15 @@ function croot()
fi
}
+function cout()
+{
+ if [ "$OUT" ]; then
+ cd $OUT
+ else
+ echo "Couldn't locate out directory. Try setting OUT."
+ fi
+}
+
function cproj()
{
TOPFILE=build/core/envsetup.mk
@@ -1057,6 +1241,106 @@ function is64bit()
fi
}
+function dddclient()
+{
+ local OUT_ROOT=$(get_abs_build_var PRODUCT_OUT)
+ local OUT_SYMBOLS=$(get_abs_build_var TARGET_OUT_UNSTRIPPED)
+ local OUT_SO_SYMBOLS=$(get_abs_build_var TARGET_OUT_SHARED_LIBRARIES_UNSTRIPPED)
+ local OUT_VENDOR_SO_SYMBOLS=$(get_abs_build_var TARGET_OUT_VENDOR_SHARED_LIBRARIES_UNSTRIPPED)
+ local OUT_EXE_SYMBOLS=$(get_symbols_directory)
+ local PREBUILTS=$(get_abs_build_var ANDROID_PREBUILTS)
+ local ARCH=$(get_build_var TARGET_ARCH)
+ local GDB
+ case "$ARCH" in
+ arm) GDB=arm-linux-androideabi-gdb;;
+ arm64) GDB=arm-linux-androideabi-gdb; GDB64=aarch64-linux-android-gdb;;
+ mips|mips64) GDB=mips64el-linux-android-gdb;;
+ x86) GDB=x86_64-linux-android-gdb;;
+ x86_64) GDB=x86_64-linux-android-gdb;;
+ *) echo "Unknown arch $ARCH"; return 1;;
+ esac
+
+ if [ "$OUT_ROOT" -a "$PREBUILTS" ]; then
+ local EXE="$1"
+ if [ "$EXE" ] ; then
+ EXE=$1
+ if [[ $EXE =~ ^[^/].* ]] ; then
+ EXE="system/bin/"$EXE
+ fi
+ else
+ EXE="app_process"
+ fi
+
+ local PORT="$2"
+ if [ "$PORT" ] ; then
+ PORT=$2
+ else
+ PORT=":5039"
+ fi
+
+ local PID="$3"
+ if [ "$PID" ] ; then
+ if [[ ! "$PID" =~ ^[0-9]+$ ]] ; then
+ PID=`pid $3`
+ if [[ ! "$PID" =~ ^[0-9]+$ ]] ; then
+ # that likely didn't work because of returning multiple processes
+ # try again, filtering by root processes (don't contain colon)
+ PID=`adb shell ps | \grep $3 | \grep -v ":" | awk '{print $2}'`
+ if [[ ! "$PID" =~ ^[0-9]+$ ]]
+ then
+ echo "Couldn't resolve '$3' to single PID"
+ return 1
+ else
+ echo ""
+ echo "WARNING: multiple processes matching '$3' observed, using root process"
+ echo ""
+ fi
+ fi
+ fi
+ adb forward "tcp$PORT" "tcp$PORT"
+ local USE64BIT="$(is64bit $PID)"
+ adb shell gdbserver$USE64BIT $PORT --attach $PID &
+ sleep 2
+ else
+ echo ""
+ echo "If you haven't done so already, do this first on the device:"
+ echo " gdbserver $PORT /system/bin/$EXE"
+ echo " or"
+ echo " gdbserver $PORT --attach <PID>"
+ echo ""
+ fi
+
+ OUT_SO_SYMBOLS=$OUT_SO_SYMBOLS$USE64BIT
+ OUT_VENDOR_SO_SYMBOLS=$OUT_VENDOR_SO_SYMBOLS$USE64BIT
+
+ echo >|"$OUT_ROOT/gdbclient.cmds" "set solib-absolute-prefix $OUT_SYMBOLS"
+ echo >>"$OUT_ROOT/gdbclient.cmds" "set solib-search-path $OUT_SO_SYMBOLS:$OUT_SO_SYMBOLS/hw:$OUT_SO_SYMBOLS/ssl/engines:$OUT_SO_SYMBOLS/drm:$OUT_SO_SYMBOLS/egl:$OUT_SO_SYMBOLS/soundfx:$OUT_VENDOR_SO_SYMBOLS:$OUT_VENDOR_SO_SYMBOLS/hw:$OUT_VENDOR_SO_SYMBOLS/egl"
+ echo >>"$OUT_ROOT/gdbclient.cmds" "source $ANDROID_BUILD_TOP/development/scripts/gdb/dalvik.gdb"
+ echo >>"$OUT_ROOT/gdbclient.cmds" "target remote $PORT"
+ # Enable special debugging for ART processes.
+ if [[ $EXE =~ (^|/)(app_process|dalvikvm)(|32|64)$ ]]; then
+ echo >> "$OUT_ROOT/gdbclient.cmds" "art-on"
+ fi
+ echo >>"$OUT_ROOT/gdbclient.cmds" ""
+
+ local WHICH_GDB=
+ # 64-bit exe found
+ if [ "$USE64BIT" != "" ] ; then
+ WHICH_GDB=$ANDROID_TOOLCHAIN/$GDB64
+ # 32-bit exe / 32-bit platform
+ elif [ "$(get_build_var TARGET_2ND_ARCH)" = "" ]; then
+ WHICH_GDB=$ANDROID_TOOLCHAIN/$GDB
+ # 32-bit exe / 64-bit platform
+ else
+ WHICH_GDB=$ANDROID_TOOLCHAIN_2ND_ARCH/$GDB
+ fi
+
+ ddd --debugger $WHICH_GDB -x "$OUT_ROOT/gdbclient.cmds" "$OUT_EXE_SYMBOLS/$EXE"
+ else
+ echo "Unable to determine build system output dir."
+ fi
+}
+
case `uname -s` in
Darwin)
function sgrep()
@@ -1399,6 +1683,707 @@ function godir () {
\cd $T/$pathname
}
+function cmremote()
+{
+ if ! git rev-parse --git-dir &> /dev/null
+ then
+ echo ".git directory not found. Please run this from the root directory of the Android repository you wish to set up."
+ return 1
+ fi
+ git remote rm cmremote 2> /dev/null
+ GERRIT_REMOTE=$(git config --get remote.github.projectname)
+ CMUSER=$(git config --get review.review.cyanogenmod.org.username)
+ if [ -z "$CMUSER" ]
+ then
+ git remote add cmremote ssh://review.cyanogenmod.org:29418/$GERRIT_REMOTE
+ else
+ git remote add cmremote ssh://$CMUSER@review.cyanogenmod.org:29418/$GERRIT_REMOTE
+ fi
+ echo "Remote 'cmremote' created"
+}
+
+function aospremote()
+{
+ if ! git rev-parse --git-dir &> /dev/null
+ then
+ echo ".git directory not found. Please run this from the root directory of the Android repository you wish to set up."
+ return 1
+ fi
+ git remote rm aosp 2> /dev/null
+ PROJECT=$(pwd -P | sed "s#$ANDROID_BUILD_TOP\/##")
+ if (echo $PROJECT | grep -qv "^device")
+ then
+ PFX="platform/"
+ fi
+ git remote add aosp https://android.googlesource.com/$PFX$PROJECT
+ echo "Remote 'aosp' created"
+}
+
+function cafremote()
+{
+ if ! git rev-parse --git-dir &> /dev/null
+ then
+ echo ".git directory not found. Please run this from the root directory of the Android repository you wish to set up."
+ return 1
+ fi
+ git remote rm caf 2> /dev/null
+ PROJECT=$(pwd -P | sed "s#$ANDROID_BUILD_TOP\/##")
+ if (echo $PROJECT | grep -qv "^device")
+ then
+ PFX="platform/"
+ fi
+ git remote add caf git://codeaurora.org/$PFX$PROJECT
+ echo "Remote 'caf' created"
+}
+
+function installboot()
+{
+ if [ ! -e "$OUT/recovery/root/etc/recovery.fstab" ];
+ then
+ echo "No recovery.fstab found. Build recovery first."
+ return 1
+ fi
+ if [ ! -e "$OUT/boot.img" ];
+ then
+ echo "No boot.img found. Run make bootimage first."
+ return 1
+ fi
+ PARTITION=`grep "^\/boot" $OUT/recovery/root/etc/recovery.fstab | awk {'print $3'}`
+ if [ -z "$PARTITION" ];
+ then
+ # Try for RECOVERY_FSTAB_VERSION = 2
+ PARTITION=`grep "[[:space:]]\/boot[[:space:]]" $OUT/recovery/root/etc/recovery.fstab | awk {'print $1'}`
+ PARTITION_TYPE=`grep "[[:space:]]\/boot[[:space:]]" $OUT/recovery/root/etc/recovery.fstab | awk {'print $3'}`
+ if [ -z "$PARTITION" ];
+ then
+ echo "Unable to determine boot partition."
+ return 1
+ fi
+ fi
+ adb start-server
+ adb wait-for-online
+ adb root
+ sleep 1
+ adb wait-for-online shell mount /system 2>&1 > /dev/null
+ adb wait-for-online remount
+ if (adb shell getprop ro.cm.device | grep -q "$CM_BUILD");
+ then
+ adb push $OUT/boot.img /cache/
+ for i in $OUT/system/lib/modules/*;
+ do
+ adb push $i /system/lib/modules/
+ done
+ adb shell dd if=/cache/boot.img of=$PARTITION
+ adb shell chmod 644 /system/lib/modules/*
+ echo "Installation complete."
+ else
+ echo "The connected device does not appear to be $CM_BUILD, run away!"
+ fi
+}
+
+function installrecovery()
+{
+ if [ ! -e "$OUT/recovery/root/etc/recovery.fstab" ];
+ then
+ echo "No recovery.fstab found. Build recovery first."
+ return 1
+ fi
+ if [ ! -e "$OUT/recovery.img" ];
+ then
+ echo "No recovery.img found. Run make recoveryimage first."
+ return 1
+ fi
+ PARTITION=`grep "^\/recovery" $OUT/recovery/root/etc/recovery.fstab | awk {'print $3'}`
+ if [ -z "$PARTITION" ];
+ then
+ # Try for RECOVERY_FSTAB_VERSION = 2
+ PARTITION=`grep "[[:space:]]\/recovery[[:space:]]" $OUT/recovery/root/etc/recovery.fstab | awk {'print $1'}`
+ PARTITION_TYPE=`grep "[[:space:]]\/recovery[[:space:]]" $OUT/recovery/root/etc/recovery.fstab | awk {'print $3'}`
+ if [ -z "$PARTITION" ];
+ then
+ echo "Unable to determine recovery partition."
+ return 1
+ fi
+ fi
+ adb start-server
+ adb wait-for-online
+ adb root
+ sleep 1
+ adb wait-for-online shell mount /system 2>&1 >> /dev/null
+ adb wait-for-online remount
+ if (adb shell getprop ro.cm.device | grep -q "$CM_BUILD");
+ then
+ adb push $OUT/recovery.img /cache/
+ adb shell dd if=/cache/recovery.img of=$PARTITION
+ echo "Installation complete."
+ else
+ echo "The connected device does not appear to be $CM_BUILD, run away!"
+ fi
+}
+
+function makerecipe() {
+ if [ -z "$1" ]
+ then
+ echo "No branch name provided."
+ return 1
+ fi
+ cd android
+ sed -i s/'default revision=.*'/'default revision="refs\/heads\/'$1'"'/ default.xml
+ git commit -a -m "$1"
+ cd ..
+
+ repo forall -c '
+
+ if [ "$REPO_REMOTE" = "github" ]
+ then
+ pwd
+ cmremote
+ git push cmremote HEAD:refs/heads/'$1'
+ fi
+ '
+}
+
+function cmgerrit() {
+
+ if [ "$(__detect_shell)" = "zsh" ]; then
+ # zsh does not define FUNCNAME, derive from funcstack
+ local FUNCNAME=$funcstack[1]
+ fi
+
+ if [ $# -eq 0 ]; then
+ $FUNCNAME help
+ return 1
+ fi
+ local user=`git config --get review.review.cyanogenmod.org.username`
+ local review=`git config --get remote.github.review`
+ local project=`git config --get remote.github.projectname`
+ local command=$1
+ shift
+ case $command in
+ help)
+ if [ $# -eq 0 ]; then
+ cat <<EOF
+Usage:
+ $FUNCNAME COMMAND [OPTIONS] [CHANGE-ID[/PATCH-SET]][{@|^|~|:}ARG] [-- ARGS]
+
+Commands:
+ fetch Just fetch the change as FETCH_HEAD
+ help Show this help, or for a specific command
+ pull Pull a change into current branch
+ push Push HEAD or a local branch to Gerrit for a specific branch
+
+Any other Git commands that support refname would work as:
+ git fetch URL CHANGE && git COMMAND OPTIONS FETCH_HEAD{@|^|~|:}ARG -- ARGS
+
+See '$FUNCNAME help COMMAND' for more information on a specific command.
+
+Example:
+ $FUNCNAME checkout -b topic 1234/5
+works as:
+ git fetch http://DOMAIN/p/PROJECT refs/changes/34/1234/5 \\
+ && git checkout -b topic FETCH_HEAD
+will checkout a new branch 'topic' base on patch-set 5 of change 1234.
+Patch-set 1 will be fetched if omitted.
+EOF
+ return
+ fi
+ case $1 in
+ __cmg_*) echo "For internal use only." ;;
+ changes|for)
+ if [ "$FUNCNAME" = "cmgerrit" ]; then
+ echo "'$FUNCNAME $1' is deprecated."
+ fi
+ ;;
+ help) $FUNCNAME help ;;
+ fetch|pull) cat <<EOF
+usage: $FUNCNAME $1 [OPTIONS] CHANGE-ID[/PATCH-SET]
+
+works as:
+ git $1 OPTIONS http://DOMAIN/p/PROJECT \\
+ refs/changes/HASH/CHANGE-ID/{PATCH-SET|1}
+
+Example:
+ $FUNCNAME $1 1234
+will $1 patch-set 1 of change 1234
+EOF
+ ;;
+ push) cat <<EOF
+usage: $FUNCNAME push [OPTIONS] [LOCAL_BRANCH:]REMOTE_BRANCH
+
+works as:
+ git push OPTIONS ssh://USER@DOMAIN:29418/PROJECT \\
+ {LOCAL_BRANCH|HEAD}:refs/for/REMOTE_BRANCH
+
+Example:
+ $FUNCNAME push fix6789:gingerbread
+will push local branch 'fix6789' to Gerrit for branch 'gingerbread'.
+HEAD will be pushed from local if omitted.
+EOF
+ ;;
+ *)
+ $FUNCNAME __cmg_err_not_supported $1 && return
+ cat <<EOF
+usage: $FUNCNAME $1 [OPTIONS] CHANGE-ID[/PATCH-SET][{@|^|~|:}ARG] [-- ARGS]
+
+works as:
+ git fetch http://DOMAIN/p/PROJECT \\
+ refs/changes/HASH/CHANGE-ID/{PATCH-SET|1} \\
+ && git $1 OPTIONS FETCH_HEAD{@|^|~|:}ARG -- ARGS
+EOF
+ ;;
+ esac
+ ;;
+ __cmg_get_ref)
+ $FUNCNAME __cmg_err_no_arg $command $# && return 1
+ local change_id patchset_id hash
+ case $1 in
+ */*)
+ change_id=${1%%/*}
+ patchset_id=${1#*/}
+ ;;
+ *)
+ change_id=$1
+ patchset_id=1
+ ;;
+ esac
+ hash=$(($change_id % 100))
+ case $hash in
+ [0-9]) hash="0$hash" ;;
+ esac
+ echo "refs/changes/$hash/$change_id/$patchset_id"
+ ;;
+ fetch|pull)
+ $FUNCNAME __cmg_err_no_arg $command $# help && return 1
+ $FUNCNAME __cmg_err_not_repo && return 1
+ local change=$1
+ shift
+ git $command $@ http://$review/p/$project \
+ $($FUNCNAME __cmg_get_ref $change) || return 1
+ ;;
+ push)
+ $FUNCNAME __cmg_err_no_arg $command $# help && return 1
+ $FUNCNAME __cmg_err_not_repo && return 1
+ if [ -z "$user" ]; then
+ echo >&2 "Gerrit username not found."
+ return 1
+ fi
+ local local_branch remote_branch
+ case $1 in
+ *:*)
+ local_branch=${1%:*}
+ remote_branch=${1##*:}
+ ;;
+ *)
+ local_branch=HEAD
+ remote_branch=$1
+ ;;
+ esac
+ shift
+ git push $@ ssh://$user@$review:29418/$project \
+ $local_branch:refs/for/$remote_branch || return 1
+ ;;
+ changes|for)
+ if [ "$FUNCNAME" = "cmgerrit" ]; then
+ echo >&2 "'$FUNCNAME $command' is deprecated."
+ fi
+ ;;
+ __cmg_err_no_arg)
+ if [ $# -lt 2 ]; then
+ echo >&2 "'$FUNCNAME $command' missing argument."
+ elif [ $2 -eq 0 ]; then
+ if [ -n "$3" ]; then
+ $FUNCNAME help $1
+ else
+ echo >&2 "'$FUNCNAME $1' missing argument."
+ fi
+ else
+ return 1
+ fi
+ ;;
+ __cmg_err_not_repo)
+ if [ -z "$review" -o -z "$project" ]; then
+ echo >&2 "Not currently in any reviewable repository."
+ else
+ return 1
+ fi
+ ;;
+ __cmg_err_not_supported)
+ $FUNCNAME __cmg_err_no_arg $command $# && return
+ case $1 in
+ #TODO: filter more git commands that don't use refname
+ init|add|rm|mv|status|clone|remote|bisect|config|stash)
+ echo >&2 "'$FUNCNAME $1' is not supported."
+ ;;
+ *) return 1 ;;
+ esac
+ ;;
+ #TODO: other special cases?
+ *)
+ $FUNCNAME __cmg_err_not_supported $command && return 1
+ $FUNCNAME __cmg_err_no_arg $command $# help && return 1
+ $FUNCNAME __cmg_err_not_repo && return 1
+ local args="$@"
+ local change pre_args refs_arg post_args
+ case "$args" in
+ *--\ *)
+ pre_args=${args%%-- *}
+ post_args="-- ${args#*-- }"
+ ;;
+ *) pre_args="$args" ;;
+ esac
+ args=($pre_args)
+ pre_args=
+ if [ ${#args[@]} -gt 0 ]; then
+ change=${args[${#args[@]}-1]}
+ fi
+ if [ ${#args[@]} -gt 1 ]; then
+ pre_args=${args[0]}
+ for ((i=1; i<${#args[@]}-1; i++)); do
+ pre_args="$pre_args ${args[$i]}"
+ done
+ fi
+ while ((1)); do
+ case $change in
+ ""|--)
+ $FUNCNAME help $command
+ return 1
+ ;;
+ *@*)
+ if [ -z "$refs_arg" ]; then
+ refs_arg="@${change#*@}"
+ change=${change%%@*}
+ fi
+ ;;
+ *~*)
+ if [ -z "$refs_arg" ]; then
+ refs_arg="~${change#*~}"
+ change=${change%%~*}
+ fi
+ ;;
+ *^*)
+ if [ -z "$refs_arg" ]; then
+ refs_arg="^${change#*^}"
+ change=${change%%^*}
+ fi
+ ;;
+ *:*)
+ if [ -z "$refs_arg" ]; then
+ refs_arg=":${change#*:}"
+ change=${change%%:*}
+ fi
+ ;;
+ *) break ;;
+ esac
+ done
+ $FUNCNAME fetch $change \
+ && git $command $pre_args FETCH_HEAD$refs_arg $post_args \
+ || return 1
+ ;;
+ esac
+}
+
+function cmrebase() {
+ local repo=$1
+ local refs=$2
+ local pwd="$(pwd)"
+ local dir="$(gettop)/$repo"
+
+ if [ -z $repo ] || [ -z $refs ]; then
+ echo "CyanogenMod Gerrit Rebase Usage: "
+ echo " cmrebase <path to project> <patch IDs on Gerrit>"
+ echo " The patch IDs appear on the Gerrit commands that are offered."
+ echo " They consist on a series of numbers and slashes, after the text"
+ echo " refs/changes. For example, the ID in the following command is 26/8126/2"
+ echo ""
+ echo " git[...]ges_apps_Camera refs/changes/26/8126/2 && git cherry-pick FETCH_HEAD"
+ echo ""
+ return
+ fi
+
+ if [ ! -d $dir ]; then
+ echo "Directory $dir doesn't exist in tree."
+ return
+ fi
+ cd $dir
+ repo=$(cat .git/config | grep git://github.com | awk '{ print $NF }' | sed s#git://github.com/##g)
+ echo "Starting branch..."
+ repo start tmprebase .
+ echo "Bringing it up to date..."
+ repo sync .
+ echo "Fetching change..."
+ git fetch "http://review.cyanogenmod.org/p/$repo" "refs/changes/$refs" && git cherry-pick FETCH_HEAD
+ if [ "$?" != "0" ]; then
+ echo "Error cherry-picking. Not uploading!"
+ return
+ fi
+ echo "Uploading..."
+ repo upload .
+ echo "Cleaning up..."
+ repo abandon tmprebase .
+ cd $pwd
+}
+
+function mka() {
+ local T=$(gettop)
+ if [ "$T" ]; then
+ case `uname -s` in
+ Darwin)
+ make -C $T -j `sysctl hw.ncpu|cut -d" " -f2` "$@"
+ ;;
+ *)
+ mk_timer schedtool -B -n 1 -e ionice -n 1 make -C $T -j$(cat /proc/cpuinfo | grep "^processor" | wc -l) "$@"
+ ;;
+ esac
+
+ else
+ echo "Couldn't locate the top of the tree. Try setting TOP."
+ fi
+}
+
+function cmka() {
+ if [ ! -z "$1" ]; then
+ for i in "$@"; do
+ case $i in
+ bacon|otapackage|systemimage)
+ mka installclean
+ mka $i
+ ;;
+ *)
+ mka clean-$i
+ mka $i
+ ;;
+ esac
+ done
+ else
+ mka clean
+ mka
+ fi
+}
+
+function mms() {
+ local T=$(gettop)
+ if [ -z "$T" ]
+ then
+ echo "Couldn't locate the top of the tree. Try setting TOP."
+ return 1
+ fi
+
+ case `uname -s` in
+ Darwin)
+ local NUM_CPUS=$(sysctl hw.ncpu|cut -d" " -f2)
+ ONE_SHOT_MAKEFILE="__none__" \
+ make -C $T -j $NUM_CPUS "$@"
+ ;;
+ *)
+ local NUM_CPUS=$(cat /proc/cpuinfo | grep "^processor" | wc -l)
+ ONE_SHOT_MAKEFILE="__none__" \
+ mk_timer schedtool -B -n 1 -e ionice -n 1 \
+ make -C $T -j $NUM_CPUS "$@"
+ ;;
+ esac
+}
+
+
+function repolastsync() {
+ RLSPATH="$ANDROID_BUILD_TOP/.repo/.repo_fetchtimes.json"
+ RLSLOCAL=$(date -d "$(stat -c %z $RLSPATH)" +"%e %b %Y, %T %Z")
+ RLSUTC=$(date -d "$(stat -c %z $RLSPATH)" -u +"%e %b %Y, %T %Z")
+ echo "Last repo sync: $RLSLOCAL / $RLSUTC"
+}
+
+function reposync() {
+ case `uname -s` in
+ Darwin)
+ repo sync -j 4 "$@"
+ ;;
+ *)
+ schedtool -B -n 1 -e ionice -n 1 `which repo` sync -j 4 "$@"
+ ;;
+ esac
+}
+
+function repodiff() {
+ if [ -z "$*" ]; then
+ echo "Usage: repodiff <ref-from> [[ref-to] [--numstat]]"
+ return
+ fi
+ diffopts=$* repo forall -c \
+ 'echo "$REPO_PATH ($REPO_REMOTE)"; git diff ${diffopts} 2>/dev/null ;'
+}
+
+# Return success if adb is up and not in recovery
+function _adb_connected {
+ {
+ if [[ "$(adb get-state)" == device &&
+ "$(adb shell test -e /sbin/recovery; echo $?)" == 0 ]]
+ then
+ return 0
+ fi
+ } 2>/dev/null
+
+ return 1
+};
+
+# Credit for color strip sed: http://goo.gl/BoIcm
+function dopush()
+{
+ local func=$1
+ shift
+
+ adb start-server # Prevent unexpected starting server message from adb get-state in the next line
+ if ! _adb_connected; then
+ echo "No device is online. Waiting for one..."
+ echo "Please connect USB and/or enable USB debugging"
+ until _adb_connected; do
+ sleep 1
+ done
+ echo "Device Found."
+ fi
+
+ if (adb shell getprop ro.cm.device | grep -q "$CM_BUILD") || [ "$FORCE_PUSH" = "true" ];
+ then
+ # retrieve IP and PORT info if we're using a TCP connection
+ TCPIPPORT=$(adb devices | egrep '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+:[0-9]+[^0-9]+' \
+ | head -1 | awk '{print $1}')
+ adb root &> /dev/null
+ sleep 0.3
+ if [ -n "$TCPIPPORT" ]
+ then
+ # adb root just killed our connection
+ # so reconnect...
+ adb connect "$TCPIPPORT"
+ fi
+ adb wait-for-device &> /dev/null
+ sleep 0.3
+ adb remount &> /dev/null
+
+ mkdir -p $OUT
+ ($func $*|tee $OUT/.log;return ${PIPESTATUS[0]})
+ ret=$?;
+ if [ $ret -ne 0 ]; then
+ rm -f $OUT/.log;return $ret
+ fi
+
+ # Install: <file>
+ if [ `uname` = "Linux" ]; then
+ LOC="$(cat $OUT/.log | sed -r 's/\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g' | grep '^Install: ' | cut -d ':' -f 2)"
+ else
+ LOC="$(cat $OUT/.log | sed -E "s/"$'\E'"\[([0-9]{1,3}((;[0-9]{1,3})*)?)?[m|K]//g" | grep '^Install: ' | cut -d ':' -f 2)"
+ fi
+
+ # Copy: <file>
+ if [ `uname` = "Linux" ]; then
+ LOC="$LOC $(cat $OUT/.log | sed -r 's/\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g' | grep '^Copy: ' | cut -d ':' -f 2)"
+ else
+ LOC="$LOC $(cat $OUT/.log | sed -E "s/"$'\E'"\[([0-9]{1,3}((;[0-9]{1,3})*)?)?[m|K]//g" | grep '^Copy: ' | cut -d ':' -f 2)"
+ fi
+
+ # If any files are going to /data, push an octal file permissions reader to device
+ if [ -n "$(echo $LOC | egrep '(^|\s)/data')" ]; then
+ CHKPERM="/data/local/tmp/chkfileperm.sh"
+(
+cat <<'EOF'
+#!/system/xbin/sh
+FILE=$@
+if [ -e $FILE ]; then
+ ls -l $FILE | awk '{k=0;for(i=0;i<=8;i++)k+=((substr($1,i+2,1)~/[rwx]/)*2^(8-i));if(k)printf("%0o ",k);print}' | cut -d ' ' -f1
+fi
+EOF
+) > $OUT/.chkfileperm.sh
+ echo "Pushing file permissions checker to device"
+ adb push $OUT/.chkfileperm.sh $CHKPERM
+ adb shell chmod 755 $CHKPERM
+ rm -f $OUT/.chkfileperm.sh
+ fi
+
+ stop_n_start=false
+ for FILE in $(echo $LOC | tr " " "\n"); do
+ # Make sure file is in $OUT/system or $OUT/data
+ case $FILE in
+ $OUT/system/*|$OUT/data/*)
+ # Get target file name (i.e. /system/bin/adb)
+ TARGET=$(echo $FILE | sed "s#$OUT##")
+ ;;
+ *) continue ;;
+ esac
+
+ case $TARGET in
+ /data/*)
+ # fs_config only sets permissions and se labels for files pushed to /system
+ if [ -n "$CHKPERM" ]; then
+ OLDPERM=$(adb shell $CHKPERM $TARGET)
+ OLDPERM=$(echo $OLDPERM | tr -d '\r' | tr -d '\n')
+ OLDOWN=$(adb shell ls -al $TARGET | awk '{print $2}')
+ OLDGRP=$(adb shell ls -al $TARGET | awk '{print $3}')
+ fi
+ echo "Pushing: $TARGET"
+ adb push $FILE $TARGET
+ if [ -n "$OLDPERM" ]; then
+ echo "Setting file permissions: $OLDPERM, $OLDOWN":"$OLDGRP"
+ adb shell chown "$OLDOWN":"$OLDGRP" $TARGET
+ adb shell chmod "$OLDPERM" $TARGET
+ else
+ echo "$TARGET did not exist previously, you should set file permissions manually"
+ fi
+ adb shell restorecon "$TARGET"
+ ;;
+ /system/priv-app/SystemUI/SystemUI.apk|/system/framework/*)
+ # Only need to stop services once
+ if ! $stop_n_start; then
+ adb shell stop
+ stop_n_start=true
+ fi
+ echo "Pushing: $TARGET"
+ adb push $FILE $TARGET
+ ;;
+ *)
+ echo "Pushing: $TARGET"
+ adb push $FILE $TARGET
+ ;;
+ esac
+ done
+ if [ -n "$CHKPERM" ]; then
+ adb shell rm $CHKPERM
+ fi
+ if $stop_n_start; then
+ adb shell start
+ fi
+ rm -f $OUT/.log
+ return 0
+ else
+ echo "The connected device does not appear to be $CM_BUILD, run away!"
+ fi
+}
+
+alias mmp='dopush mm'
+alias mmmp='dopush mmm'
+alias mmap='dopush mma'
+alias mkap='dopush mka'
+alias cmkap='dopush cmka'
+
+function repopick() {
+ T=$(gettop)
+ $T/build/tools/repopick.py $@
+}
+
+function fixup_common_out_dir() {
+ common_out_dir=$(get_build_var OUT_DIR)/target/common
+ target_device=$(get_build_var TARGET_DEVICE)
+ if [ ! -z $CM_FIXUP_COMMON_OUT ]; then
+ if [ -d ${common_out_dir} ] && [ ! -L ${common_out_dir} ]; then
+ mv ${common_out_dir} ${common_out_dir}-${target_device}
+ ln -s ${common_out_dir}-${target_device} ${common_out_dir}
+ else
+ [ -L ${common_out_dir} ] && rm ${common_out_dir}
+ mkdir -p ${common_out_dir}-${target_device}
+ ln -s ${common_out_dir}-${target_device} ${common_out_dir}
+ fi
+ else
+ [ -L ${common_out_dir} ] && rm ${common_out_dir}
+ mkdir -p ${common_out_dir}
+ fi
+}
+
# Force JAVA_HOME to point to java 1.7 if it isn't already set.
#
# Note that the MacOS path for java 1.7 includes a minor revision number (sigh).
@@ -1421,7 +2406,7 @@ function set_java_home() {
export JAVA_HOME=$(/usr/libexec/java_home -v 1.7)
;;
*)
- export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64
+ export JAVA_HOME=$(dirname $(dirname $(dirname $(readlink -f $(which java)))))
;;
esac
@@ -1437,9 +2422,9 @@ function pez {
local retval=$?
if [ $retval -ne 0 ]
then
- echo -e "\e[0;31mFAILURE\e[00m"
+ printf "\e[0;31mFAILURE\e[00m\n"
else
- echo -e "\e[0;32mSUCCESS\e[00m"
+ printf "\e[0;32mSUCCESS\e[00m\n"
fi
return $retval
}
@@ -1449,10 +2434,10 @@ function get_make_command()
echo command make
}
-function make()
+function mk_timer()
{
local start_time=$(date +"%s")
- $(get_make_command) "$@"
+ $@
local ret=$?
local end_time=$(date +"%s")
local tdiff=$(($end_time-$start_time))
@@ -1463,7 +2448,7 @@ function make()
if [ -n "$ncolors" ] && [ $ncolors -ge 8 ]; then
color_failed="\e[0;31m"
color_success="\e[0;32m"
- color_reset="\e[00m"
+ color_reset="\e[0m"
else
color_failed=""
color_success=""
@@ -1471,9 +2456,9 @@ function make()
fi
echo
if [ $ret -eq 0 ] ; then
- echo -n -e "${color_success}#### make completed successfully "
+ printf "${color_success}#### make completed successfully "
else
- echo -n -e "${color_failed}#### make failed to build some targets "
+ printf "${color_failed}#### make failed to build some targets "
fi
if [ $hours -gt 0 ] ; then
printf "(%02g:%02g:%02g (hh:mm:ss))" $hours $mins $secs
@@ -1482,19 +2467,34 @@ function make()
elif [ $secs -gt 0 ] ; then
printf "(%s seconds)" $secs
fi
- echo -e " ####${color_reset}"
- echo
+ printf " ####${color_reset}\n\n"
return $ret
}
-if [ "x$SHELL" != "x/bin/bash" ]; then
+function make()
+{
+ mk_timer $(get_make_command) "$@"
+}
+
+function __detect_shell() {
case `ps -o command -p $$` in
*bash*)
+ echo bash
+ ;;
+ *zsh*)
+ echo zsh
;;
*)
- echo "WARNING: Only bash is supported, use of other shell would lead to erroneous results"
+ echo unknown
+ return 1
;;
esac
+ return
+}
+
+
+if ! __detect_shell > /dev/null; then
+ echo "WARNING: Only bash and zsh are supported, use of other shell may lead to erroneous results"
fi
# Execute the contents of any vendorsetup.sh files we can find.
@@ -1506,4 +2506,17 @@ do
done
unset f
-addcompletions
+# Add completions
+check_bash_version && {
+ dirs="sdk/bash_completion vendor/cm/bash_completion"
+ for dir in $dirs; do
+ if [ -d ${dir} ]; then
+ for f in `/bin/ls ${dir}/[a-z]*.bash 2> /dev/null`; do
+ echo "including $f"
+ . $f
+ done
+ fi
+ done
+}
+
+export ANDROID_BUILD_TOP=$(gettop)
diff --git a/target/board/generic/sepolicy/property_contexts b/target/board/generic/sepolicy/property_contexts
index 09b9b06..a0a4020 100644
--- a/target/board/generic/sepolicy/property_contexts
+++ b/target/board/generic/sepolicy/property_contexts
@@ -1,2 +1,4 @@
qemu. u:object_r:qemu_prop:s0
+emu. u:object_r:qemu_prop:s0
+emulator. u:object_r:qemu_prop:s0
radio.noril u:object_r:radio_noril_prop:s0
diff --git a/target/product/base.mk b/target/product/base.mk
index 4c49e86..a9610d1 100644
--- a/target/product/base.mk
+++ b/target/product/base.mk
@@ -119,6 +119,7 @@ PRODUCT_PACKAGES += \
svc \
tc \
telecom \
+ tm \
vdc \
vold \
wm
diff --git a/target/product/core.mk b/target/product/core.mk
index d453303..c340acd 100644
--- a/target/product/core.mk
+++ b/target/product/core.mk
@@ -33,13 +33,11 @@ PRODUCT_PACKAGES += \
DownloadProviderUi \
Email \
ExactCalculator \
- Exchange2 \
ExternalStorageProvider \
FusedLocation \
InputDevices \
KeyChain \
Keyguard \
- LatinIME \
Launcher2 \
ManagedProvisioning \
PicoTts \
diff --git a/target/product/core_base.mk b/target/product/core_base.mk
index 03d33e1..86fb36c 100644
--- a/target/product/core_base.mk
+++ b/target/product/core_base.mk
@@ -16,14 +16,9 @@
# Note that components added here will be also shared in PDK. Components
# that should not be in PDK should be added in lower level like core.mk.
-PRODUCT_PROPERTY_OVERRIDES := \
- ro.config.notification_sound=OnTheHunt.ogg \
- ro.config.alarm_alert=Alarm_Classic.ogg
-
PRODUCT_PACKAGES += \
ContactsProvider \
DefaultContainerService \
- Home \
TelephonyProvider \
UserDictionaryProvider \
atrace \
diff --git a/target/product/core_minimal.mk b/target/product/core_minimal.mk
index 27c10af..e8f2620 100644
--- a/target/product/core_minimal.mk
+++ b/target/product/core_minimal.mk
@@ -98,16 +98,16 @@ PRODUCT_BOOT_JARS := \
# The order of PRODUCT_SYSTEM_SERVER_JARS matters.
PRODUCT_SYSTEM_SERVER_JARS := \
+ org.cyanogenmod.platform \
+ org.cyanogenmod.hardware \
services \
ethernet-service \
wifi-service
-# Adoptable external storage supports both ext4 and f2fs
+# Adoptable external storage f2fs support
PRODUCT_PACKAGES += \
- e2fsck \
- make_ext4fs \
fsck.f2fs \
- make_f2fs \
+ mkfs.f2fs \
PRODUCT_DEFAULT_PROPERTY_OVERRIDES += \
ro.zygote=zygote32
diff --git a/target/product/core_tiny.mk b/target/product/core_tiny.mk
index 0a92275..31e2ae3 100644
--- a/target/product/core_tiny.mk
+++ b/target/product/core_tiny.mk
@@ -55,8 +55,10 @@ PRODUCT_PACKAGES += \
ip6tables \
iptables \
gatekeeperd \
+ javax.btobex \
keystore \
keystore.default \
+ ld.mc \
libOpenMAXAL \
libOpenSLES \
libdownmix \
diff --git a/target/product/emulator.mk b/target/product/emulator.mk
index 7394d4f..cca29ab 100644
--- a/target/product/emulator.mk
+++ b/target/product/emulator.mk
@@ -47,7 +47,8 @@ PRODUCT_PACKAGES += \
power.goldfish \
fingerprint.ranchu \
fingerprintd \
- sensors.ranchu
+ sensors.ranchu \
+ e2fsck
PRODUCT_COPY_FILES += \
frameworks/native/data/etc/android.hardware.ethernet.xml:system/etc/permissions/android.hardware.ethernet.xml \
diff --git a/target/product/full_base.mk b/target/product/full_base.mk
index 65bdf0f..bac3e03 100644
--- a/target/product/full_base.mk
+++ b/target/product/full_base.mk
@@ -21,27 +21,8 @@
PRODUCT_PACKAGES := \
libfwdlockengine \
- OpenWnn \
- libWnnEngDic \
- libWnnJpnDic \
- libwnndict \
WAPPushManager
-PRODUCT_PACKAGES += \
- Galaxy4 \
- HoloSpiralWallpaper \
- LiveWallpapers \
- LiveWallpapersPicker \
- MagicSmokeWallpapers \
- NoiseField \
- PhaseBeam \
- PhotoTable
-
-# Additional settings used in all AOSP builds
-PRODUCT_PROPERTY_OVERRIDES := \
- ro.config.ringtone=Ring_Synth_04.ogg \
- ro.config.notification_sound=pixiedust.ogg
-
# Put en_US first in the list, so make it default.
PRODUCT_LOCALES := en_US
diff --git a/target/product/full_base_telephony.mk b/target/product/full_base_telephony.mk
index 9a2c63a..7988e9b 100644
--- a/target/product/full_base_telephony.mk
+++ b/target/product/full_base_telephony.mk
@@ -20,12 +20,7 @@
# entirely appropriate to inherit from for on-device configurations.
PRODUCT_PROPERTY_OVERRIDES := \
- keyguard.no_require_sim=true \
- ro.com.android.dataroaming=true
-
-PRODUCT_COPY_FILES := \
- device/generic/goldfish/data/etc/apns-conf.xml:system/etc/apns-conf.xml \
- frameworks/native/data/etc/handheld_core_hardware.xml:system/etc/permissions/handheld_core_hardware.xml
+ keyguard.no_require_sim=true
$(call inherit-product, $(SRC_TARGET_DIR)/product/aosp_base.mk)
$(call inherit-product, $(SRC_TARGET_DIR)/product/telephony.mk)
diff --git a/target/product/generic_no_telephony.mk b/target/product/generic_no_telephony.mk
index f6ccd2a..0f9935a 100644
--- a/target/product/generic_no_telephony.mk
+++ b/target/product/generic_no_telephony.mk
@@ -32,16 +32,12 @@ PRODUCT_PACKAGES := \
PRODUCT_PACKAGES += \
clatd \
clatd.conf \
+ javax.btobex \
pppd \
screenrecord
PRODUCT_PACKAGES += \
- librs_jni \
- libvideoeditor_jni \
- libvideoeditor_core \
- libvideoeditor_osal \
- libvideoeditor_videofilters \
- libvideoeditorplayer \
+ librs_jni
PRODUCT_PACKAGES += \
audio.primary.default \
diff --git a/target/product/languages_full.mk b/target/product/languages_full.mk
index 9d80b0e..d4410b3 100644
--- a/target/product/languages_full.mk
+++ b/target/product/languages_full.mk
@@ -21,4 +21,8 @@
# These are all the locales that have translations and are displayable
# by TextView in this branch.
+
PRODUCT_LOCALES := en_US en_AU en_IN fr_FR it_IT es_ES et_EE de_DE nl_NL cs_CZ pl_PL ja_JP zh_TW zh_CN zh_HK ru_RU ko_KR nb_NO es_US da_DK el_GR tr_TR pt_PT pt_BR rm_CH sv_SE bg_BG ca_ES en_GB fi_FI hi_IN hr_HR hu_HU in_ID iw_IL lt_LT lv_LV ro_RO sk_SK sl_SI sr_RS uk_UA vi_VN tl_PH ar_EG fa_IR th_TH sw_TZ ms_MY af_ZA zu_ZA am_ET hi_IN en_XA ar_XB fr_CA km_KH lo_LA ne_NP si_LK mn_MN hy_AM az_AZ ka_GE my_MM mr_IN ml_IN is_IS mk_MK ky_KG eu_ES gl_ES bn_BD ta_IN kn_IN te_IN uz_UZ ur_PK kk_KZ sq_AL gu_IN pa_IN
+
+# CyanogenMod
+PRODUCT_LOCALES += ast_ES lb_LU ku_IQ
diff --git a/target/product/sdk_phone_arm64.mk b/target/product/sdk_phone_arm64.mk
index a0cf6c1..1d13b9e 100644
--- a/target/product/sdk_phone_arm64.mk
+++ b/target/product/sdk_phone_arm64.mk
@@ -24,7 +24,7 @@ $(call inherit-product, $(SRC_TARGET_DIR)/product/sdk_base.mk)
$(call inherit-product, $(SRC_TARGET_DIR)/board/generic_arm64/device.mk)
# Overrides
-PRODUCT_BRAND := generic_arm64
+PRODUCT_BRAND := Android
PRODUCT_NAME := sdk_phone_arm64
PRODUCT_DEVICE := generic_arm64
PRODUCT_MODEL := Android SDK built for arm64
diff --git a/target/product/sdk_phone_armv7.mk b/target/product/sdk_phone_armv7.mk
index aeb4940..a0fa049 100644
--- a/target/product/sdk_phone_armv7.mk
+++ b/target/product/sdk_phone_armv7.mk
@@ -17,6 +17,6 @@
$(call inherit-product, $(SRC_TARGET_DIR)/product/sdk_base.mk)
# Overrides
-PRODUCT_BRAND := generic
+PRODUCT_BRAND := Android
PRODUCT_NAME := sdk_phone_armv7
PRODUCT_DEVICE := generic
diff --git a/target/product/sdk_phone_mips.mk b/target/product/sdk_phone_mips.mk
index 818491f..d7217a0 100644
--- a/target/product/sdk_phone_mips.mk
+++ b/target/product/sdk_phone_mips.mk
@@ -22,7 +22,7 @@
$(call inherit-product, $(SRC_TARGET_DIR)/product/sdk_base.mk)
# Overrides
-PRODUCT_BRAND := generic_mips
+PRODUCT_BRAND := Android
PRODUCT_NAME := sdk_phone_mips
PRODUCT_DEVICE := generic_mips
PRODUCT_MODEL := Android SDK for Mips
diff --git a/target/product/sdk_phone_mips64.mk b/target/product/sdk_phone_mips64.mk
index afdb2a8..8ddcb58 100644
--- a/target/product/sdk_phone_mips64.mk
+++ b/target/product/sdk_phone_mips64.mk
@@ -23,7 +23,7 @@ $(call inherit-product, $(SRC_TARGET_DIR)/product/core_64_bit.mk)
$(call inherit-product, $(SRC_TARGET_DIR)/product/sdk_base.mk)
# Overrides
-PRODUCT_BRAND := generic_mips64
+PRODUCT_BRAND := Android
PRODUCT_NAME := sdk_phone_mips64
PRODUCT_DEVICE := generic_mips64
PRODUCT_MODEL := Android SDK built for mips64
diff --git a/target/product/sdk_phone_x86.mk b/target/product/sdk_phone_x86.mk
index 95c49ab..a58d26f 100644
--- a/target/product/sdk_phone_x86.mk
+++ b/target/product/sdk_phone_x86.mk
@@ -22,7 +22,7 @@
$(call inherit-product, $(SRC_TARGET_DIR)/product/sdk_base.mk)
# Overrides
-PRODUCT_BRAND := generic_x86
+PRODUCT_BRAND := Android
PRODUCT_NAME := sdk_phone_x86
PRODUCT_DEVICE := generic_x86
PRODUCT_MODEL := Android SDK built for x86
diff --git a/target/product/sdk_phone_x86_64.mk b/target/product/sdk_phone_x86_64.mk
index 69e37af..c39b274 100644
--- a/target/product/sdk_phone_x86_64.mk
+++ b/target/product/sdk_phone_x86_64.mk
@@ -23,7 +23,7 @@ $(call inherit-product, $(SRC_TARGET_DIR)/product/core_64_bit.mk)
$(call inherit-product, $(SRC_TARGET_DIR)/product/sdk_base.mk)
# Overrides
-PRODUCT_BRAND := generic_x86_64
+PRODUCT_BRAND := Android
PRODUCT_NAME := sdk_phone_x86_64
PRODUCT_DEVICE := generic_x86_64
PRODUCT_MODEL := Android SDK built for x86_64
diff --git a/target/product/security/cm-devkey.x509.pem b/target/product/security/cm-devkey.x509.pem
new file mode 100644
index 0000000..b7a6ae4
--- /dev/null
+++ b/target/product/security/cm-devkey.x509.pem
@@ -0,0 +1,23 @@
+-----BEGIN CERTIFICATE-----
+MIID1TCCAr2gAwIBAgIJANO67t8hIti6MA0GCSqGSIb3DQEBBQUAMIGAMQswCQYD
+VQQGEwJVUzETMBEGA1UECAwKV2FzaGluZ3RvbjEQMA4GA1UEBwwHU2VhdHRsZTEX
+MBUGA1UECgwOQ3lhbm9nZW4sIEluYy4xGzAZBgNVBAsMElJlbGVhc2UgTWFuYWdl
+bWVudDEUMBIGA1UEAwwLRGV2ZWxvcG1lbnQwHhcNMTQwNDI4MjAyODM3WhcNNDEw
+OTEzMjAyODM3WjCBgDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCldhc2hpbmd0b24x
+EDAOBgNVBAcMB1NlYXR0bGUxFzAVBgNVBAoMDkN5YW5vZ2VuLCBJbmMuMRswGQYD
+VQQLDBJSZWxlYXNlIE1hbmFnZW1lbnQxFDASBgNVBAMMC0RldmVsb3BtZW50MIIB
+IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAz/V9RvYnr18fraPWNeQEZNeg
+Kc0A3QskImQyGY22EGBZ63KUxa6zAfAug0OYSjofVJRaTtdvBXjO/C71XZRh4wun
+xhOUAJt8zIJ0lRx8GMC0GHzePEnEVvoiu3zSAPHCNf5lmdhyhccMOtC18J+evPf4
+EVBb3cis+F1m6ZoZKPgSFBR5A9CV5Tai8iiZluGGg15Wt12Rp2vmbmQxiOJZxBs4
+Ps40XR5gjO1q4R3HiGnFyql9qeecwaTUWXAd76lhNiLUr7K8IRs+96i+t5vSKajB
+M8O99BtYyBtf8ItMnHSZJxtsMw+TFXNLmMtaQarpsjp0LLGuHb/vsrjgBPvzsQID
+AQABo1AwTjAdBgNVHQ4EFgQUTpNgXBqV7j+33bi8B80YLQq6EL8wHwYDVR0jBBgw
+FoAUTpNgXBqV7j+33bi8B80YLQq6EL8wDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0B
+AQUFAAOCAQEAVlVBNksK+1C3J8uQ9kVemYMozfbboV9c4PLbtVzNSO8vwZ3X5E4T
+2zfQPcFsfSMIa51a1tETBcDA6k+72xHZ+xEQJQNrX+o1F1RIIrXp0OKAz/k5cXyk
+OS0+nd0EXP/A1EW0m8N/X6E9wpRkIhfqtEsqeCf8GH7O9Ua2qHZ9zkTBpbAVH0oe
+ZWorHBdo3GdMJ5vcjFqnDdRs8F0BnZmjS+NrgXRLhLb6ZARS/bkUQyr5TX82dgG6
+vzvKsdKyX34gsKAsjxwLWo7XXgehFfjY+SGjjilJtardr+y/KlHNEw9s9aLe+Xny
+Qoa9j9Ut6/KwRaC6lSEQ7HZk6SdzFsdugA==
+-----END CERTIFICATE-----
diff --git a/target/product/security/cm.x509.pem b/target/product/security/cm.x509.pem
new file mode 100644
index 0000000..5ff19db
--- /dev/null
+++ b/target/product/security/cm.x509.pem
@@ -0,0 +1,21 @@
+-----BEGIN CERTIFICATE-----
+MIIDZDCCAkygAwIBAgIEUfbexjANBgkqhkiG9w0BAQUFADB0MQswCQYDVQQGEwJV
+UzELMAkGA1UECBMCV0ExEDAOBgNVBAcTB1NlYXR0bGUxFjAUBgNVBAoTDUN5YW5v
+Z2VuIEluYy4xFjAUBgNVBAsTDUN5YW5vZ2VuIEluYy4xFjAUBgNVBAMTDUN5YW5v
+Z2VuIEluYy4wHhcNMTMwNzI5MjEyOTQyWhcNNDAxMjE0MjEyOTQyWjB0MQswCQYD
+VQQGEwJVUzELMAkGA1UECBMCV0ExEDAOBgNVBAcTB1NlYXR0bGUxFjAUBgNVBAoT
+DUN5YW5vZ2VuIEluYy4xFjAUBgNVBAsTDUN5YW5vZ2VuIEluYy4xFjAUBgNVBAMT
+DUN5YW5vZ2VuIEluYy4wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCP
++K2NPqdZ6UmexXQ8tsc2TkLXYhiuEsifO66qlpwsTTw1522HcbKPVoPRr/JBXqOv
+E3K0HuZ/IsYYGsP/wJWWvpaWs+5xC+YkLkittK2uUzTqndpLFiDRAeICKpvDJI57
+Z0DkzVYXBPn+yw+x8ttjT/vWcJ3PEVYuew8MYPUOgKpdZlQtUBeoEBDSL8JPGViq
+e6jWOlSAWekhlgb+wb9RoXhu/v2HYzp89GG0sIrAgj7vZCior5XuFmm8eWhqUhTp
+TUBv/nNI/ORYt3G8IQyI2pJN1GNPAAv1uA5i4y/deX1x4GCWyN9feiD9fOj2oc3z
+Hwf5Frs9BjOb9XMXecbNAgMBAAEwDQYJKoZIhvcNAQEFBQADggEBAGudhFe9VnES
+fWynTYO4kWNlMC++yB6qD3pHW6HtBiaANj9wxrLCTFzf+boHNPeZ8HDkW01zAaQK
+fd9/fnGmHf4q/QvxrvGbnb3Fqhw+2hknbbMUoAa+Qp+2ouf9oJaNRquZ+rHEHX8g
+Rx8wGyvjaWYfQrwyZRgXj/Jrc/NXxQCmSJeexHVNXgQD6aOLHJYrJ+s+U/hwVNiM
+5L+psOh89itwt8DGGSLW16HjQKmPPbWbqxgnfRbOlxWrLDq3agcrskYpDP2aGGBA
+5STq/bvh9yZkrNYvMGzrXDhcJ44QRS8e1Jw/ZtfFvJD192e7KKVdy7CJWmOckCNK
+gl0KCQ3MBx4=
+-----END CERTIFICATE-----
diff --git a/target/product/telephony.mk b/target/product/telephony.mk
index 919d024..e8e454f 100644
--- a/target/product/telephony.mk
+++ b/target/product/telephony.mk
@@ -21,6 +21,8 @@ PRODUCT_PACKAGES := \
CarrierConfig \
Dialer \
CallLogBackup \
+ InCallUI \
+ Mms \
rild
PRODUCT_COPY_FILES := \
diff --git a/tools/adbs b/tools/adbs
index a8f06c0..9bd5160 100755
--- a/tools/adbs
+++ b/tools/adbs
@@ -14,6 +14,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from __future__ import print_function
+
import os
import os.path
import re
diff --git a/tools/apicheck/Android.mk b/tools/apicheck/Android.mk
index 1674a17..1505c8d 100644
--- a/tools/apicheck/Android.mk
+++ b/tools/apicheck/Android.mk
@@ -32,7 +32,7 @@ include $(BUILD_SYSTEM)/base_rules.mk
$(LOCAL_BUILT_MODULE): $(HOST_OUT_JAVA_LIBRARIES)/doclava$(COMMON_JAVA_PACKAGE_SUFFIX)
$(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/etc/apicheck | $(ACP)
- @echo "Copy: $(PRIVATE_MODULE) ($@)"
+ @echo -e ${CL_CYN}"Copy:"${CL_RST}" $(PRIVATE_MODULE) ($@)"
$(copy-file-to-new-target)
$(hide) chmod 755 $@
diff --git a/tools/buildinfo.sh b/tools/buildinfo.sh
index 5c199b8..fa717b7 100755
--- a/tools/buildinfo.sh
+++ b/tools/buildinfo.sh
@@ -20,10 +20,8 @@ echo "ro.build.user=$USER"
echo "ro.build.host=`hostname`"
echo "ro.build.tags=$BUILD_VERSION_TAGS"
echo "ro.build.flavor=$TARGET_BUILD_FLAVOR"
-echo "ro.product.model=$PRODUCT_MODEL"
echo "ro.product.brand=$PRODUCT_BRAND"
echo "ro.product.name=$PRODUCT_NAME"
-echo "ro.product.device=$TARGET_DEVICE"
echo "ro.product.board=$TARGET_BOOTLOADER_BOARD_NAME"
# These values are deprecated, use "ro.product.cpu.abilist"
@@ -39,21 +37,28 @@ echo "ro.product.cpu.abilist32=$TARGET_CPU_ABI_LIST_32_BIT"
echo "ro.product.cpu.abilist64=$TARGET_CPU_ABI_LIST_64_BIT"
echo "ro.product.manufacturer=$PRODUCT_MANUFACTURER"
-if [ -n "$PRODUCT_DEFAULT_LOCALE" ] ; then
+if [ -n "$PRODUCT_DEFAULT_LOCALE" ] && [ -z "$TARGET_SKIP_DEFAULT_LOCALE" ] ; then
echo "ro.product.locale=$PRODUCT_DEFAULT_LOCALE"
fi
echo "ro.wifi.channels=$PRODUCT_DEFAULT_WIFI_CHANNELS"
echo "ro.board.platform=$TARGET_BOARD_PLATFORM"
-echo "# ro.build.product is obsolete; use ro.product.device"
-echo "ro.build.product=$TARGET_DEVICE"
-
-echo "# Do not try to parse description, fingerprint, or thumbprint"
-echo "ro.build.description=$PRIVATE_BUILD_DESC"
-echo "ro.build.fingerprint=$BUILD_FINGERPRINT"
-if [ -n "$BUILD_THUMBPRINT" ] ; then
- echo "ro.build.thumbprint=$BUILD_THUMBPRINT"
+if [ "$TARGET_UNIFIED_DEVICE" == "" ] ; then
+ echo "# ro.build.product is obsolete; use ro.product.device"
+ echo "ro.build.product=$TARGET_DEVICE"
+ if [ -z "$TARGET_SKIP_PRODUCT_DEVICE" ] ; then
+ echo "ro.product.model=$PRODUCT_MODEL"
+ fi
+ echo "ro.product.device=$TARGET_DEVICE"
+ echo "# Do not try to parse description, fingerprint, or thumbprint"
+ echo "ro.build.description=$PRIVATE_BUILD_DESC"
+ echo "ro.build.fingerprint=$BUILD_FINGERPRINT"
+ if [ -n "$BUILD_THUMBPRINT" ] ; then
+ echo "ro.build.thumbprint=$BUILD_THUMBPRINT"
+ fi
fi
echo "ro.build.characteristics=$TARGET_AAPT_CHARACTERISTICS"
+echo "ro.cm.device=$CM_DEVICE"
+
echo "# end build properties"
diff --git a/tools/check_radio_versions.py b/tools/check_radio_versions.py
index ebe621f..2617424 100755
--- a/tools/check_radio_versions.py
+++ b/tools/check_radio_versions.py
@@ -14,8 +14,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from __future__ import print_function
+
import sys
-import os
try:
from hashlib import sha1
@@ -52,8 +53,9 @@ for item in sys.argv[2:]:
try:
f = open(fn + ".sha1")
except IOError:
- if not bad: print
- print "*** Error opening \"%s.sha1\"; can't verify %s" % (fn, key)
+ if not bad:
+ print()
+ print("*** Error opening \"%s.sha1\"; can't verify %s" % (fn, key))
bad = True
continue
for line in f:
@@ -63,17 +65,19 @@ for item in sys.argv[2:]:
versions[h] = v
if digest not in versions:
- if not bad: print
- print "*** SHA-1 hash of \"%s\" doesn't appear in \"%s.sha1\"" % (fn, fn)
+ if not bad:
+ print()
+ print("*** SHA-1 hash of \"%s\" doesn't appear in \"%s.sha1\"" % (fn, fn))
bad = True
continue
if versions[digest] not in values:
- if not bad: print
- print "*** \"%s\" is version %s; not any %s allowed by \"%s\"." % (
- fn, versions[digest], key, sys.argv[1])
+ if not bad:
+ print()
+ print("*** \"%s\" is version %s; not any %s allowed by \"%s\"." % (
+ fn, versions[digest], key, sys.argv[1]))
bad = True
if bad:
- print
+ print()
sys.exit(1)
diff --git a/tools/compare_fileslist.py b/tools/compare_fileslist.py
index 1f507d8..64ad3ae 100755
--- a/tools/compare_fileslist.py
+++ b/tools/compare_fileslist.py
@@ -15,7 +15,16 @@
# limitations under the License.
#
-import cgi, os, string, sys
+from __future__ import print_function
+
+import cgi, os, sys
+
+
+def iteritems(obj):
+ if hasattr(obj, 'iteritems'):
+ return obj.iteritems()
+ return obj.items()
+
def IsDifferent(row):
val = None
@@ -33,27 +42,27 @@ def main(argv):
data = {}
index = 0
for input in inputs:
- f = file(input, "r")
+ f = open(input)
lines = f.readlines()
f.close()
- lines = map(string.split, lines)
- lines = map(lambda (x,y): (y,int(x)), lines)
+ lines = [l.strip() for l in lines]
+ lines = [(x_y[1],int(x_y[0])) for x_y in lines]
for fn,sz in lines:
- if not data.has_key(fn):
+ if fn not in data:
data[fn] = {}
data[fn][index] = sz
index = index + 1
rows = []
- for fn,sizes in data.iteritems():
+ for fn,sizes in iteritems(data):
row = [fn]
for i in range(0,index):
- if sizes.has_key(i):
+ if i in sizes:
row.append(sizes[i])
else:
row.append(None)
rows.append(row)
rows = sorted(rows, key=lambda x: x[0])
- print """<html>
+ print("""<html>
<head>
<style type="text/css">
.fn, .sz, .z, .d {
@@ -78,27 +87,27 @@ def main(argv):
</style>
</head>
<body>
- """
- print "<table>"
- print "<tr>"
+ """)
+ print("<table>")
+ print("<tr>")
for input in inputs:
combo = input.split(os.path.sep)[1]
- print " <td class='fn'>%s</td>" % cgi.escape(combo)
- print "</tr>"
+ print(" <td class='fn'>%s</td>" % cgi.escape(combo))
+ print("</tr>")
for row in rows:
- print "<tr>"
+ print("<tr>")
for sz in row[1:]:
if not sz:
- print " <td class='z'>&nbsp;</td>"
+ print(" <td class='z'>&nbsp;</td>")
elif IsDifferent(row[1:]):
- print " <td class='d'>%d</td>" % sz
+ print(" <td class='d'>%d</td>" % sz)
else:
- print " <td class='sz'>%d</td>" % sz
- print " <td class='fn'>%s</td>" % cgi.escape(row[0])
- print "</tr>"
- print "</table>"
- print "</body></html>"
+ print(" <td class='sz'>%d</td>" % sz)
+ print(" <td class='fn'>%s</td>" % cgi.escape(row[0]))
+ print("</tr>")
+ print("</table>")
+ print("</body></html>")
if __name__ == '__main__':
main(sys.argv)
diff --git a/tools/device/AndroidBoard.mk.template b/tools/device/AndroidBoard.mk.template
new file mode 100644
index 0000000..55a36d5
--- /dev/null
+++ b/tools/device/AndroidBoard.mk.template
@@ -0,0 +1,8 @@
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+ALL_PREBUILT += $(INSTALLED_KERNEL_TARGET)
+
+# include the non-open-source counterpart to this file
+-include vendor/__MANUFACTURER__/__DEVICE__/AndroidBoardVendor.mk
diff --git a/tools/device/AndroidProducts.mk.template b/tools/device/AndroidProducts.mk.template
new file mode 100644
index 0000000..f31c5bf
--- /dev/null
+++ b/tools/device/AndroidProducts.mk.template
@@ -0,0 +1,2 @@
+PRODUCT_MAKEFILES := \
+ $(LOCAL_DIR)/device___DEVICE__.mk
diff --git a/tools/device/BoardConfig.mk.template b/tools/device/BoardConfig.mk.template
new file mode 100644
index 0000000..617673f
--- /dev/null
+++ b/tools/device/BoardConfig.mk.template
@@ -0,0 +1,31 @@
+USE_CAMERA_STUB := true
+
+# inherit from the proprietary version
+-include vendor/__MANUFACTURER__/__DEVICE__/BoardConfigVendor.mk
+
+TARGET_ARCH := arm
+TARGET_NO_BOOTLOADER := true
+TARGET_BOARD_PLATFORM := unknown
+TARGET_CPU_ABI := armeabi-v7a
+TARGET_CPU_ABI2 := armeabi
+TARGET_ARCH_VARIANT := armv7-a-neon
+TARGET_CPU_VARIANT := cortex-a7
+TARGET_CPU_SMP := true
+ARCH_ARM_HAVE_TLS_REGISTER := true
+
+TARGET_BOOTLOADER_BOARD_NAME := __DEVICE__
+
+BOARD_KERNEL_CMDLINE := __CMDLINE__
+BOARD_KERNEL_BASE := 0x__BASE__
+BOARD_KERNEL_PAGESIZE := __PAGE_SIZE__
+
+# fix this up by examining /proc/mtd on a running device
+BOARD_BOOTIMAGE_PARTITION_SIZE := 0x00380000
+BOARD_RECOVERYIMAGE_PARTITION_SIZE := 0x00480000
+BOARD_SYSTEMIMAGE_PARTITION_SIZE := 0x08c60000
+BOARD_USERDATAIMAGE_PARTITION_SIZE := 0x105c0000
+BOARD_FLASH_BLOCK_SIZE := 131072
+
+TARGET_PREBUILT_KERNEL := device/__MANUFACTURER__/__DEVICE__/kernel
+
+BOARD_HAS_NO_SELECT_BUTTON := true
diff --git a/tools/device/cm.mk.template b/tools/device/cm.mk.template
new file mode 100644
index 0000000..e07898a
--- /dev/null
+++ b/tools/device/cm.mk.template
@@ -0,0 +1,15 @@
+# Release name
+PRODUCT_RELEASE_NAME := __DEVICE__
+
+# Inherit some common CM stuff.
+$(call inherit-product, vendor/cm/config/common_full_phone.mk)
+
+# Inherit device configuration
+$(call inherit-product, device/__MANUFACTURER__/__DEVICE__/device___DEVICE__.mk)
+
+## Device identifier. This must come after all inclusions
+PRODUCT_DEVICE := __DEVICE__
+PRODUCT_NAME := cm___DEVICE__
+PRODUCT_BRAND := __MANUFACTURER__
+PRODUCT_MODEL := __DEVICE__
+PRODUCT_MANUFACTURER := __MANUFACTURER__
diff --git a/tools/device/device.mk.template b/tools/device/device.mk.template
new file mode 100644
index 0000000..91ffdc9
--- /dev/null
+++ b/tools/device/device.mk.template
@@ -0,0 +1,24 @@
+$(call inherit-product, $(SRC_TARGET_DIR)/product/languages_full.mk)
+
+# The gps config appropriate for this device
+$(call inherit-product, device/common/gps/gps_us_supl.mk)
+
+$(call inherit-product-if-exists, vendor/__MANUFACTURER__/__DEVICE__/__DEVICE__-vendor.mk)
+
+DEVICE_PACKAGE_OVERLAYS += device/__MANUFACTURER__/__DEVICE__/overlay
+
+
+ifeq ($(TARGET_PREBUILT_KERNEL),)
+ LOCAL_KERNEL := device/__MANUFACTURER__/__DEVICE__/kernel
+else
+ LOCAL_KERNEL := $(TARGET_PREBUILT_KERNEL)
+endif
+
+PRODUCT_COPY_FILES += \
+ $(LOCAL_KERNEL):kernel
+
+$(call inherit-product, build/target/product/full.mk)
+
+PRODUCT_BUILD_PROP_OVERRIDES += BUILD_UTC_DATE=0
+PRODUCT_NAME := full___DEVICE__
+PRODUCT_DEVICE := __DEVICE__
diff --git a/tools/device/mkvendor.sh b/tools/device/mkvendor.sh
new file mode 100755
index 0000000..18671c0
--- /dev/null
+++ b/tools/device/mkvendor.sh
@@ -0,0 +1,113 @@
+#!/bin/bash
+
+function usage
+{
+ echo Usage:
+ echo " $(basename $0) manufacturer device [boot.img]"
+ echo " The boot.img argument is the extracted recovery or boot image."
+ echo " The boot.img argument should not be provided for devices"
+ echo " that have non standard boot images (ie, Samsung)."
+ echo
+ echo Example:
+ echo " $(basename $0) motorola sholes ~/Downloads/recovery-sholes.img"
+ exit 0
+}
+
+MANUFACTURER=$1
+DEVICE=$2
+BOOTIMAGE=$3
+
+UNPACKBOOTIMG=$(which unpackbootimg)
+
+echo Arguments: $@
+
+if [ -z "$MANUFACTURER" ]
+then
+ usage
+fi
+
+if [ -z "$DEVICE" ]
+then
+ usage
+fi
+
+ANDROID_TOP=$(dirname $0)/../../../
+pushd $ANDROID_TOP > /dev/null
+ANDROID_TOP=$(pwd)
+popd > /dev/null
+
+TEMPLATE_DIR=$(dirname $0)
+pushd $TEMPLATE_DIR > /dev/null
+TEMPLATE_DIR=$(pwd)
+popd > /dev/null
+
+DEVICE_DIR=$ANDROID_TOP/device/$MANUFACTURER/$DEVICE
+
+if [ ! -z "$BOOTIMAGE" ]
+then
+ if [ -z "$UNPACKBOOTIMG" ]
+ then
+ echo unpackbootimg not found. Is your android build environment set up and have the host tools been built?
+ exit 0
+ fi
+
+ BOOTIMAGEFILE=$(basename $BOOTIMAGE)
+
+ echo Output will be in $DEVICE_DIR
+ mkdir -p $DEVICE_DIR
+
+ TMPDIR=/tmp/$(whoami)/bootimg
+ rm -rf $TMPDIR
+ mkdir -p $TMPDIR
+ cp $BOOTIMAGE $TMPDIR
+ pushd $TMPDIR > /dev/null
+ unpackbootimg -i $BOOTIMAGEFILE > /dev/null
+ mkdir ramdisk
+ pushd ramdisk > /dev/null
+ gunzip -c ../$BOOTIMAGEFILE-ramdisk.gz | cpio -i
+ popd > /dev/null
+ BASE=$(cat $TMPDIR/$BOOTIMAGEFILE-base)
+ CMDLINE=$(cat $TMPDIR/$BOOTIMAGEFILE-cmdline)
+ PAGESIZE=$(cat $TMPDIR/$BOOTIMAGEFILE-pagesize)
+ export SEDCMD="s#__CMDLINE__#$CMDLINE#g"
+ echo $SEDCMD > $TMPDIR/sedcommand
+ cp $TMPDIR/$BOOTIMAGEFILE-zImage $DEVICE_DIR/kernel
+ popd > /dev/null
+else
+ mkdir -p $DEVICE_DIR
+ touch $DEVICE_DIR/kernel
+ BASE=10000000
+ CMDLINE=no_console_suspend
+ PAGESIZE=00000800
+ export SEDCMD="s#__CMDLINE__#$CMDLINE#g"
+ echo $SEDCMD > $TMPDIR/sedcommand
+fi
+
+for file in $(find $TEMPLATE_DIR -name '*.template')
+do
+ OUTPUT_FILE=$DEVICE_DIR/$(basename $(echo $file | sed s/\\.template//g))
+ cat $file | sed s/__DEVICE__/$DEVICE/g | sed s/__MANUFACTURER__/$MANUFACTURER/g | sed -f $TMPDIR/sedcommand | sed s/__BASE__/$BASE/g | sed s/__PAGE_SIZE__/$PAGESIZE/g > $OUTPUT_FILE
+done
+
+if [ ! -z "$TMPDIR" ]
+then
+ RECOVERY_FSTAB=$TMPDIR/ramdisk/etc/recovery.fstab
+ if [ -f "$RECOVERY_FSTAB" ]
+ then
+ cp $RECOVERY_FSTAB $DEVICE_DIR/recovery.fstab
+ fi
+fi
+
+
+mv $DEVICE_DIR/device.mk $DEVICE_DIR/device_$DEVICE.mk
+
+echo Creating initial git repository.
+pushd $DEVICE_DIR
+git init
+git add .
+git commit -a -m "mkvendor.sh: Initial commit of $DEVICE"
+popd
+
+echo Done!
+echo Use the following command to set up your build environment:
+echo ' 'lunch cm_$DEVICE-eng
diff --git a/tools/device/recovery.fstab.template b/tools/device/recovery.fstab.template
new file mode 100644
index 0000000..41fb92e
--- /dev/null
+++ b/tools/device/recovery.fstab.template
@@ -0,0 +1,10 @@
+# mount point fstype device [device2]
+
+/boot mtd boot
+/cache yaffs2 cache
+/data yaffs2 userdata
+/misc mtd misc
+/recovery mtd recovery
+/sdcard vfat /dev/block/mmcblk0p1 /dev/block/mmcblk0
+/system yaffs2 system
+/sd-ext ext4 /dev/block/mmcblk0p2
diff --git a/tools/device/system.prop.template b/tools/device/system.prop.template
new file mode 100644
index 0000000..4113929
--- /dev/null
+++ b/tools/device/system.prop.template
@@ -0,0 +1,3 @@
+#
+# system.prop for __DEVICE__
+#
diff --git a/tools/diff_package_overlays.py b/tools/diff_package_overlays.py
index 0e2c773..687e1d0 100755
--- a/tools/diff_package_overlays.py
+++ b/tools/diff_package_overlays.py
@@ -34,11 +34,13 @@ Format of current_overlays.txt and previous_overlays.txt:
...
"""
+from __future__ import print_function
+
import sys
def main(argv):
if len(argv) != 4:
- print >> sys.stderr, __doc__
+ print(sys.stderr, __doc__)
sys.exit(1)
f = open(argv[1])
@@ -85,7 +87,7 @@ def main(argv):
# Print out the package names that have overlay change.
for r in result:
- print r
+ print(r)
if __name__ == "__main__":
main(sys.argv)
diff --git a/tools/event_log_tags.py b/tools/event_log_tags.py
index 645839e..93244a4 100644
--- a/tools/event_log_tags.py
+++ b/tools/event_log_tags.py
@@ -14,6 +14,8 @@
"""A module for reading and parsing event-log-tags files."""
+from __future__ import print_function
+
import re
import sys
@@ -55,7 +57,7 @@ class TagFile(object):
if file_object is None:
try:
file_object = open(filename, "rb")
- except (IOError, OSError), e:
+ except (IOError, OSError) as e:
self.AddError(str(e))
return
@@ -100,7 +102,7 @@ class TagFile(object):
self.tags.append(Tag(tag, tagname, description,
self.filename, self.linenum))
- except (IOError, OSError), e:
+ except (IOError, OSError) as e:
self.AddError(str(e))
@@ -130,6 +132,6 @@ def WriteOutput(output_file, data):
out = open(output_file, "wb")
out.write(data)
out.close()
- except (IOError, OSError), e:
- print >> sys.stderr, "failed to write %s: %s" % (output_file, e)
+ except (IOError, OSError) as e:
+ print("failed to write %s: %s" % (output_file, e), file=sys.stderr)
sys.exit(1)
diff --git a/tools/fileslist.py b/tools/fileslist.py
index a11efaa..1538a30 100755
--- a/tools/fileslist.py
+++ b/tools/fileslist.py
@@ -15,11 +15,13 @@
# limitations under the License.
#
+from __future__ import print_function
+
import operator, os, sys
def get_file_size(path):
st = os.lstat(path)
- return st.st_size;
+ return st.st_size
def main(argv):
output = []
@@ -39,7 +41,7 @@ def main(argv):
pass
output.sort(key=operator.itemgetter(0), reverse=True)
for row in output:
- print "%12d %s" % row
+ print("%12d %s" % row)
if __name__ == '__main__':
main(sys.argv)
diff --git a/tools/filter-product-graph.py b/tools/filter-product-graph.py
index b3a5b42..d6100d8 100755
--- a/tools/filter-product-graph.py
+++ b/tools/filter-product-graph.py
@@ -1,6 +1,8 @@
#!/usr/bin/env python
# vim: ts=2 sw=2 nocindent
+from __future__ import print_function
+
import re
import sys
@@ -55,13 +57,13 @@ def main():
deps = [dep for dep in deps if dep[1] in included]
infos = [info for info in infos if info[0] in included]
- print "digraph {"
- print "graph [ ratio=.5 ];"
+ print("digraph {")
+ print("graph [ ratio=.5 ];")
for dep in deps:
- print '"%s" -> "%s"' % dep
+ print('"%s" -> "%s"' % dep)
for info in infos:
- print '"%s"%s' % info
- print "}"
+ print('"%s"%s' % info)
+ print("}")
if __name__ == "__main__":
diff --git a/tools/findleaves.py b/tools/findleaves.py
index 3a9e508..d97ed74 100755
--- a/tools/findleaves.py
+++ b/tools/findleaves.py
@@ -20,12 +20,14 @@
# the search in a given subdirectory when the file is found.
#
+from __future__ import print_function
+
import os
import sys
def perform_find(mindepth, prune, dirlist, filename):
result = []
- pruneleaves = set(map(lambda x: os.path.split(x)[1], prune))
+ pruneleaves = set([os.path.split(x)[1] for x in prune])
for rootdir in dirlist:
rootdepth = rootdir.count("/")
for root, dirs, files in os.walk(rootdir, followlinks=True):
@@ -92,7 +94,7 @@ def main(argv):
results = list(set(perform_find(mindepth, prune, dirlist, filename)))
results.sort()
for r in results:
- print r
+ print(r)
if __name__ == "__main__":
main(sys.argv)
diff --git a/tools/generate-notice-files.py b/tools/generate-notice-files.py
index 4571b70..36630db 100755
--- a/tools/generate-notice-files.py
+++ b/tools/generate-notice-files.py
@@ -20,6 +20,9 @@ Generate the Android notice files, including both text and html files.
-h to display this usage message and exit.
"""
+
+from __future__ import print_function
+
from collections import defaultdict
import getopt
import hashlib
@@ -40,22 +43,22 @@ HTML_ESCAPE_TABLE = {
try:
opts, args = getopt.getopt(sys.argv[1:], "h")
-except getopt.GetoptError, err:
- print str(err)
- print __doc__
+except getopt.GetoptError as err:
+ print(str(err))
+ print(__doc__)
sys.exit(2)
for o, a in opts:
if o == "-h":
- print __doc__
+ print(__doc__)
sys.exit(2)
else:
- print >> sys.stderr, "unhandled option %s" % (o,)
+ print("unhandled option %s" % o, file=sys.stderr)
if len(args) != 4:
- print """need exactly four arguments, the two output files, the file title
- and the directory containing notices, not %d""" % (len(args),)
- print __doc__
+ print("""need exactly four arguments, the two output files, the file title
+ and the directory containing notices, not %d""" % len(args))
+ print(__doc__)
sys.exit(1)
def hexify(s):
@@ -107,13 +110,13 @@ def combine_notice_files_html(file_hash, input_dir, output_filename):
# Open the output file, and output the header pieces
output_file = open(output_filename, "wb")
- print >> output_file, "<html><head>"
- print >> output_file, HTML_OUTPUT_CSS
- print >> output_file, '</head><body topmargin="0" leftmargin="0" rightmargin="0" bottommargin="0">'
+ print("<html><head>", file=output_file)
+ print(HTML_OUTPUT_CSS, file=output_file)
+ print('</head><body topmargin="0" leftmargin="0" rightmargin="0" bottommargin="0">', file=output_file)
# Output our table of contents
- print >> output_file, '<div class="toc">'
- print >> output_file, "<ul>"
+ print('<div class="toc">', file=output_file)
+ print("<ul>", file=output_file)
# Flatten the list of lists into a single list of filenames
sorted_filenames = sorted(itertools.chain.from_iterable(file_hash.values()))
@@ -121,31 +124,31 @@ def combine_notice_files_html(file_hash, input_dir, output_filename):
# Print out a nice table of contents
for filename in sorted_filenames:
stripped_filename = SRC_DIR_STRIP_RE.sub(r"\1", filename)
- print >> output_file, '<li><a href="#id%d">%s</a></li>' % (id_table.get(filename), stripped_filename)
+ print('<li><a href="#id%d">%s</a></li>' % (id_table.get(filename), stripped_filename), file=output_file)
- print >> output_file, "</ul>"
- print >> output_file, "</div><!-- table of contents -->"
+ print("</ul>", file=output_file)
+ print("</div><!-- table of contents -->", file=output_file)
# Output the individual notice file lists
- print >>output_file, '<table cellpadding="0" cellspacing="0" border="0">'
+ print('<table cellpadding="0" cellspacing="0" border="0">', file=output_file)
for value in file_hash.values():
- print >> output_file, '<tr id="id%d"><td class="same-license">' % id_table.get(value[0])
- print >> output_file, '<div class="label">Notices for file(s):</div>'
- print >> output_file, '<div class="file-list">'
+ print('<tr id="id%d"><td class="same-license">' % id_table.get(value[0]), file=output_file)
+ print('<div class="label">Notices for file(s):</div>', file=output_file)
+ print('<div class="file-list">', file=output_file)
for filename in sorted(value):
- print >> output_file, "%s <br/>" % (SRC_DIR_STRIP_RE.sub(r"\1", filename))
- print >> output_file, "</div><!-- file-list -->"
- print >> output_file
- print >> output_file, '<pre class="license-text">'
- print >> output_file, html_escape(open(value[0]).read())
- print >> output_file, "</pre><!-- license-text -->"
- print >> output_file, "</td></tr><!-- same-license -->"
- print >> output_file
- print >> output_file
- print >> output_file
+ print("%s <br/>" % (SRC_DIR_STRIP_RE.sub(r"\1", filename)), file=output_file)
+ print("</div><!-- file-list -->", file=output_file)
+ print(file=output_file)
+ print('<pre class="license-text">', file=output_file)
+ print(html_escape(open(value[0]).read()), file=output_file)
+ print("</pre><!-- license-text -->", file=output_file)
+ print("</td></tr><!-- same-license -->", file=output_file)
+ print(file=output_file)
+ print(file=output_file)
+ print(file=output_file)
# Finish off the file output
- print >> output_file, "</table>"
- print >> output_file, "</body></html>"
+ print("</table>", file=output_file)
+ print("</body></html>", file=output_file)
output_file.close()
def combine_notice_files_text(file_hash, input_dir, output_filename, file_title):
@@ -153,14 +156,14 @@ def combine_notice_files_text(file_hash, input_dir, output_filename, file_title)
SRC_DIR_STRIP_RE = re.compile(input_dir + "(/.*).txt")
output_file = open(output_filename, "wb")
- print >> output_file, file_title
+ print(file_title, file=output_file)
for value in file_hash.values():
- print >> output_file, "============================================================"
- print >> output_file, "Notices for file(s):"
+ print("============================================================", file=output_file)
+ print("Notices for file(s):", file=output_file)
for filename in sorted(value):
- print >> output_file, SRC_DIR_STRIP_RE.sub(r"\1", filename)
- print >> output_file, "------------------------------------------------------------"
- print >> output_file, open(value[0]).read()
+ print(SRC_DIR_STRIP_RE.sub(r"\1", filename), file=output_file)
+ print("------------------------------------------------------------", file=output_file)
+ print(open(value[0]).read(), file=output_file)
output_file.close()
def main(args):
@@ -179,9 +182,9 @@ def main(args):
files_with_same_hash[file_md5sum].append(filename)
- print "Combining NOTICE files into HTML"
+ print("Combining NOTICE files into HTML")
combine_notice_files_html(files_with_same_hash, input_dir, html_output_file)
- print "Combining NOTICE files into text"
+ print("Combining NOTICE files into text")
combine_notice_files_text(files_with_same_hash, input_dir, txt_output_file, file_title)
if __name__ == "__main__":
diff --git a/tools/getb64key.py b/tools/getb64key.py
new file mode 100755
index 0000000..976a157
--- /dev/null
+++ b/tools/getb64key.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+
+from __future__ import print_function
+
+import base64
+import sys
+
+pkFile = open(sys.argv[1], 'rb').readlines()
+base64Key = b""
+inCert = False
+for line in pkFile:
+ if line.startswith(b"-"):
+ inCert = not inCert
+ continue
+
+ base64Key += line.strip()
+
+print(base64.b16encode(base64.b64decode(base64Key)).lower().decode())
diff --git a/tools/java-event-log-tags.py b/tools/java-event-log-tags.py
index f364751..24bad3c 100755
--- a/tools/java-event-log-tags.py
+++ b/tools/java-event-log-tags.py
@@ -23,37 +23,43 @@ tags in the given input file.
-h to display this usage message and exit.
"""
-import cStringIO
+from __future__ import print_function
+
import getopt
import os
import os.path
import re
import sys
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from io import StringIO
+
import event_log_tags
output_file = None
try:
opts, args = getopt.getopt(sys.argv[1:], "ho:")
-except getopt.GetoptError, err:
- print str(err)
- print __doc__
+except getopt.GetoptError as err:
+ print(str(err))
+ print(__doc__)
sys.exit(2)
for o, a in opts:
if o == "-h":
- print __doc__
+ print(__doc__)
sys.exit(2)
elif o == "-o":
output_file = a
else:
- print >> sys.stderr, "unhandled option %s" % (o,)
+ print("unhandled option %s" % o, file=sys.stderr)
sys.exit(1)
if len(args) != 2:
- print "need exactly two input files, not %d" % (len(args),)
- print __doc__
+ print("need exactly two input files, not %d" % len(args))
+ print(__doc__)
sys.exit(1)
fn = args[0]
@@ -85,10 +91,10 @@ if "javadoc_hide" in tagfile.options:
if tagfile.errors:
for fn, ln, msg in tagfile.errors:
- print >> sys.stderr, "%s:%d: error: %s" % (fn, ln, msg)
+ print("%s:%d: error: %s" % (fn, ln, msg), file=sys.stderr)
sys.exit(1)
-buffer = cStringIO.StringIO()
+buffer = StringIO()
buffer.write("/* This file is auto-generated. DO NOT MODIFY.\n"
" * Source file: %s\n"
" */\n\n" % (fn,))
@@ -143,7 +149,7 @@ for t in tagfile.tags:
buffer.write("\n }\n")
-buffer.write("}\n");
+buffer.write("}\n")
output_dir = os.path.dirname(output_file)
if not os.path.exists(output_dir):
diff --git a/tools/java-layers.py b/tools/java-layers.py
index b3aec2b..3b9d802 100755
--- a/tools/java-layers.py
+++ b/tools/java-layers.py
@@ -1,9 +1,18 @@
#!/usr/bin/env python
+from __future__ import print_function
+
import os
import re
import sys
+
+def itervalues(obj):
+ if hasattr(obj, 'itervalues'):
+ return obj.itervalues()
+ return obj.values()
+
+
def fail_with_usage():
sys.stderr.write("usage: java-layers.py DEPENDENCY_FILE SOURCE_DIRECTORIES...\n")
sys.stderr.write("\n")
@@ -69,27 +78,27 @@ class Dependencies:
if upper in deps:
recurse(obj, deps[upper], visited)
self.deps = deps
- self.parts = [(dep.lower.split('.'),dep) for dep in deps.itervalues()]
+ self.parts = [(dep.lower.split('.'),dep) for dep in itervalues(deps)]
# transitive closure of dependencies
- for dep in deps.itervalues():
+ for dep in itervalues(deps):
recurse(dep, dep, [])
# disallow everything from the low level components
- for dep in deps.itervalues():
+ for dep in itervalues(deps):
if dep.lowlevel:
- for d in deps.itervalues():
+ for d in itervalues(deps):
if dep != d and not d.legacy:
dep.transitive.add(d.lower)
# disallow the 'top' components everywhere but in their own package
- for dep in deps.itervalues():
+ for dep in itervalues(deps):
if dep.top and not dep.legacy:
- for d in deps.itervalues():
+ for d in itervalues(deps):
if dep != d and not d.legacy:
d.transitive.add(dep.lower)
- for dep in deps.itervalues():
+ for dep in itervalues(deps):
dep.transitive = set([x+"." for x in dep.transitive])
if False:
- for dep in deps.itervalues():
- print "-->", dep.lower, "-->", dep.transitive
+ for dep in itervalues(deps):
+ print("-->", dep.lower, "-->", dep.transitive)
# Lookup the dep object for the given package. If pkg is a subpackage
# of one with a rule, that one will be returned. If no matches are found,
@@ -117,7 +126,7 @@ class Dependencies:
def parse_dependency_file(filename):
global err
- f = file(filename)
+ f = open(filename)
lines = f.readlines()
f.close()
def lineno(s, i):
@@ -171,7 +180,7 @@ def find_java_files(srcs):
result = []
for d in srcs:
if d[0] == '@':
- f = file(d[1:])
+ f = open(d[1:])
result.extend([fn for fn in [s.strip() for s in f.readlines()]
if len(fn) != 0])
f.close()
@@ -188,7 +197,7 @@ IMPORT = re.compile("import\s+(.*)")
def examine_java_file(deps, filename):
global err
# Yes, this is a crappy java parser. Write a better one if you want to.
- f = file(filename)
+ f = open(filename)
text = f.read()
f.close()
text = COMMENTS.sub("", text)
@@ -218,8 +227,8 @@ def examine_java_file(deps, filename):
imports.append(m.group(1))
# Do the checking
if False:
- print filename
- print "'%s' --> %s" % (pkg, imports)
+ print(filename)
+ print("'%s' --> %s" % (pkg, imports))
dep = deps.lookup(pkg)
if not dep:
sys.stderr.write(("%s: Error: Package does not appear in dependency file: "
diff --git a/tools/merge-event-log-tags.py b/tools/merge-event-log-tags.py
index 64bad3f..7790048 100755
--- a/tools/merge-event-log-tags.py
+++ b/tools/merge-event-log-tags.py
@@ -24,7 +24,8 @@ and fails if they do.
-h to display this usage message and exit.
"""
-import cStringIO
+from __future__ import print_function
+
import getopt
try:
import hashlib
@@ -33,8 +34,20 @@ except ImportError:
import struct
import sys
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from io import StringIO
+
import event_log_tags
+
+def iteritems(obj):
+ if hasattr(obj, 'iteritems'):
+ return obj.iteritems()
+ return obj.items()
+
+
errors = []
warnings = []
@@ -48,21 +61,21 @@ ASSIGN_LIMIT = 1000000
try:
opts, args = getopt.getopt(sys.argv[1:], "ho:m:")
-except getopt.GetoptError, err:
- print str(err)
- print __doc__
+except getopt.GetoptError as err:
+ print(str(err))
+ print(__doc__)
sys.exit(2)
for o, a in opts:
if o == "-h":
- print __doc__
+ print(__doc__)
sys.exit(2)
elif o == "-o":
output_file = a
elif o == "-m":
pre_merged_file = a
else:
- print >> sys.stderr, "unhandled option %s" % (o,)
+ print("unhandled option %s" % (o,), file=sys.stderr)
sys.exit(1)
# Restrictions on tags:
@@ -133,12 +146,12 @@ for fn in args:
if errors:
for fn, ln, msg in errors:
- print >> sys.stderr, "%s:%d: error: %s" % (fn, ln, msg)
+ print("%s:%d: error: %s" % (fn, ln, msg), file=sys.stderr)
sys.exit(1)
if warnings:
for fn, ln, msg in warnings:
- print >> sys.stderr, "%s:%d: warning: %s" % (fn, ln, msg)
+ print("%s:%d: warning: %s" % (fn, ln, msg), file=sys.stderr)
# Python's hash function (a) isn't great and (b) varies between
# versions of python. Using md5 is overkill here but is the same from
@@ -154,14 +167,14 @@ def hashname(str):
# If we were provided pre-merged tags (w/ the -m option), then don't
# ever try to allocate one, just fail if we don't have a number
-for name, t in sorted(by_tagname.iteritems()):
+for name, t in sorted(iteritems(by_tagname)):
if t.tagnum is None:
if pre_merged_tags:
try:
t.tagnum = pre_merged_tags[t.tagname]
except KeyError:
- print >> sys.stderr, ("Error: Tag number not defined for tag `%s'."
- +" Have you done a full build?") % t.tagname
+ print("Error: Tag number not defined for tag `%s'."
+ " Have you done a full build?" % t.tagname, file=sys.stderr)
sys.exit(1)
else:
while True:
@@ -174,8 +187,8 @@ for name, t in sorted(by_tagname.iteritems()):
# by_tagnum should be complete now; we've assigned numbers to all tags.
-buffer = cStringIO.StringIO()
-for n, t in sorted(by_tagnum.iteritems()):
+buffer = StringIO()
+for n, t in sorted(iteritems(by_tagnum)):
if t.description:
buffer.write("%d %s %s\n" % (t.tagnum, t.tagname, t.description))
else:
diff --git a/tools/parsedeps.py b/tools/parsedeps.py
index 32d8ad7..d36442b 100755
--- a/tools/parsedeps.py
+++ b/tools/parsedeps.py
@@ -1,10 +1,16 @@
#!/usr/bin/env python
# vim: ts=2 sw=2
+from __future__ import print_function
+
import optparse
-import re
import sys
+try:
+ raw_input
+except NameError:
+ raw_input = input
+
class Dependency:
def __init__(self, tgt):
@@ -43,13 +49,15 @@ class Dependencies:
t.pos = pos
def get(self, tgt):
- if self.lines.has_key(tgt):
+ if tgt in self.lines:
return self.lines[tgt]
else:
return None
def __iter__(self):
- return self.lines.iteritems()
+ if hasattr(self.lines, 'iteritems'):
+ return self.lines.iteritems()
+ return iter(self.lines.items())
def trace(self, tgt, prereq):
self.__visit = self.__visit + 1
@@ -73,9 +81,9 @@ class Dependencies:
return result
def help():
- print "Commands:"
- print " dep TARGET Print the prerequisites for TARGET"
- print " trace TARGET PREREQ Print the paths from TARGET to PREREQ"
+ print("Commands:")
+ print(" dep TARGET Print the prerequisites for TARGET")
+ print(" trace TARGET PREREQ Print the paths from TARGET to PREREQ")
def main(argv):
@@ -87,7 +95,7 @@ def main(argv):
deps = Dependencies()
filename = args[0]
- print "Reading %s" % filename
+ print("Reading %s" % filename)
if True:
f = open(filename)
@@ -106,7 +114,7 @@ def main(argv):
deps.add(tgt, prereq)
f.close()
- print "Read %d dependencies. %d targets." % (deps.count, len(deps.lines))
+ print("Read %d dependencies. %d targets." % (deps.count, len(deps.lines)))
while True:
line = raw_input("target> ")
if not line.strip():
@@ -118,12 +126,12 @@ def main(argv):
d = deps.get(tgt)
if d:
for prereq in d.prereqs:
- print prereq.tgt
+ print(prereq.tgt)
elif len(split) == 3 and cmd == "trace":
tgt = split[1]
prereq = split[2]
if False:
- print "from %s to %s" % (tgt, prereq)
+ print("from %s to %s" % (tgt, prereq))
trace = deps.trace(tgt, prereq)
if trace:
width = 0
@@ -134,10 +142,10 @@ def main(argv):
for g in trace:
for t in g:
if t.pos:
- print t.tgt, " " * (width-len(t.tgt)), " #", t.pos
+ print(t.tgt, " " * (width-len(t.tgt)), " #", t.pos)
else:
- print t.tgt
- print
+ print(t.tgt)
+ print()
else:
help()
@@ -145,7 +153,6 @@ if __name__ == "__main__":
try:
main(sys.argv)
except KeyboardInterrupt:
- print
+ print()
except EOFError:
- print
-
+ print()
diff --git a/tools/post_process_props.py b/tools/post_process_props.py
index fa6106f..64af01d 100755
--- a/tools/post_process_props.py
+++ b/tools/post_process_props.py
@@ -16,6 +16,13 @@
import sys
+
+def iteritems(obj):
+ if hasattr(obj, 'iteritems'):
+ return obj.iteritems()
+ return obj.items()
+
+
# Usage: post_process_props.py file.prop [blacklist_key, ...]
# Blacklisted keys are removed from the property file, if present
@@ -27,15 +34,22 @@ PROP_VALUE_MAX = 91
# Put the modifications that you need to make into the /system/build.prop into this
# function. The prop object has get(name) and put(name,value) methods.
-def mangle_build_prop(prop):
+def mangle_build_prop(prop, overrides):
+ if len(overrides) == 0:
+ return
+ overridelist = overrides.replace(" ",",").split(",")
+ for proppair in overridelist:
+ values = proppair.split("=")
+ prop.put(values[0], values[1])
+
pass
# Put the modifications that you need to make into the /default.prop into this
# function. The prop object has get(name) and put(name,value) methods.
def mangle_default_prop(prop):
- # If ro.debuggable is 1, then enable adb on USB by default
- # (this is for userdebug builds)
- if prop.get("ro.debuggable") == "1":
+ # If ro.adb.secure is not 1, then enable adb on USB by default
+ # (this is for eng builds)
+ if prop.get("ro.adb.secure") != "1":
val = prop.get("persist.sys.usb.config")
if val == "":
val = "adb"
@@ -46,7 +60,7 @@ def mangle_default_prop(prop):
# default to "adb". That might not the right policy there, but it's better
# to be explicit.
if not prop.get("persist.sys.usb.config"):
- prop.put("persist.sys.usb.config", "none");
+ prop.put("persist.sys.usb.config", "none")
def validate(prop):
"""Validate the properties.
@@ -56,7 +70,7 @@ def validate(prop):
"""
check_pass = True
buildprops = prop.to_dict()
- for key, value in buildprops.iteritems():
+ for key, value in iteritems(buildprops):
# Check build properties' length.
if len(key) > PROP_NAME_MAX:
check_pass = False
@@ -110,6 +124,10 @@ class PropFile:
def main(argv):
filename = argv[1]
+ if (len(argv) > 2):
+ extraargs = argv[2]
+ else:
+ extraargs = ""
f = open(filename)
lines = f.readlines()
f.close()
@@ -117,7 +135,7 @@ def main(argv):
properties = PropFile(lines)
if filename.endswith("/build.prop"):
- mangle_build_prop(properties)
+ mangle_build_prop(properties, extraargs)
elif filename.endswith("/default.prop"):
mangle_default_prop(properties)
else:
@@ -128,7 +146,7 @@ def main(argv):
sys.exit(1)
# Drop any blacklisted keys
- for key in argv[2:]:
+ for key in argv[3:]:
properties.delete(key)
f = open(filename, 'w+')
diff --git a/tools/product_debug.py b/tools/product_debug.py
index ff2657c..1433a9a 100755
--- a/tools/product_debug.py
+++ b/tools/product_debug.py
@@ -14,13 +14,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import os
+from __future__ import print_function
+
+from operator import itemgetter
import re
import sys
+
+def iteritems(obj):
+ if hasattr(obj, 'iteritems'):
+ return obj.iteritems()
+ return obj.items()
+
+
def break_lines(key, val):
# these don't get split
- if key in ("PRODUCT_MODEL"):
+ if key in ("PRODUCT_MODEL",):
return (key,val)
return (key, "\n".join(val.split()))
@@ -42,8 +51,7 @@ def parse_variables(lines):
def render_variables(variables):
variables = dict(variables)
del variables["FILE"]
- variables = list(variables.iteritems())
- variables.sort(lambda a, b: cmp(a[0], b[0]))
+ variables = sorted(variables.items(), key=itemgetter(0))
return ("<table id='variables'>"
+ "\n".join([ "<tr><th>%(key)s</th><td>%(val)s</td></tr>" % { "key": key, "val": val }
for key,val in variables])
@@ -69,7 +77,7 @@ def render_original(variables, text):
return text
def read_file(fn):
- f = file(fn)
+ f = open(fn)
text = f.read()
f.close()
return text
@@ -104,7 +112,7 @@ def main(argv):
"variables": render_variables(variables),
"original": render_original(variables, original),
})
- print """<html>
+ print("""<html>
<head>
@@ -153,7 +161,7 @@ def main(argv):
%(variables)s
</body>
</html>
-""" % values
+""" % values)
if __name__ == "__main__":
main(sys.argv)
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index f2bf1e1..b699135 100755
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -22,10 +22,12 @@ add them to the zipfile.
Usage: add_img_to_target_files target_files
"""
+from __future__ import print_function
+
import sys
if sys.hexversion < 0x02070000:
- print >> sys.stderr, "Python 2.7 or newer is required."
+ print("Python 2.7 or newer is required.", file=sys.stderr)
sys.exit(1)
import datetime
@@ -51,7 +53,7 @@ def AddSystem(output_zip, prefix="IMAGES/", recovery_img=None, boot_img=None):
prebuilt_path = os.path.join(OPTIONS.input_tmp, prefix, "system.img")
if os.path.exists(prebuilt_path):
- print "system.img already exists in %s, no need to rebuild..." % (prefix,)
+ print("system.img already exists in %s, no need to rebuild..." % prefix)
return
def output_sink(fn, data):
@@ -60,7 +62,7 @@ def AddSystem(output_zip, prefix="IMAGES/", recovery_img=None, boot_img=None):
ofile.close()
if OPTIONS.rebuild_recovery:
- print "Building new recovery patch"
+ print("Building new recovery patch")
common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink, recovery_img,
boot_img, info_dict=OPTIONS.info_dict)
@@ -83,7 +85,7 @@ def AddVendor(output_zip, prefix="IMAGES/"):
prebuilt_path = os.path.join(OPTIONS.input_tmp, prefix, "vendor.img")
if os.path.exists(prebuilt_path):
- print "vendor.img already exists in %s, no need to rebuild..." % (prefix,)
+ print("vendor.img already exists in %s, no need to rebuild..." % prefix)
return
block_list = common.MakeTempFile(prefix="vendor-blocklist-", suffix=".map")
@@ -98,9 +100,32 @@ def BuildVendor(input_dir, info_dict, block_list=None):
file containing it."""
return CreateImage(input_dir, info_dict, "vendor", block_list=block_list)
+def AddOem(output_zip, prefix="IMAGES/"):
+ """Turn the contents of OEM into a oem image and store in it
+ output_zip."""
+
+ prebuilt_path = os.path.join(OPTIONS.input_tmp, prefix, "oem.img")
+ if os.path.exists(prebuilt_path):
+ print("oem.img already exists in %s, no need to rebuild..." % prefix)
+ return
+
+ block_list = common.MakeTempFile(prefix="oem-blocklist-", suffix=".map")
+ imgname = BuildOem(OPTIONS.input_tmp, OPTIONS.info_dict,
+ block_list=block_list)
+ with open(imgname, "rb") as f:
+ common.ZipWriteStr(output_zip, prefix + "oem.img", f.read())
+ with open(block_list, "rb") as f:
+ common.ZipWriteStr(output_zip, prefix + "oem.map", f.read())
+
+
+def BuildOem(input_dir, info_dict, block_list=None):
+ """Build the (sparse) oem image and return the name of a temp
+ file containing it."""
+ return CreateImage(input_dir, info_dict, "oem", block_list=block_list)
+
def CreateImage(input_dir, info_dict, what, block_list=None):
- print "creating " + what + ".img..."
+ print("creating " + what + ".img...")
img = common.MakeTempFile(prefix=what + "-", suffix=".img")
@@ -167,7 +192,7 @@ def AddUserdata(output_zip, prefix="IMAGES/"):
prebuilt_path = os.path.join(OPTIONS.input_tmp, prefix, "userdata.img")
if os.path.exists(prebuilt_path):
- print "userdata.img already exists in %s, no need to rebuild..." % (prefix,)
+ print("userdata.img already exists in %s, no need to rebuild..." % prefix)
return
image_props = build_image.ImagePropFromGlobalDict(OPTIONS.info_dict,
@@ -178,7 +203,7 @@ def AddUserdata(output_zip, prefix="IMAGES/"):
not image_props.get("partition_size")):
return
- print "creating userdata.img..."
+ print("creating userdata.img...")
# Use a fixed timestamp (01/01/2009) when packaging the image.
# Bug: 24377993
@@ -207,12 +232,59 @@ def AddUserdata(output_zip, prefix="IMAGES/"):
os.rmdir(temp_dir)
+def AddUserdataExtra(output_zip, prefix="IMAGES/"):
+ """Create extra userdata image and store it in output_zip."""
+
+ image_props = build_image.ImagePropFromGlobalDict(OPTIONS.info_dict,
+ "data_extra")
+
+ # The build system has to explicitly request extra userdata.
+ if "fs_type" not in image_props:
+ return
+
+ extra_name = image_props.get("partition_name", "extra")
+
+ prebuilt_path = os.path.join(OPTIONS.input_tmp, prefix, "userdata_%s.img" % extra_name)
+ if os.path.exists(prebuilt_path):
+ print("userdata_%s.img already exists in %s, no need to rebuild..." % (extra_name, prefix,))
+ return
+
+ # We only allow yaffs to have a 0/missing partition_size.
+ # Extfs, f2fs must have a size. Skip userdata_extra.img if no size.
+ if (not image_props.get("fs_type", "").startswith("yaffs") and
+ not image_props.get("partition_size")):
+ return
+
+ print("creating userdata_%s.img..." % extra_name)
+
+ # The name of the directory it is making an image out of matters to
+ # mkyaffs2image. So we create a temp dir, and within it we create an
+ # empty dir named "data", and build the image from that.
+ temp_dir = tempfile.mkdtemp()
+ user_dir = os.path.join(temp_dir, "data")
+ os.mkdir(user_dir)
+ img = tempfile.NamedTemporaryFile()
+
+ fstab = OPTIONS.info_dict["fstab"]
+ if fstab:
+ image_props["fs_type" ] = fstab["/data"].fs_type
+ succ = build_image.BuildImage(user_dir, image_props, img.name)
+ assert succ, "build userdata_%s.img image failed" % extra_name
+
+ # Disable size check since this fetches original data partition size
+ #common.CheckSize(img.name, "userdata_extra.img", OPTIONS.info_dict)
+ output_zip.write(img.name, prefix + "userdata_%s.img" % extra_name)
+ img.close()
+ os.rmdir(user_dir)
+ os.rmdir(temp_dir)
+
+
def AddCache(output_zip, prefix="IMAGES/"):
"""Create an empty cache image and store it in output_zip."""
prebuilt_path = os.path.join(OPTIONS.input_tmp, prefix, "cache.img")
if os.path.exists(prebuilt_path):
- print "cache.img already exists in %s, no need to rebuild..." % (prefix,)
+ print("cache.img already exists in %s, no need to rebuild..." % prefix)
return
image_props = build_image.ImagePropFromGlobalDict(OPTIONS.info_dict,
@@ -221,7 +293,7 @@ def AddCache(output_zip, prefix="IMAGES/"):
if "fs_type" not in image_props:
return
- print "creating cache.img..."
+ print("creating cache.img...")
# Use a fixed timestamp (01/01/2009) when packaging the image.
# Bug: 24377993
@@ -256,7 +328,7 @@ def AddImagesToTargetFiles(filename):
if not OPTIONS.add_missing:
for n in input_zip.namelist():
if n.startswith("IMAGES/"):
- print "target_files appears to already contain images."
+ print("target_files appears to already contain images.")
sys.exit(1)
try:
@@ -265,6 +337,12 @@ def AddImagesToTargetFiles(filename):
except KeyError:
has_vendor = False
+ try:
+ input_zip.getinfo("OEM/")
+ has_oem = True
+ except KeyError:
+ has_oem = False
+
OPTIONS.info_dict = common.LoadInfoDict(input_zip)
if "selinux_fc" in OPTIONS.info_dict:
OPTIONS.info_dict["selinux_fc"] = os.path.join(
@@ -275,13 +353,13 @@ def AddImagesToTargetFiles(filename):
compression=zipfile.ZIP_DEFLATED)
def banner(s):
- print "\n\n++++ " + s + " ++++\n\n"
+ print("\n\n++++ " + s + " ++++\n\n")
banner("boot")
prebuilt_path = os.path.join(OPTIONS.input_tmp, "IMAGES", "boot.img")
boot_image = None
if os.path.exists(prebuilt_path):
- print "boot.img already exists in IMAGES/, no need to rebuild..."
+ print("boot.img already exists in IMAGES/, no need to rebuild...")
if OPTIONS.rebuild_recovery:
boot_image = common.GetBootableImage(
"IMAGES/boot.img", "boot.img", OPTIONS.input_tmp, "BOOT")
@@ -295,7 +373,7 @@ def AddImagesToTargetFiles(filename):
recovery_image = None
prebuilt_path = os.path.join(OPTIONS.input_tmp, "IMAGES", "recovery.img")
if os.path.exists(prebuilt_path):
- print "recovery.img already exists in IMAGES/, no need to rebuild..."
+ print("recovery.img already exists in IMAGES/, no need to rebuild...")
if OPTIONS.rebuild_recovery:
recovery_image = common.GetBootableImage(
"IMAGES/recovery.img", "recovery.img", OPTIONS.input_tmp, "RECOVERY")
@@ -312,8 +390,14 @@ def AddImagesToTargetFiles(filename):
AddVendor(output_zip)
banner("userdata")
AddUserdata(output_zip)
+ banner("extrauserdata")
+ AddUserdataExtra(output_zip)
banner("cache")
AddCache(output_zip)
+ if has_oem:
+ banner("oem")
+ AddOem(output_zip)
+
common.ZipClose(output_zip)
@@ -347,16 +431,16 @@ def main(argv):
sys.exit(1)
AddImagesToTargetFiles(args[0])
- print "done."
+ print("done.")
if __name__ == '__main__':
try:
common.CloseInheritedPipes()
main(sys.argv[1:])
except common.ExternalError as e:
- print
- print " ERROR: %s" % (e,)
- print
+ print()
+ print(" ERROR: %s" % e)
+ print()
sys.exit(1)
finally:
common.Cleanup()
diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index cb6fc85..bb2f16d 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -379,7 +379,7 @@ class BlockImageDiff(object):
src_str.append("%s:%s" % (sh, sr.to_string_raw()))
stashes[sh] -= 1
if stashes[sh] == 0:
- free_string.append("free %s\n" % (sh))
+ free_string.append("free %s\n" % sh)
stashes.pop(sh)
heapq.heappush(free_stash_ids, sid)
@@ -483,7 +483,7 @@ class BlockImageDiff(object):
if free_string:
out.append("".join(free_string))
- if self.version >= 2:
+ if self.version >= 2 and common.OPTIONS.cache_size is not None:
# Sanity check: abort if we're going to need more stash space than
# the allowed size (cache_size * threshold). There are two purposes
# of having a threshold here. a) Part of the cache may have been
@@ -524,10 +524,16 @@ class BlockImageDiff(object):
if self.version >= 2:
max_stashed_size = max_stashed_blocks * self.tgt.blocksize
- max_allowed = common.OPTIONS.cache_size * common.OPTIONS.stash_threshold
- print("max stashed blocks: %d (%d bytes), limit: %d bytes (%.2f%%)\n" % (
- max_stashed_blocks, max_stashed_size, max_allowed,
- max_stashed_size * 100.0 / max_allowed))
+ OPTIONS = common.OPTIONS
+ if OPTIONS.cache_size is not None:
+ max_allowed = OPTIONS.cache_size * OPTIONS.stash_threshold
+ print("max stashed blocks: %d (%d bytes), "
+ "limit: %d bytes (%.2f%%)\n" % (
+ max_stashed_blocks, max_stashed_size, max_allowed,
+ max_stashed_size * 100.0 / max_allowed))
+ else:
+ print("max stashed blocks: %d (%d bytes), limit: <unknown>\n" % (
+ max_stashed_blocks, max_stashed_size))
def ReviseStashSize(self):
print("Revising stash size...")
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index 357a666..d712083 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -20,16 +20,23 @@ Build image output_image_file from input_directory and properties_file.
Usage: build_image input_directory properties_file output_image_file
"""
+
+from __future__ import print_function
+
import os
import os.path
import re
import subprocess
import sys
-import commands
import common
import shutil
import tempfile
+try:
+ from commands import getstatusoutput
+except ImportError:
+ from subprocess import getstatusoutput
+
OPTIONS = common.OPTIONS
FIXED_SALT = "aee087a5be3b982978c923f566a94613496b417f2af592639bc80d141e34dfe7"
@@ -42,18 +49,18 @@ def RunCommand(cmd):
Returns:
A tuple of the output and the exit code.
"""
- print "Running: ", " ".join(cmd)
+ print("Running: %s" % " ".join(cmd))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output, _ = p.communicate()
- print "%s" % (output.rstrip(),)
+ print("%s" % output.rstrip())
return (output, p.returncode)
def GetVerityTreeSize(partition_size):
cmd = "build_verity_tree -s %d"
cmd %= partition_size
- status, output = commands.getstatusoutput(cmd)
+ status, output = getstatusoutput(cmd)
if status:
- print output
+ print(output)
return False, 0
return True, int(output)
@@ -61,9 +68,9 @@ def GetVerityMetadataSize(partition_size):
cmd = "system/extras/verity/build_verity_metadata.py -s %d"
cmd %= partition_size
- status, output = commands.getstatusoutput(cmd)
+ status, output = getstatusoutput(cmd)
if status:
- print output
+ print(output)
return False, 0
return True, int(output)
@@ -87,10 +94,10 @@ def AdjustPartitionSizeForVerity(partition_size):
def BuildVerityTree(sparse_image_path, verity_image_path, prop_dict):
cmd = "build_verity_tree -A %s %s %s" % (
FIXED_SALT, sparse_image_path, verity_image_path)
- print cmd
- status, output = commands.getstatusoutput(cmd)
+ print(cmd)
+ status, output = getstatusoutput(cmd)
if status:
- print "Could not build verity tree! Error: %s" % output
+ print("Could not build verity tree! Error: %s" % output)
return False
root, salt = output.split()
prop_dict["verity_root_hash"] = root
@@ -99,15 +106,32 @@ def BuildVerityTree(sparse_image_path, verity_image_path, prop_dict):
def BuildVerityMetadata(image_size, verity_metadata_path, root_hash, salt,
block_device, signer_path, key):
+ verity_key = os.getenv("PRODUCT_VERITY_KEY", None)
+ verity_key_password = None
+
+ if verity_key and os.path.exists(verity_key+".pk8"):
+ verity_key_passwords = {}
+ verity_key_passwords.update(common.PasswordManager().GetPasswords(verity_key.split()))
+ verity_key_password = verity_key_passwords[verity_key]
+
cmd_template = (
"system/extras/verity/build_verity_metadata.py %s %s %s %s %s %s %s")
cmd = cmd_template % (image_size, verity_metadata_path, root_hash, salt,
block_device, signer_path, key)
- print cmd
- status, output = commands.getstatusoutput(cmd)
- if status:
- print "Could not build verity metadata! Error: %s" % output
+ print(cmd)
+ runcmd = ["system/extras/verity/build_verity_metadata.py", image_size, verity_metadata_path, root_hash, salt, block_device, signer_path, key];
+ if verity_key_password is not None:
+ sp = subprocess.Popen(runcmd, stdin=subprocess.PIPE)
+ sp.communicate(verity_key_password)
+ else:
+ sp = subprocess.Popen(runcmd)
+
+ sp.wait()
+
+ if sp.returncode != 0:
+ print("Could not build verity metadata!")
return False
+
return True
def Append2Simg(sparse_image_path, unsparse_image_path, error_message):
@@ -121,10 +145,10 @@ def Append2Simg(sparse_image_path, unsparse_image_path, error_message):
"""
cmd = "append2simg %s %s"
cmd %= (sparse_image_path, unsparse_image_path)
- print cmd
- status, output = commands.getstatusoutput(cmd)
+ print(cmd)
+ status, output = getstatusoutput(cmd)
if status:
- print "%s: %s" % (error_message, output)
+ print("%s: %s" % (error_message, output))
return False
return True
@@ -263,9 +287,13 @@ def BuildImage(in_dir, prop_dict, out_file, target_out=None):
build_command = ["mkuserimg.sh"]
if "extfs_sparse_flag" in prop_dict:
build_command.append(prop_dict["extfs_sparse_flag"])
- run_fsck = True
- build_command.extend([in_dir, out_file, fs_type,
- prop_dict["mount_point"]])
+ #run_fsck = True
+ if "is_userdataextra" in prop_dict:
+ build_command.extend([in_dir, out_file, fs_type,
+ "data"])
+ else:
+ build_command.extend([in_dir, out_file, fs_type,
+ prop_dict["mount_point"]])
build_command.append(prop_dict["partition_size"])
if "journal_size" in prop_dict:
build_command.extend(["-j", prop_dict["journal_size"]])
@@ -344,7 +372,7 @@ def BuildImage(in_dir, prop_dict, out_file, target_out=None):
ext4fs_stats = re.compile(
r'Created filesystem with .* (?P<used_blocks>[0-9]+)/'
r'(?P<total_blocks>[0-9]+) blocks')
- m = ext4fs_stats.match(ext4fs_output.strip().split('\n')[-1])
+ m = ext4fs_stats.match(ext4fs_output.strip().split(b'\n')[-1])
used_blocks = int(m.groupdict().get('used_blocks'))
total_blocks = int(m.groupdict().get('total_blocks'))
reserved_blocks = min(4096, int(total_blocks * 0.02))
@@ -367,7 +395,7 @@ def BuildImage(in_dir, prop_dict, out_file, target_out=None):
return False
if verity_supported and is_verity_partition:
if 2 * image_size - AdjustPartitionSizeForVerity(image_size) > partition_size:
- print "Error: No more room on %s to fit verity data" % mount_point
+ print("Error: No more room on %s to fit verity data" % mount_point)
return False
prop_dict["original_partition_size"] = prop_dict["partition_size"]
prop_dict["partition_size"] = str(image_size)
@@ -440,6 +468,11 @@ def ImagePropFromGlobalDict(glob_dict, mount_point):
copy_prop("fs_type", "fs_type")
copy_prop("userdata_fs_type", "fs_type")
copy_prop("userdata_size", "partition_size")
+ elif mount_point == "data_extra":
+ copy_prop("fs_type", "fs_type")
+ copy_prop("userdataextra_size", "partition_size")
+ copy_prop("userdataextra_name", "partition_name")
+ d["is_userdataextra"] = True
elif mount_point == "cache":
copy_prop("cache_fs_type", "fs_type")
copy_prop("cache_size", "partition_size")
@@ -474,7 +507,7 @@ def LoadGlobalDict(filename):
def main(argv):
if len(argv) != 4:
- print __doc__
+ print(__doc__)
sys.exit(1)
in_dir = argv[0]
@@ -501,14 +534,14 @@ def main(argv):
elif image_filename == "oem.img":
mount_point = "oem"
else:
- print >> sys.stderr, "error: unknown image file name ", image_filename
+ print("error: unknown image file name ", image_filename, file=sys.stderr)
exit(1)
image_properties = ImagePropFromGlobalDict(glob_dict, mount_point)
if not BuildImage(in_dir, image_properties, out_file, target_out):
- print >> sys.stderr, "error: failed to build %s from %s" % (out_file,
- in_dir)
+ print("error: failed to build %s from %s" % (out_file, in_dir),
+ file=sys.stderr)
exit(1)
diff --git a/tools/releasetools/check_target_files_signatures.py b/tools/releasetools/check_target_files_signatures.py
index 5c541ab..df3be2f 100755
--- a/tools/releasetools/check_target_files_signatures.py
+++ b/tools/releasetools/check_target_files_signatures.py
@@ -39,10 +39,12 @@ Usage: check_target_file_signatures [flags] target_files
"""
+from __future__ import print_function
+
import sys
if sys.hexversion < 0x02070000:
- print >> sys.stderr, "Python 2.7 or newer is required."
+ print("Python 2.7 or newer is required.", file=sys.stderr)
sys.exit(1)
import os
@@ -53,6 +55,13 @@ import zipfile
import common
+
+def iteritems(obj):
+ if hasattr(obj, 'iteritems'):
+ return obj.iteritems()
+ return obj.items()
+
+
# Work around a bug in python's zipfile module that prevents opening
# of zipfiles if any entry has an extra field of between 1 and 3 bytes
# (which is common with zipaligned APKs). This overrides the
@@ -81,9 +90,9 @@ def Pop():
def Banner(msg):
- print "-" * 70
- print " ", msg
- print "-" * 70
+ print("-" * 70)
+ print(" ", msg)
+ print("-" * 70)
def GetCertSubject(cert):
@@ -260,7 +269,7 @@ class TargetFiles(object):
"""Look for any instances where packages signed with different
certs request the same sharedUserId."""
apks_by_uid = {}
- for apk in self.apks.itervalues():
+ for apk in self.apks.values():
if apk.shared_uid:
apks_by_uid.setdefault(apk.shared_uid, []).append(apk)
@@ -275,15 +284,15 @@ class TargetFiles(object):
AddProblem("different cert sets for packages with uid %s" % (uid,))
- print "uid %s is shared by packages with different cert sets:" % (uid,)
+ print("uid %s is shared by packages with different cert sets:" % uid)
for apk in apks:
- print "%-*s [%s]" % (self.max_pkg_len, apk.package, apk.filename)
+ print("%-*s [%s]" % (self.max_pkg_len, apk.package, apk.filename))
for cert in apk.certs:
- print " ", ALL_CERTS.Get(cert)
- print
+ print(" ", ALL_CERTS.Get(cert))
+ print()
def CheckExternalSignatures(self):
- for apk_filename, certname in self.certmap.iteritems():
+ for apk_filename, certname in iteritems(self.certmap):
if certname == "EXTERNAL":
# Apps marked EXTERNAL should be signed with the test key
# during development, then manually re-signed after
@@ -299,26 +308,26 @@ class TargetFiles(object):
def PrintCerts(self):
"""Display a table of packages grouped by cert."""
by_cert = {}
- for apk in self.apks.itervalues():
+ for apk in self.apks.values():
for cert in apk.certs:
by_cert.setdefault(cert, []).append((apk.package, apk))
- order = [(-len(v), k) for (k, v) in by_cert.iteritems()]
+ order = [(-len(v), k) for (k, v) in iteritems(by_cert)]
order.sort()
for _, cert in order:
- print "%s:" % (ALL_CERTS.Get(cert),)
+ print("%s:" % ALL_CERTS.Get(cert))
apks = by_cert[cert]
apks.sort()
for _, apk in apks:
if apk.shared_uid:
- print " %-*s %-*s [%s]" % (self.max_fn_len, apk.filename,
+ print(" %-*s %-*s [%s]" % (self.max_fn_len, apk.filename,
self.max_pkg_len, apk.package,
- apk.shared_uid)
+ apk.shared_uid))
else:
- print " %-*s %-*s" % (self.max_fn_len, apk.filename,
- self.max_pkg_len, apk.package)
- print
+ print(" %-*s %-*s" % (self.max_fn_len, apk.filename,
+ self.max_pkg_len, apk.package))
+ print()
def CompareWith(self, other):
"""Look for instances where a given package that exists in both
@@ -339,12 +348,12 @@ class TargetFiles(object):
by_certpair.setdefault((other.apks[i].certs,
self.apks[i].certs), []).append(i)
else:
- print "%s [%s]: new APK (not in comparison target_files)" % (
- i, self.apks[i].filename)
+ print("%s [%s]: new APK (not in comparison target_files)" % (
+ i, self.apks[i].filename))
else:
if i in other.apks:
- print "%s [%s]: removed APK (only in comparison target_files)" % (
- i, other.apks[i].filename)
+ print("%s [%s]: removed APK (only in comparison target_files)" % (
+ i, other.apks[i].filename))
if by_certpair:
AddProblem("some APKs changed certs")
@@ -352,23 +361,23 @@ class TargetFiles(object):
for (old, new), packages in sorted(by_certpair.items()):
for i, o in enumerate(old):
if i == 0:
- print "was", ALL_CERTS.Get(o)
+ print("was", ALL_CERTS.Get(o))
else:
- print " ", ALL_CERTS.Get(o)
+ print(" ", ALL_CERTS.Get(o))
for i, n in enumerate(new):
if i == 0:
- print "now", ALL_CERTS.Get(n)
+ print("now", ALL_CERTS.Get(n))
else:
- print " ", ALL_CERTS.Get(n)
+ print(" ", ALL_CERTS.Get(n))
for i in sorted(packages):
old_fn = other.apks[i].filename
new_fn = self.apks[i].filename
if old_fn == new_fn:
- print " %-*s [%s]" % (max_pkg_len, i, old_fn)
+ print(" %-*s [%s]" % (max_pkg_len, i, old_fn))
else:
- print " %-*s [was: %s; now: %s]" % (max_pkg_len, i,
- old_fn, new_fn)
- print
+ print(" %-*s [was: %s; now: %s]" % (max_pkg_len, i,
+ old_fn, new_fn))
+ print()
def main(argv):
@@ -423,9 +432,9 @@ def main(argv):
target_files.CompareWith(compare_files)
if PROBLEMS:
- print "%d problem(s) found:\n" % (len(PROBLEMS),)
+ print("%d problem(s) found:\n" % len(PROBLEMS))
for p in PROBLEMS:
- print p
+ print(p)
return 1
return 0
@@ -436,7 +445,7 @@ if __name__ == '__main__':
r = main(sys.argv[1:])
sys.exit(r)
except common.ExternalError as e:
- print
- print " ERROR: %s" % (e,)
- print
+ print()
+ print(" ERROR: %s" % e)
+ print()
sys.exit(1)
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 94eacc2..32bbc68 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from __future__ import print_function
+
import copy
import errno
import getopt
@@ -34,6 +36,17 @@ import rangelib
from hashlib import sha1 as sha1
+try:
+ raw_input
+except NameError:
+ raw_input = input
+
+
+def iteritems(obj):
+ if hasattr(obj, 'iteritems'):
+ return obj.iteritems()
+ return obj.items()
+
class Options(object):
def __init__(self):
@@ -70,6 +83,10 @@ OPTIONS = Options()
# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
+# Stash size cannot exceed cache_size * threshold.
+OPTIONS.cache_size = None
+OPTIONS.stash_threshold = 0.8
+
class ExternalError(RuntimeError):
pass
@@ -79,7 +96,7 @@ def Run(args, **kwargs):
"""Create and return a subprocess.Popen object, printing the command
line on the terminal if -v was specified."""
if OPTIONS.verbose:
- print " running: ", " ".join(args)
+ print(" running: ", " ".join(args))
return subprocess.Popen(args, **kwargs)
@@ -150,6 +167,8 @@ def LoadInfoDict(input_file):
if "fstab_version" not in d:
d["fstab_version"] = "1"
+ if "device_type" not in d:
+ d["device_type"] = "MMC"
try:
data = read_helper("META/imagesizes.txt")
for line in data.split("\n"):
@@ -179,7 +198,7 @@ def LoadInfoDict(input_file):
makeint("boot_size")
makeint("fstab_version")
- d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"])
+ d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"], d["device_type"])
d["build.prop"] = LoadBuildProp(read_helper)
return d
@@ -187,7 +206,7 @@ def LoadBuildProp(read_helper):
try:
data = read_helper("SYSTEM/build.prop")
except KeyError:
- print "Warning: could not find SYSTEM/build.prop in %s" % zip
+ print("Warning: could not find SYSTEM/build.prop in %s" % zip)
data = ""
return LoadDictionaryFromLines(data.split("\n"))
@@ -202,7 +221,7 @@ def LoadDictionaryFromLines(lines):
d[name] = value
return d
-def LoadRecoveryFSTab(read_helper, fstab_version):
+def LoadRecoveryFSTab(read_helper, fstab_version, type):
class Partition(object):
def __init__(self, mount_point, fs_type, device, length, device2, context):
self.mount_point = mount_point
@@ -215,7 +234,7 @@ def LoadRecoveryFSTab(read_helper, fstab_version):
try:
data = read_helper("RECOVERY/RAMDISK/etc/recovery.fstab")
except KeyError:
- print "Warning: could not find RECOVERY/RAMDISK/etc/recovery.fstab"
+ print("Warning: could not find RECOVERY/RAMDISK/etc/recovery.fstab")
data = ""
if fstab_version == 1:
@@ -247,11 +266,12 @@ def LoadRecoveryFSTab(read_helper, fstab_version):
if i.startswith("length="):
length = int(i[7:])
else:
- print "%s: unknown option \"%s\"" % (mount_point, i)
+ print("%s: unknown option \"%s\"" % (mount_point, i))
- d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1],
- device=pieces[2], length=length,
- device2=device2)
+ if not d.get(mount_point):
+ d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1],
+ device=pieces[2], length=length,
+ device2=device2)
elif fstab_version == 2:
d = {}
@@ -287,9 +307,10 @@ def LoadRecoveryFSTab(read_helper, fstab_version):
context = i
mount_point = pieces[1]
- d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
- device=pieces[0], length=length,
- device2=None, context=context)
+ if not d.get(mount_point):
+ d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
+ device=pieces[0], length=length,
+ device2=None, context=context)
else:
raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,))
@@ -299,7 +320,7 @@ def LoadRecoveryFSTab(read_helper, fstab_version):
def DumpInfoDict(d):
for k, v in sorted(d.items()):
- print "%-25s = (%s) %s" % (k, type(v).__name__, v)
+ print("%-25s = (%s) %s" % (k, type(v).__name__, v))
def BuildBootableImage(sourcedir, fs_config_file, info_dict=None):
@@ -317,6 +338,9 @@ def BuildBootableImage(sourcedir, fs_config_file, info_dict=None):
ramdisk_img = tempfile.NamedTemporaryFile()
img = tempfile.NamedTemporaryFile()
+ bootimg_key = os.getenv("PRODUCT_PRIVATE_KEY", None)
+ verity_key = os.getenv("PRODUCT_VERITY_KEY", None)
+ custom_boot_signer = os.getenv("PRODUCT_BOOT_SIGNER", None)
if os.access(fs_config_file, os.F_OK):
cmd = ["mkbootfs", "-f", fs_config_file, os.path.join(sourcedir, "RAMDISK")]
@@ -331,49 +355,124 @@ def BuildBootableImage(sourcedir, fs_config_file, info_dict=None):
assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)
- # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
- mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
-
- cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]
-
- fn = os.path.join(sourcedir, "second")
+ """check if uboot is requested"""
+ fn = os.path.join(sourcedir, "ubootargs")
if os.access(fn, os.F_OK):
- cmd.append("--second")
- cmd.append(fn)
+ cmd = ["mkimage"]
+ for argument in open(fn).read().rstrip("\n").split(" "):
+ cmd.append(argument)
+ cmd.append("-d")
+ cmd.append(os.path.join(sourcedir, "kernel")+":"+ramdisk_img.name)
+ cmd.append(img.name)
- fn = os.path.join(sourcedir, "cmdline")
- if os.access(fn, os.F_OK):
- cmd.append("--cmdline")
- cmd.append(open(fn).read().rstrip("\n"))
-
- fn = os.path.join(sourcedir, "base")
- if os.access(fn, os.F_OK):
- cmd.append("--base")
- cmd.append(open(fn).read().rstrip("\n"))
-
- fn = os.path.join(sourcedir, "pagesize")
- if os.access(fn, os.F_OK):
- cmd.append("--pagesize")
- cmd.append(open(fn).read().rstrip("\n"))
-
- args = info_dict.get("mkbootimg_args", None)
- if args and args.strip():
- cmd.extend(shlex.split(args))
-
- img_unsigned = None
- if info_dict.get("vboot", None):
- img_unsigned = tempfile.NamedTemporaryFile()
- cmd.extend(["--ramdisk", ramdisk_img.name,
- "--output", img_unsigned.name])
else:
- cmd.extend(["--ramdisk", ramdisk_img.name,
+ # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
+ mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
+ cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]
+
+ fn = os.path.join(sourcedir, "second")
+ if os.access(fn, os.F_OK):
+ cmd.append("--second")
+ cmd.append(fn)
+
+ fn = os.path.join(sourcedir, "cmdline")
+ if os.access(fn, os.F_OK):
+ cmd.append("--cmdline")
+ cmd.append(open(fn).read().rstrip("\n"))
+
+ fn = os.path.join(sourcedir, "base")
+ if os.access(fn, os.F_OK):
+ cmd.append("--base")
+ cmd.append(open(fn).read().rstrip("\n"))
+
+ fn = os.path.join(sourcedir, "tagsaddr")
+ if os.access(fn, os.F_OK):
+ cmd.append("--tags-addr")
+ cmd.append(open(fn).read().rstrip("\n"))
+
+ fn = os.path.join(sourcedir, "tags_offset")
+ if os.access(fn, os.F_OK):
+ cmd.append("--tags_offset")
+ cmd.append(open(fn).read().rstrip("\n"))
+
+ fn = os.path.join(sourcedir, "ramdisk_offset")
+ if os.access(fn, os.F_OK):
+ cmd.append("--ramdisk_offset")
+ cmd.append(open(fn).read().rstrip("\n"))
+
+ fn = os.path.join(sourcedir, "dt")
+ if os.access(fn, os.F_OK):
+ cmd.append("--dt")
+ cmd.append(fn)
+
+ fn = os.path.join(sourcedir, "pagesize")
+ if os.access(fn, os.F_OK):
+ kernel_pagesize=open(fn).read().rstrip("\n")
+ cmd.append("--pagesize")
+ cmd.append(kernel_pagesize)
+
+ args = info_dict.get("mkbootimg_args", None)
+ if args and args.strip():
+ cmd.extend(shlex.split(args))
+
+ img_unsigned = None
+ if info_dict.get("vboot", None):
+ img_unsigned = tempfile.NamedTemporaryFile()
+ cmd.extend(["--ramdisk", ramdisk_img.name,
+ "--output", img_unsigned.name])
+ else:
+ cmd.extend(["--ramdisk", ramdisk_img.name,
"--output", img.name])
-
+
p = Run(cmd, stdout=subprocess.PIPE)
p.communicate()
assert p.returncode == 0, "mkbootimg of %s image failed" % (
os.path.basename(sourcedir),)
+ if custom_boot_signer and bootimg_key and os.path.exists(bootimg_key):
+ print("Signing bootable image with custom boot signer...")
+ img_secure = tempfile.NamedTemporaryFile()
+ p = Run([custom_boot_signer, img.name, img_secure.name], stdout=subprocess.PIPE)
+ p.communicate()
+ assert p.returncode == 0, "signing of bootable image failed"
+ shutil.copyfile(img_secure.name, img.name)
+ img_secure.close()
+ elif bootimg_key and os.path.exists(bootimg_key) and kernel_pagesize > 0:
+ print("Signing bootable image...")
+ bootimg_key_passwords = {}
+ bootimg_key_passwords.update(PasswordManager().GetPasswords(bootimg_key.split()))
+ bootimg_key_password = bootimg_key_passwords[bootimg_key]
+ if bootimg_key_password is not None:
+ bootimg_key_password += "\n"
+ img_sha256 = tempfile.NamedTemporaryFile()
+ img_sig = tempfile.NamedTemporaryFile()
+ img_sig_padded = tempfile.NamedTemporaryFile()
+ img_secure = tempfile.NamedTemporaryFile()
+ p = Run(["openssl", "dgst", "-sha256", "-binary", "-out", img_sha256.name, img.name],
+ stdout=subprocess.PIPE)
+ p.communicate()
+ assert p.returncode == 0, "signing of bootable image failed"
+ p = Run(["openssl", "rsautl", "-sign", "-in", img_sha256.name, "-inkey", bootimg_key, "-out",
+ img_sig.name, "-passin", "stdin"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+ p.communicate(bootimg_key_password)
+ assert p.returncode == 0, "signing of bootable image failed"
+ p = Run(["dd", "if=/dev/zero", "of=%s" % img_sig_padded.name, "bs=%s" % kernel_pagesize,
+ "count=1"], stdout=subprocess.PIPE)
+ p.communicate()
+ assert p.returncode == 0, "signing of bootable image failed"
+ p = Run(["dd", "if=%s" % img_sig.name, "of=%s" % img_sig_padded.name, "conv=notrunc"],
+ stdout=subprocess.PIPE)
+ p.communicate()
+ assert p.returncode == 0, "signing of bootable image failed"
+ p = Run(["cat", img.name, img_sig_padded.name], stdout=img_secure.file.fileno())
+ p.communicate()
+ assert p.returncode == 0, "signing of bootable image failed"
+ shutil.copyfile(img_secure.name, img.name)
+ img_sha256.close()
+ img_sig.close()
+ img_sig_padded.close()
+ img_secure.close()
+
if (info_dict.get("boot_signer", None) == "true" and
info_dict.get("verity_key", None)):
path = "/" + os.path.basename(sourcedir).lower()
@@ -382,8 +481,21 @@ def BuildBootableImage(sourcedir, fs_config_file, info_dict=None):
cmd.extend([path, img.name,
info_dict["verity_key"] + ".pk8",
info_dict["verity_key"] + ".x509.pem", img.name])
- p = Run(cmd, stdout=subprocess.PIPE)
- p.communicate()
+ verity_key_password = None
+
+ if verity_key and os.path.exists(verity_key+".pk8") and kernel_pagesize > 0:
+ verity_key_passwords = {}
+ verity_key_passwords.update(PasswordManager().GetPasswords(verity_key.split()))
+ verity_key_password = verity_key_passwords[verity_key]
+
+ if verity_key_password is not None:
+ verity_key_password += "\n"
+ p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+ p.communicate(verity_key_password)
+ else:
+ p = Run(cmd)
+ p.communicate()
+
assert p.returncode == 0, "boot_signer of %s image failed" % path
# Sign the image if vboot is non-empty.
@@ -423,15 +535,15 @@ def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
if os.path.exists(prebuilt_path):
- print "using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,)
+ print("using prebuilt %s from BOOTABLE_IMAGES..." % prebuilt_name)
return File.FromLocalFile(name, prebuilt_path)
prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
if os.path.exists(prebuilt_path):
- print "using prebuilt %s from IMAGES..." % (prebuilt_name,)
+ print("using prebuilt %s from IMAGES..." % prebuilt_name)
return File.FromLocalFile(name, prebuilt_path)
- print "building image from target_files %s..." % (tree_subdir,)
+ print("building image from target_files %s..." % tree_subdir)
fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
data = BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
os.path.join(unpack_dir, fs_config),
@@ -455,6 +567,7 @@ def UnzipTemp(filename, pattern=None):
OPTIONS.tempfiles.append(tmp)
def unzip_to_dir(filename, dirname):
+ subprocess.call(["rm", "-rf", dirname + filename, "targetfiles-*"])
cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
if pattern is not None:
cmd.append(pattern)
@@ -509,7 +622,7 @@ def GetKeyPasswords(keylist):
if p.returncode == 0:
# Encrypted key with empty string as password.
key_passwords[k] = ''
- elif stderr.startswith('Error decrypting key'):
+ elif stderr.startswith(b'Error decrypting key'):
# Definitely encrypted key.
# It would have said "Error reading key" if it didn't parse correctly.
need_passwords.append(k)
@@ -583,8 +696,8 @@ def CheckSize(data, target, info_dict):
fs_type = None
limit = None
if info_dict["fstab"]:
- if mount_point == "/userdata":
- mount_point = "/data"
+ if mount_point == "/userdata_extra": mount_point = "/data"
+ if mount_point == "/userdata": mount_point = "/data"
p = info_dict["fstab"][mount_point]
fs_type = p.fs_type
device = p.device
@@ -604,11 +717,11 @@ def CheckSize(data, target, info_dict):
if pct >= 99.0:
raise ExternalError(msg)
elif pct >= 95.0:
- print
- print " WARNING: ", msg
- print
+ print()
+ print(" WARNING: ", msg)
+ print()
elif OPTIONS.verbose:
- print " ", msg
+ print(" ", msg)
def ReadApkCerts(tf_zip):
@@ -657,8 +770,8 @@ COMMON_DOCSTRING = """
"""
def Usage(docstring):
- print docstring.rstrip("\n")
- print COMMON_DOCSTRING
+ print(docstring.rstrip("\n"))
+ print(COMMON_DOCSTRING)
def ParseOptions(argv,
@@ -682,7 +795,7 @@ def ParseOptions(argv,
list(extra_long_opts))
except getopt.GetoptError as err:
Usage(docstring)
- print "**", str(err), "**"
+ print("**", str(err), "**")
sys.exit(2)
for o, a in opts:
@@ -778,7 +891,7 @@ class PasswordManager(object):
current[i] = ""
if not first:
- print "key file %s still missing some passwords." % (self.pwfile,)
+ print("key file %s still missing some passwords." % self.pwfile)
answer = raw_input("try to edit again? [y]> ").strip()
if answer and answer[0] not in 'yY':
raise RuntimeError("key passwords unavailable")
@@ -792,7 +905,7 @@ class PasswordManager(object):
values.
"""
result = {}
- for k, v in sorted(current.iteritems()):
+ for k, v in sorted(iteritems(current)):
if v:
result[k] = v
else:
@@ -813,7 +926,7 @@ class PasswordManager(object):
f.write("# (Additional spaces are harmless.)\n\n")
first_line = None
- sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()])
+ sorted_list = sorted((not v, k, v) for (k, v) in current.items())
for i, (_, k, v) in enumerate(sorted_list):
f.write("[[[ %s ]]] %s\n" % (v, k))
if not v and first_line is None:
@@ -838,13 +951,13 @@ class PasswordManager(object):
continue
m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
if not m:
- print "failed to parse password file: ", line
+ print("failed to parse password file: ", line)
else:
result[m.group(2)] = m.group(1)
f.close()
except IOError as e:
if e.errno != errno.ENOENT:
- print "error reading password file: ", str(e)
+ print("error reading password file: ", str(e))
return result
@@ -945,7 +1058,7 @@ class DeviceSpecificParams(object):
"""Keyword arguments to the constructor become attributes of this
object, which is passed to all functions in the device-specific
module."""
- for k, v in kwargs.iteritems():
+ for k, v in iteritems(kwargs):
setattr(self, k, v)
self.extras = OPTIONS.extras
@@ -962,10 +1075,10 @@ class DeviceSpecificParams(object):
if x == ".py":
f = b
info = imp.find_module(f, [d])
- print "loaded device-specific extensions from", path
+ print("loaded device-specific extensions from", path)
self.module = imp.load_module("device_specific", *info)
except ImportError:
- print "unable to load device-specific module; assuming none"
+ print("unable to load device-specific module; assuming none")
def _DoCall(self, function_name, *args, **kwargs):
"""Call the named function in the device-specific module, passing
@@ -992,6 +1105,11 @@ class DeviceSpecificParams(object):
used to install the image for the device's baseband processor."""
return self._DoCall("FullOTA_InstallEnd")
+ def FullOTA_PostValidate(self):
+ """Called after installing and validating /system; typically this is
+ used to resize the system partition after a block based installation."""
+ return self._DoCall("FullOTA_PostValidate")
+
def IncrementalOTA_Assertions(self):
"""Called after emitting the block of assertions at the top of an
incremental OTA package. Implementations can add whatever
@@ -1096,7 +1214,7 @@ class Difference(object):
th.start()
th.join(timeout=300) # 5 mins
if th.is_alive():
- print "WARNING: diff command timed out"
+ print("WARNING: diff command timed out")
p.terminate()
th.join(5)
if th.is_alive():
@@ -1104,8 +1222,8 @@ class Difference(object):
th.join()
if err or p.returncode != 0:
- print "WARNING: failure running %s:\n%s\n" % (
- diff_program, "".join(err))
+ print("WARNING: failure running %s:\n%s\n" % (
+ diff_program, "".join(err)))
self.patch = None
return None, None, None
diff = ptemp.read()
@@ -1127,7 +1245,7 @@ class Difference(object):
def ComputeDifferences(diffs):
"""Call ComputePatch on all the Difference objects in 'diffs'."""
- print len(diffs), "diffs to compute"
+ print(len(diffs), "diffs to compute")
# Do the largest files first, to try and reduce the long-pole effect.
by_size = [(i.tf.size, i) for i in diffs]
@@ -1153,13 +1271,13 @@ def ComputeDifferences(diffs):
else:
name = "%s (%s)" % (tf.name, sf.name)
if patch is None:
- print "patching failed! %s" % (name,)
+ print("patching failed! %s" % name)
else:
- print "%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
- dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name)
+ print("%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
+ dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name))
lock.release()
except Exception as e:
- print e
+ print(e)
raise
# start worker threads; wait for them all to finish.
@@ -1342,7 +1460,11 @@ PARTITION_TYPES = {
"ext4": "EMMC",
"emmc": "EMMC",
"f2fs": "EMMC",
- "squashfs": "EMMC"
+ "squashfs": "EMMC",
+ "ext2": "EMMC",
+ "ext3": "EMMC",
+ "vfat": "EMMC",
+ "osip": "OSIP"
}
def GetTypeAndDevice(mount_point, info):
@@ -1356,16 +1478,18 @@ def GetTypeAndDevice(mount_point, info):
def ParseCertificate(data):
"""Parse a PEM-format certificate."""
+ from codecs import decode
cert = []
save = False
for line in data.split("\n"):
if "--END CERTIFICATE--" in line:
break
if save:
- cert.append(line)
+ l = line.encode() if hasattr(line, 'encode') else line
+ cert.append(l)
if "--BEGIN CERTIFICATE--" in line:
save = True
- cert = "".join(cert).decode('base64')
+ cert = decode(b"".join(cert), 'base64')
return cert
def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
@@ -1407,6 +1531,10 @@ def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
return
sh = """#!/system/bin/sh
+if [ -f /system/etc/recovery-transform.sh ]; then
+ exec sh /system/etc/recovery-transform.sh %(recovery_size)d %(recovery_sha1)s %(boot_size)d %(boot_sha1)s
+fi
+
if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
@@ -1423,18 +1551,27 @@ fi
'bonus_args': bonus_args}
# The install script location moved from /system/etc to /system/bin
- # in the L release. Parse the init.rc file to find out where the
+ # in the L release. Parse init.*.rc files to find out where the
# target-files expects it to be, and put it there.
sh_location = "etc/install-recovery.sh"
- try:
- with open(os.path.join(input_dir, "BOOT", "RAMDISK", "init.rc")) as f:
+ found = False
+ init_rc_dir = os.path.join(input_dir, "BOOT", "RAMDISK")
+ init_rc_files = os.listdir(init_rc_dir)
+ for init_rc_file in init_rc_files:
+ if (not init_rc_file.startswith('init.') or
+ not init_rc_file.endswith('.rc')):
+ continue
+
+ with open(os.path.join(init_rc_dir, init_rc_file)) as f:
for line in f:
m = re.match(r"^service flash_recovery /system/(\S+)\s*$", line)
if m:
sh_location = m.group(1)
- print "putting script in", sh_location
+ found = True
break
- except (OSError, IOError) as e:
- print "failed to read init.rc: %s" % (e,)
+ if found:
+ break
+
+ print("putting script in", sh_location)
output_sink(sh_location, sh)
diff --git a/tools/releasetools/edify_generator.py b/tools/releasetools/edify_generator.py
index a52e328..825a7eb 100644
--- a/tools/releasetools/edify_generator.py
+++ b/tools/releasetools/edify_generator.py
@@ -117,20 +117,44 @@ class EdifyGenerator(object):
def AssertDevice(self, device):
"""Assert that the device identifier is the given string."""
- cmd = ('getprop("ro.product.device") == "%s" || '
- 'abort("This package is for \\"%s\\" devices; '
- 'this is a \\"" + getprop("ro.product.device") + "\\".");') % (
- device, device)
+ cmd = ('assert(' +
+ ' || '.join(['getprop("ro.product.device") == "%s" || getprop("ro.build.product") == "%s"'
+ % (i, i) for i in device.split(",")]) +
+ ' || abort("This package is for device: %s; ' +
+ 'this device is " + getprop("ro.product.device") + ".");' +
+ ');') % device
self.script.append(cmd)
def AssertSomeBootloader(self, *bootloaders):
- """Asert that the bootloader version is one of *bootloaders."""
+ """Assert that the bootloader version is one of *bootloaders."""
cmd = ("assert(" +
- " ||\0".join(['getprop("ro.bootloader") == "%s"' % (b,)
+ " || ".join(['getprop("ro.bootloader") == "%s"' % (b,)
for b in bootloaders]) +
+ ' || abort("This package supports bootloader(s): ' +
+ ", ".join(["%s" % (b,) for b in bootloaders]) +
+ '; this device has bootloader " + getprop("ro.bootloader") + ".");' +
");")
self.script.append(self.WordWrap(cmd))
+ def AssertSomeBaseband(self, *basebands):
+ """Assert that the baseband version is one of *basebands."""
+ cmd = ("assert(" +
+ " || ".join(['getprop("ro.baseband") == "%s"' % (b,)
+ for b in basebands]) +
+ ' || abort("This package supports baseband(s): ' +
+ ", ".join(["%s" % (b,) for b in basebands]) +
+ '; this device has baseband " + getprop("ro.baseband") + ".");' +
+ ");")
+ self.script.append(self.WordWrap(cmd))
+
+ def RunBackup(self, command):
+ self.script.append(('run_program("/tmp/install/bin/backuptool.sh", "%s");' % command))
+
+ def ValidateSignatures(self, command):
+ self.script.append('package_extract_file("META-INF/org/cyanogenmod/releasekey", "/tmp/releasekey");')
+ # Exit code 124 == abort. run_program returns raw, so left-shift 8bit
+ self.script.append('run_program("/tmp/install/bin/otasigcheck.sh") != "31744" || abort("Can\'t install this package on top of incompatible data. Please try another package or run a factory reset");')
+
def ShowProgress(self, frac, dur):
"""Update the progress bar, advancing it over 'frac' over the next
'dur' seconds. 'dur' may be zero to advance it via SetProgress
@@ -189,6 +213,12 @@ class EdifyGenerator(object):
p.mount_point, mount_flags))
self.mounts.add(p.mount_point)
+ def Unmount(self, mount_point):
+    """Unmount the partition with the given mount_point."""
+ if mount_point in self.mounts:
+ self.mounts.remove(mount_point)
+ self.script.append('unmount("%s");' % (mount_point,))
+
def UnpackPackageDir(self, src, dst):
"""Unpack a given directory from the OTA package into the given
destination directory."""
@@ -293,6 +323,10 @@ class EdifyGenerator(object):
self.script.append(
'write_raw_image(package_extract_file("%(fn)s"), "%(device)s");'
% args)
+ elif partition_type == "OSIP":
+ self.script.append(
+ 'write_osip_image(package_extract_file("%(fn)s"), "%(device)s");'
+ % args)
elif partition_type == "EMMC":
if mapfn:
args["map"] = mapfn
@@ -310,10 +344,10 @@ class EdifyGenerator(object):
if not self.info.get("use_set_metadata", False):
self.script.append('set_perm(%d, %d, 0%o, "%s");' % (uid, gid, mode, fn))
else:
- if capabilities is None:
- capabilities = "0x0"
- cmd = 'set_metadata("%s", "uid", %d, "gid", %d, "mode", 0%o, ' \
- '"capabilities", %s' % (fn, uid, gid, mode, capabilities)
+ cmd = 'set_metadata("%s", "uid", %d, "gid", %d, "mode", 0%o' \
+ % (fn, uid, gid, mode)
+ if capabilities is not None:
+ cmd += ', "capabilities", %s' % ( capabilities )
if selabel is not None:
cmd += ', "selabel", "%s"' % selabel
cmd += ');'
@@ -326,11 +360,11 @@ class EdifyGenerator(object):
self.script.append('set_perm_recursive(%d, %d, 0%o, 0%o, "%s");'
% (uid, gid, dmode, fmode, fn))
else:
- if capabilities is None:
- capabilities = "0x0"
cmd = 'set_metadata_recursive("%s", "uid", %d, "gid", %d, ' \
- '"dmode", 0%o, "fmode", 0%o, "capabilities", %s' \
- % (fn, uid, gid, dmode, fmode, capabilities)
+ '"dmode", 0%o, "fmode", 0%o' \
+ % (fn, uid, gid, dmode, fmode)
+ if capabilities is not None:
+ cmd += ', "capabilities", "%s"' % ( capabilities )
if selabel is not None:
cmd += ', "selabel", "%s"' % selabel
cmd += ');'
@@ -342,7 +376,7 @@ class EdifyGenerator(object):
for d, l in symlink_list:
by_dest.setdefault(d, []).append(l)
- for dest, links in sorted(by_dest.iteritems()):
+ for dest, links in sorted(by_dest.items()):
cmd = ('symlink("%s", ' % (dest,) +
",\0".join(['"' + i + '"' for i in sorted(links)]) + ");")
self.script.append(self.WordWrap(cmd))
diff --git a/tools/releasetools/img_from_target_files.py b/tools/releasetools/img_from_target_files.py
index c486992..33375ca 100755
--- a/tools/releasetools/img_from_target_files.py
+++ b/tools/releasetools/img_from_target_files.py
@@ -26,14 +26,20 @@ Usage: img_from_target_files [flags] input_target_files output_image_zip
"""
+from __future__ import print_function
+
import sys
if sys.hexversion < 0x02070000:
- print >> sys.stderr, "Python 2.7 or newer is required."
+ print("Python 2.7 or newer is required.", file=sys.stderr)
sys.exit(1)
+import errno
import os
+import re
import shutil
+import subprocess
+import tempfile
import zipfile
import common
@@ -47,6 +53,31 @@ def CopyInfo(output_zip):
output_zip, os.path.join(OPTIONS.input_tmp, "OTA", "android-info.txt"),
"android-info.txt")
+def AddRadio(output_zip):
+ """If they exist, add RADIO files to the output."""
+ if os.path.isdir(os.path.join(OPTIONS.input_tmp, "RADIO")):
+ for radio_root, radio_dirs, radio_files in os.walk(os.path.join(OPTIONS.input_tmp, "RADIO")):
+ for radio_file in radio_files:
+ output_zip.write(os.path.join(radio_root, radio_file), radio_file)
+
+ # If a filesmap file exists, create a script to flash the radio images based on it
+ filesmap = os.path.join(OPTIONS.input_tmp, "RADIO/filesmap")
+ if os.path.isfile(filesmap):
+ print("creating flash-radio.sh...")
+ filesmap_data = open(filesmap, "r")
+ filesmap_regex = re.compile(r'^(\S+)\s\S+\/by-name\/(\S+).*')
+ tmp_flash_radio = tempfile.NamedTemporaryFile()
+ tmp_flash_radio.write("#!/bin/sh\n\n")
+ for filesmap_line in filesmap_data:
+ filesmap_entry = filesmap_regex.search(filesmap_line)
+ if filesmap_entry:
+ tmp_flash_radio.write("fastboot flash %s %s\n" % (filesmap_entry.group(2), filesmap_entry.group(1)))
+ tmp_flash_radio.flush()
+ if os.path.getsize(tmp_flash_radio.name) > 0:
+ output_zip.write(tmp_flash_radio.name, "flash-radio.sh")
+ else:
+ print("flash-radio.sh is empty, skipping...")
+ tmp_flash_radio.close()
def main(argv):
bootable_only = [False]
@@ -72,6 +103,7 @@ def main(argv):
OPTIONS.input_tmp, input_zip = common.UnzipTemp(args[0])
output_zip = zipfile.ZipFile(args[1], "w", compression=zipfile.ZIP_DEFLATED)
CopyInfo(output_zip)
+ AddRadio(output_zip)
try:
done = False
@@ -79,6 +111,7 @@ def main(argv):
if os.path.exists(images_path):
# If this is a new target-files, it already contains the images,
# and all we have to do is copy them to the output zip.
+ # Skip oem.img files since they are not needed in fastboot images.
images = os.listdir(images_path)
if images:
for image in images:
@@ -86,6 +119,8 @@ def main(argv):
continue
if not image.endswith(".img"):
continue
+ if image == "oem.img":
+ continue
common.ZipWrite(
output_zip, os.path.join(images_path, image), image)
done = True
@@ -116,7 +151,7 @@ def main(argv):
recovery_image.AddToZip(output_zip)
def banner(s):
- print "\n\n++++ " + s + " ++++\n\n"
+ print("\n\n++++ " + s + " ++++\n\n")
if not bootable_only:
banner("AddSystem")
@@ -129,15 +164,17 @@ def main(argv):
pass # no vendor partition for this device
banner("AddUserdata")
add_img_to_target_files.AddUserdata(output_zip, prefix="")
+ banner("AddUserdataExtra")
+ add_img_to_target_files.AddUserdataExtra(output_zip, prefix="")
banner("AddCache")
add_img_to_target_files.AddCache(output_zip, prefix="")
finally:
- print "cleaning up..."
+ print("cleaning up...")
common.ZipClose(output_zip)
shutil.rmtree(OPTIONS.input_tmp)
- print "done."
+ print("done.")
if __name__ == '__main__':
@@ -145,7 +182,7 @@ if __name__ == '__main__':
common.CloseInheritedPipes()
main(sys.argv[1:])
except common.ExternalError as e:
- print
- print " ERROR: %s" % (e,)
- print
+ print()
+ print(" ERROR: %s" % e)
+ print()
sys.exit(1)
diff --git a/tools/releasetools/make_recovery_patch.py b/tools/releasetools/make_recovery_patch.py
index 08d1450..7c6007e 100755
--- a/tools/releasetools/make_recovery_patch.py
+++ b/tools/releasetools/make_recovery_patch.py
@@ -14,10 +14,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from __future__ import print_function
+
import sys
if sys.hexversion < 0x02070000:
- print >> sys.stderr, "Python 2.7 or newer is required."
+ print("Python 2.7 or newer is required.", file=sys.stderr)
sys.exit(1)
import os
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index c5c16b4..bc40873 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -92,12 +92,26 @@ Usage: ota_from_target_files [flags] input_target_files output_ota_package
--stash_threshold <float>
Specifies the threshold that will be used to compute the maximum
allowed stash size (defaults to 0.8).
+
+ --backup <boolean>
+ Enable or disable the execution of backuptool.sh.
+ Disabled by default.
+
+ --override_device <device>
+ Override device-specific asserts. Can be a comma-separated list.
+
+ --override_prop <boolean>
+ Override build.prop items with custom vendor init.
+ Enabled when TARGET_UNIFIED_DEVICE is defined in BoardConfig
+
"""
+from __future__ import print_function
+
import sys
if sys.hexversion < 0x02070000:
- print >> sys.stderr, "Python 2.7 or newer is required."
+ print("Python 2.7 or newer is required.", file=sys.stderr)
sys.exit(1)
import multiprocessing
@@ -131,14 +145,14 @@ OPTIONS.oem_source = None
OPTIONS.fallback_to_full = True
OPTIONS.full_radio = False
OPTIONS.full_bootloader = False
-# Stash size cannot exceed cache_size * threshold.
-OPTIONS.cache_size = None
-OPTIONS.stash_threshold = 0.8
+OPTIONS.backuptool = False
+OPTIONS.override_device = 'auto'
+OPTIONS.override_prop = False
def MostPopularKey(d, default):
"""Given a dict, return the key corresponding to the largest
value. Returns 'default' if the dict is empty."""
- x = [(v, k) for (k, v) in d.iteritems()]
+ x = [(v, k) for (k, v) in d.items()]
if not x:
return default
x.sort()
@@ -148,7 +162,7 @@ def MostPopularKey(d, default):
def IsSymlink(info):
"""Return true if the zipfile.ZipInfo object passed in represents a
symlink."""
- return (info.external_attr >> 16) == 0o120777
+ return (info.external_attr >> 16) & 0o770000 == 0o120000
def IsRegular(info):
"""Return true if the zipfile.ZipInfo object passed in represents a
@@ -257,14 +271,14 @@ class Item(object):
def Dump(self, indent=0):
if self.uid is not None:
- print "%s%s %d %d %o" % (
- " " * indent, self.name, self.uid, self.gid, self.mode)
+ print("%s%s %d %d %o" % (
+ " " * indent, self.name, self.uid, self.gid, self.mode))
else:
- print "%s%s %s %s %s" % (
- " " * indent, self.name, self.uid, self.gid, self.mode)
+ print("%s%s %s %s %s" % (
+ " " * indent, self.name, self.uid, self.gid, self.mode))
if self.is_dir:
- print "%s%s" % (" "*indent, self.descendants)
- print "%s%s" % (" "*indent, self.best_subtree)
+ print("%s%s" % (" "*indent, self.descendants))
+ print("%s%s" % (" "*indent, self.best_subtree))
for i in self.children:
i.Dump(indent=indent+1)
@@ -288,7 +302,7 @@ class Item(object):
d = self.descendants
for i in self.children:
if i.is_dir:
- for k, v in i.CountChildMetadata().iteritems():
+ for k, v in i.CountChildMetadata().items():
d[k] = d.get(k, 0) + v
else:
k = (i.uid, i.gid, None, i.mode, i.selabel, i.capabilities)
@@ -300,7 +314,7 @@ class Item(object):
# First, find the (uid, gid) pair that matches the most
# descendants.
ug = {}
- for (uid, gid, _, _, _, _), count in d.iteritems():
+ for (uid, gid, _, _, _, _), count in d.items():
ug[(uid, gid)] = ug.get((uid, gid), 0) + count
ug = MostPopularKey(ug, (0, 0))
@@ -310,7 +324,7 @@ class Item(object):
best_fmode = (0, 0o644)
best_selabel = (0, None)
best_capabilities = (0, None)
- for k, count in d.iteritems():
+ for k, count in d.items():
if k[:2] != ug:
continue
if k[2] is not None and count >= best_dmode[0]:
@@ -413,7 +427,10 @@ def SignOutput(temp_zip_name, output_zip_name):
def AppendAssertions(script, info_dict, oem_dict=None):
oem_props = info_dict.get("oem_fingerprint_properties")
if oem_props is None or len(oem_props) == 0:
- device = GetBuildProp("ro.product.device", info_dict)
+ if OPTIONS.override_device == "auto":
+ device = GetBuildProp("ro.product.device", info_dict)
+ else:
+ device = OPTIONS.override_device
script.AssertDevice(device)
else:
if oem_dict is None:
@@ -445,8 +462,9 @@ def GetOemProperty(name, oem_props, oem_dict, info_dict):
return oem_dict[name]
return GetBuildProp(name, info_dict)
-
def CalculateFingerprint(oem_props, oem_dict, info_dict):
+ if OPTIONS.override_prop:
+ return GetBuildProp("ro.build.date.utc", info_dict)
if oem_props is None:
return GetBuildProp("ro.build.fingerprint", info_dict)
return "%s/%s/%s:%s" % (
@@ -467,11 +485,11 @@ def GetImage(which, tmpdir, info_dict):
path = os.path.join(tmpdir, "IMAGES", which + ".img")
mappath = os.path.join(tmpdir, "IMAGES", which + ".map")
if os.path.exists(path) and os.path.exists(mappath):
- print "using %s.img from target-files" % (which,)
+ print("using %s.img from target-files" % which)
# This is a 'new' target-files, which already has the image in it.
else:
- print "building %s.img from target-files" % (which,)
+ print("building %s.img from target-files" % which)
# This is an 'old' target-files, which does not contain images
# already built. Build them.
@@ -496,6 +514,15 @@ def GetImage(which, tmpdir, info_dict):
return sparse_img.SparseImage(path, mappath, clobbered_blocks)
+def CopyInstallTools(output_zip):
+ install_path = os.path.join(OPTIONS.input_tmp, "INSTALL")
+ for root, subdirs, files in os.walk(install_path):
+ for f in files:
+ install_source = os.path.join(root, f)
+ install_target = os.path.join("install", os.path.relpath(root, install_path), f)
+ output_zip.write(install_source, install_target)
+
+
def WriteFullOTAPackage(input_zip, output_zip):
# TODO: how to determine this? We don't know what version it will
# be installed on top of. For now, we expect the API just won't
@@ -513,13 +540,18 @@ def WriteFullOTAPackage(input_zip, output_zip):
oem_dict = common.LoadDictionaryFromLines(
open(OPTIONS.oem_source).readlines())
- metadata = {
- "post-build": CalculateFingerprint(oem_props, oem_dict,
- OPTIONS.info_dict),
- "pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict,
- OPTIONS.info_dict),
- "post-timestamp": GetBuildProp("ro.build.date.utc", OPTIONS.info_dict),
- }
+ if OPTIONS.override_prop:
+ metadata = {"post-timestamp": GetBuildProp("ro.build.date.utc",
+ OPTIONS.info_dict),
+ }
+ else:
+ metadata = {"post-build": CalculateFingerprint(
+ oem_props, oem_dict, OPTIONS.info_dict),
+ "pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict,
+ OPTIONS.info_dict),
+ "post-timestamp": GetBuildProp("ro.build.date.utc",
+ OPTIONS.info_dict),
+ }
device_specific = common.DeviceSpecificParams(
input_zip=input_zip,
@@ -533,10 +565,10 @@ def WriteFullOTAPackage(input_zip, output_zip):
has_recovery_patch = HasRecoveryPatch(input_zip)
block_based = OPTIONS.block_based and has_recovery_patch
- if not OPTIONS.omit_prereq:
- ts = GetBuildProp("ro.build.date.utc", OPTIONS.info_dict)
- ts_text = GetBuildProp("ro.build.date", OPTIONS.info_dict)
- script.AssertOlderBuild(ts, ts_text)
+ #if not OPTIONS.omit_prereq:
+ # ts = GetBuildProp("ro.build.date.utc", OPTIONS.info_dict)
+ # ts_text = GetBuildProp("ro.build.date", OPTIONS.info_dict)
+ # script.AssertOlderBuild(ts, ts_text)
AppendAssertions(script, OPTIONS.info_dict, oem_dict)
device_specific.FullOTA_Assertions()
@@ -585,8 +617,19 @@ else if get_stage("%(bcb_dev)s") == "3/3" then
script.Print("Target: %s" % CalculateFingerprint(
oem_props, oem_dict, OPTIONS.info_dict))
+ script.AppendExtra("ifelse(is_mounted(\"/system\"), unmount(\"/system\"));")
device_specific.FullOTA_InstallBegin()
+ CopyInstallTools(output_zip)
+ script.UnpackPackageDir("install", "/tmp/install")
+ script.SetPermissionsRecursive("/tmp/install", 0, 0, 0o755, 0o644, None, None)
+ script.SetPermissionsRecursive("/tmp/install/bin", 0, 0, 0o755, 0o755, None, None)
+
+ if OPTIONS.backuptool:
+ script.Mount("/system")
+ script.RunBackup("backup")
+ script.Unmount("/system")
+
system_progress = 0.75
if OPTIONS.wipe_user_data:
@@ -594,6 +637,15 @@ else if get_stage("%(bcb_dev)s") == "3/3" then
if HasVendorPartition(input_zip):
system_progress -= 0.1
+ if not OPTIONS.wipe_user_data:
+ script.AppendExtra("if is_mounted(\"/data\") then")
+ script.ValidateSignatures("data")
+ script.AppendExtra("else")
+ script.Mount("/data")
+ script.ValidateSignatures("data")
+ script.Unmount("/data")
+ script.AppendExtra("endif;")
+
if "selinux_fc" in OPTIONS.info_dict:
WritePolicyConfig(OPTIONS.info_dict["selinux_fc"], output_zip)
@@ -658,6 +710,16 @@ else if get_stage("%(bcb_dev)s") == "3/3" then
common.CheckSize(boot_img.data, "boot.img", OPTIONS.info_dict)
common.ZipWriteStr(output_zip, "boot.img", boot_img.data)
+ device_specific.FullOTA_PostValidate()
+
+ if OPTIONS.backuptool:
+ script.ShowProgress(0.02, 10)
+ if block_based:
+ script.Mount("/system")
+ script.RunBackup("restore")
+ if block_based:
+ script.Unmount("/system")
+
script.ShowProgress(0.05, 5)
script.WriteRawImage("/boot", "boot.img")
@@ -688,15 +750,19 @@ endif;
script.AddToZip(input_zip, output_zip, input_path=OPTIONS.updater_binary)
WriteMetadata(metadata, output_zip)
+ common.ZipWriteStr(output_zip, "system/build.prop",
+ ""+input_zip.read("SYSTEM/build.prop"))
+
+ common.ZipWriteStr(output_zip, "META-INF/org/cyanogenmod/releasekey",
+ ""+input_zip.read("META/releasekey.txt"))
def WritePolicyConfig(file_name, output_zip):
common.ZipWrite(output_zip, file_name, os.path.basename(file_name))
-
def WriteMetadata(metadata, output_zip):
common.ZipWriteStr(output_zip, "META-INF/com/android/metadata",
"".join(["%s=%s\n" % kv
- for kv in sorted(metadata.iteritems())]))
+ for kv in sorted(metadata.items())]))
def LoadPartitionFiles(z, partition):
@@ -744,18 +810,24 @@ def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip):
source_version, OPTIONS.target_info_dict,
fstab=OPTIONS.source_info_dict["fstab"])
- metadata = {
- "pre-device": GetBuildProp("ro.product.device",
- OPTIONS.source_info_dict),
- "post-timestamp": GetBuildProp("ro.build.date.utc",
- OPTIONS.target_info_dict),
- }
+ if OPTIONS.override_prop:
+ metadata = {"post-timestamp": GetBuildProp("ro.build.date.utc",
+ OPTIONS.target_info_dict),
+ }
+ else:
+ metadata = {"pre-device": GetBuildProp("ro.product.device",
+ OPTIONS.source_info_dict),
+ "post-timestamp": GetBuildProp("ro.build.date.utc",
+ OPTIONS.target_info_dict),
+ }
device_specific = common.DeviceSpecificParams(
source_zip=source_zip,
source_version=source_version,
target_zip=target_zip,
+ input_zip=target_zip,
target_version=target_version,
+ input_version=target_version,
output_zip=output_zip,
script=script,
metadata=metadata,
@@ -767,10 +839,11 @@ def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip):
# devices with thumbprints are all using file-based OTAs. Long term we
# should factor out the common parts into a shared one to avoid further
# divergence.
- source_fp = GetBuildProp("ro.build.fingerprint", OPTIONS.source_info_dict)
- target_fp = GetBuildProp("ro.build.fingerprint", OPTIONS.target_info_dict)
- metadata["pre-build"] = source_fp
- metadata["post-build"] = target_fp
+ if not OPTIONS.override_prop:
+ source_fp = GetBuildProp("ro.build.fingerprint", OPTIONS.source_info_dict)
+ target_fp = GetBuildProp("ro.build.fingerprint", OPTIONS.target_info_dict)
+ metadata["pre-build"] = source_fp
+ metadata["post-build"] = target_fp
source_boot = common.GetBootableImage(
"/tmp/boot.img", "boot.img", OPTIONS.source_tmp, "BOOT",
@@ -901,8 +974,8 @@ else if get_stage("%(bcb_dev)s") != "3/3" then
else:
include_full_boot = False
- print "boot target: %d source: %d diff: %d" % (
- target_boot.size, source_boot.size, len(d))
+ print("boot target: %d source: %d diff: %d" % (
+ target_boot.size, source_boot.size, len(d)))
common.ZipWriteStr(output_zip, "patch/boot.img.p", d)
@@ -938,19 +1011,19 @@ else
if OPTIONS.two_step:
common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
script.WriteRawImage("/boot", "boot.img")
- print "writing full boot image (forced by two-step mode)"
+ print("writing full boot image (forced by two-step mode)")
if not OPTIONS.two_step:
if updating_boot:
if include_full_boot:
- print "boot image changed; including full."
+ print("boot image changed; including full.")
script.Print("Installing boot image...")
script.WriteRawImage("/boot", "boot.img")
else:
# Produce the boot image by applying a patch to the current
# contents of the boot partition, and write it back to the
# partition.
- print "boot image changed; including patch."
+ print("boot image changed; including patch.")
script.Print("Patching boot image...")
script.ShowProgress(0.1, 10)
script.ApplyPatch("%s:%s:%d:%s:%d:%s"
@@ -961,7 +1034,7 @@ else
target_boot.size, target_boot.sha1,
source_boot.sha1, "patch/boot.img.p")
else:
- print "boot image unchanged; skipping."
+ print("boot image unchanged; skipping.")
# Do device-specific installation (eg, write radio image).
device_specific.IncrementalOTA_InstallEnd()
@@ -988,9 +1061,9 @@ endif;
class FileDifference(object):
def __init__(self, partition, source_zip, target_zip, output_zip):
self.deferred_patch_list = None
- print "Loading target..."
+ print("Loading target...")
self.target_data = target_data = LoadPartitionFiles(target_zip, partition)
- print "Loading source..."
+ print("Loading source...")
self.source_data = source_data = LoadPartitionFiles(source_zip, partition)
self.verbatim_targets = verbatim_targets = []
@@ -1017,14 +1090,14 @@ class FileDifference(object):
assert fn == tf.name
sf = ClosestFileMatch(tf, matching_file_cache, renames)
if sf is not None and sf.name != tf.name:
- print "File has moved from " + sf.name + " to " + tf.name
+ print("File has moved from " + sf.name + " to " + tf.name)
renames[sf.name] = tf
if sf is None or fn in OPTIONS.require_verbatim:
# This file should be included verbatim
if fn in OPTIONS.prohibit_verbatim:
raise common.ExternalError("\"%s\" must be sent verbatim" % (fn,))
- print "send", fn, "verbatim"
+ print("send", fn, "verbatim")
tf.AddToZip(output_zip)
verbatim_targets.append((fn, tf.size, tf.sha1))
if fn in target_data.keys():
@@ -1109,8 +1182,8 @@ class FileDifference(object):
def EmitRenames(self, script):
if len(self.renames) > 0:
script.Print("Renaming files...")
- for src, tgt in self.renames.iteritems():
- print "Renaming " + src + " to " + tgt.name
+ for src, tgt in self.renames.items():
+ print("Renaming " + src + " to " + tgt.name)
script.RenameFile(src, tgt.name)
@@ -1144,18 +1217,24 @@ def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip):
oem_dict = common.LoadDictionaryFromLines(
open(OPTIONS.oem_source).readlines())
- metadata = {
- "pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict,
- OPTIONS.source_info_dict),
- "post-timestamp": GetBuildProp("ro.build.date.utc",
- OPTIONS.target_info_dict),
- }
+ if OPTIONS.override_prop:
+ metadata = {"post-timestamp": GetBuildProp("ro.build.date.utc",
+ OPTIONS.target_info_dict),
+ }
+ else:
+ metadata = {"pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict,
+ OPTIONS.source_info_dict),
+ "post-timestamp": GetBuildProp("ro.build.date.utc",
+ OPTIONS.target_info_dict),
+ }
device_specific = common.DeviceSpecificParams(
source_zip=source_zip,
source_version=source_version,
target_zip=target_zip,
+ input_zip=target_zip,
target_version=target_version,
+ input_version=target_version,
output_zip=output_zip,
script=script,
metadata=metadata,
@@ -1169,20 +1248,19 @@ def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip):
else:
vendor_diff = None
- target_fp = CalculateFingerprint(oem_props, oem_dict,
- OPTIONS.target_info_dict)
- source_fp = CalculateFingerprint(oem_props, oem_dict,
- OPTIONS.source_info_dict)
+ if not OPTIONS.override_prop:
+ target_fp = CalculateFingerprint(oem_props, oem_dict, OPTIONS.target_info_dict)
+ source_fp = CalculateFingerprint(oem_props, oem_dict, OPTIONS.source_info_dict)
- if oem_props is None:
- script.AssertSomeFingerprint(source_fp, target_fp)
- else:
- script.AssertSomeThumbprint(
- GetBuildProp("ro.build.thumbprint", OPTIONS.target_info_dict),
- GetBuildProp("ro.build.thumbprint", OPTIONS.source_info_dict))
+ if oem_props is None:
+ script.AssertSomeFingerprint(source_fp, target_fp)
+ else:
+ script.AssertSomeThumbprint(
+ GetBuildProp("ro.build.thumbprint", OPTIONS.target_info_dict),
+ GetBuildProp("ro.build.thumbprint", OPTIONS.source_info_dict))
- metadata["pre-build"] = source_fp
- metadata["post-build"] = target_fp
+ metadata["pre-build"] = source_fp
+ metadata["post-build"] = target_fp
source_boot = common.GetBootableImage(
"/tmp/boot.img", "boot.img", OPTIONS.source_tmp, "BOOT",
@@ -1250,8 +1328,8 @@ else if get_stage("%(bcb_dev)s") != "3/3" then
""" % bcb_dev)
# Dump fingerprints
- script.Print("Source: %s" % (source_fp,))
- script.Print("Target: %s" % (target_fp,))
+ script.Print("Source: %s" % source_fp)
+ script.Print("Target: %s" % target_fp)
script.Print("Verifying current system...")
@@ -1265,8 +1343,8 @@ else if get_stage("%(bcb_dev)s") != "3/3" then
if updating_boot:
d = common.Difference(target_boot, source_boot)
_, _, d = d.ComputePatch()
- print "boot target: %d source: %d diff: %d" % (
- target_boot.size, source_boot.size, len(d))
+ print("boot target: %d source: %d diff: %d" % (
+ target_boot.size, source_boot.size, len(d)))
common.ZipWriteStr(output_zip, "patch/boot.img.p", d)
@@ -1305,7 +1383,7 @@ else
if OPTIONS.two_step:
common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
script.WriteRawImage("/boot", "boot.img")
- print "writing full boot image (forced by two-step mode)"
+ print("writing full boot image (forced by two-step mode)")
script.Print("Removing unneeded files...")
system_diff.RemoveUnneededFiles(script, ("/system/recovery.img",))
@@ -1340,9 +1418,9 @@ else
source_boot.sha1, "patch/boot.img.p")
so_far += target_boot.size
script.SetProgress(so_far / total_patch_size)
- print "boot image changed; including."
+ print("boot image changed; including.")
else:
- print "boot image unchanged; skipping."
+ print("boot image unchanged; skipping.")
system_items = ItemSet("system", "META/filesystem_config.txt")
if vendor_diff:
@@ -1367,9 +1445,9 @@ else
target_recovery, target_boot)
script.DeleteFiles(["/system/recovery-from-boot.p",
"/system/etc/install-recovery.sh"])
- print "recovery image changed; including as patch from boot."
+ print("recovery image changed; including as patch from boot.")
else:
- print "recovery image unchanged; skipping."
+ print("recovery image unchanged; skipping.")
script.ShowProgress(0.1, 10)
@@ -1499,7 +1577,6 @@ endif;
WriteMetadata(metadata, output_zip)
-
def main(argv):
def option_handler(o, a):
@@ -1540,7 +1617,7 @@ def main(argv):
OPTIONS.verify = True
elif o == "--block":
OPTIONS.block_based = True
- elif o in ("-b", "--binary"):
+ elif o in ("-b", "--binary",):
OPTIONS.updater_binary = a
elif o in ("--no_fallback_to_full",):
OPTIONS.fallback_to_full = False
@@ -1550,6 +1627,12 @@ def main(argv):
except ValueError:
raise ValueError("Cannot parse value %r for option %r - expecting "
"a float" % (a, o))
+ elif o in ("--backup",):
+ OPTIONS.backuptool = bool(a.lower() == 'true')
+ elif o in ("--override_device",):
+ OPTIONS.override_device = a
+ elif o in ("--override_prop",):
+ OPTIONS.override_prop = bool(a.lower() == 'true')
else:
return False
return True
@@ -1575,6 +1658,9 @@ def main(argv):
"verify",
"no_fallback_to_full",
"stash_threshold=",
+ "backup=",
+ "override_device=",
+ "override_prop="
], extra_option_handler=option_handler)
if len(args) != 2:
@@ -1584,7 +1670,7 @@ def main(argv):
if OPTIONS.extra_script is not None:
OPTIONS.extra_script = open(OPTIONS.extra_script).read()
- print "unzipping target target-files..."
+ print("unzipping target target-files...")
OPTIONS.input_tmp, input_zip = common.UnzipTemp(args[0])
OPTIONS.target_tmp = OPTIONS.input_tmp
@@ -1599,7 +1685,7 @@ def main(argv):
OPTIONS.input_tmp, "BOOT", "RAMDISK", "file_contexts")
if OPTIONS.verbose:
- print "--- target info ---"
+ print("--- target info ---")
common.DumpInfoDict(OPTIONS.info_dict)
# If the caller explicitly specified the device-specific extensions
@@ -1612,7 +1698,7 @@ def main(argv):
if OPTIONS.device_specific is None:
from_input = os.path.join(OPTIONS.input_tmp, "META", "releasetools.py")
if os.path.exists(from_input):
- print "(using device-specific extensions from target_files)"
+ print("(using device-specific extensions from target_files)")
OPTIONS.device_specific = from_input
else:
OPTIONS.device_specific = OPTIONS.info_dict.get("tool_extensions", None)
@@ -1634,7 +1720,7 @@ def main(argv):
cache_size = OPTIONS.info_dict.get("cache_size", None)
if cache_size is None:
- raise RuntimeError("can't determine the cache partition size")
+ print("--- can't determine the cache partition size ---")
OPTIONS.cache_size = cache_size
if OPTIONS.incremental_source is None:
@@ -1647,7 +1733,7 @@ def main(argv):
break
else:
- print "unzipping source target-files..."
+ print("unzipping source target-files...")
OPTIONS.source_tmp, source_zip = common.UnzipTemp(
OPTIONS.incremental_source)
OPTIONS.target_info_dict = OPTIONS.info_dict
@@ -1660,7 +1746,7 @@ def main(argv):
"default_system_dev_certificate",
"build/target/product/security/testkey")
if OPTIONS.verbose:
- print "--- source info ---"
+ print("--- source info ---")
common.DumpInfoDict(OPTIONS.source_info_dict)
try:
WriteIncrementalOTAPackage(input_zip, source_zip, output_zip)
@@ -1669,7 +1755,7 @@ def main(argv):
except ValueError:
if not OPTIONS.fallback_to_full:
raise
- print "--- failed to build incremental; falling back to full ---"
+ print("--- failed to build incremental; falling back to full ---")
OPTIONS.incremental_source = None
common.ZipClose(output_zip)
@@ -1677,7 +1763,7 @@ def main(argv):
SignOutput(temp_zip_file.name, args[1])
temp_zip_file.close()
- print "done."
+ print("done.")
if __name__ == '__main__':
@@ -1685,9 +1771,9 @@ if __name__ == '__main__':
common.CloseInheritedPipes()
main(sys.argv[1:])
except common.ExternalError as e:
- print
- print " ERROR: %s" % (e,)
- print
+ print()
+ print(" ERROR: %s" % e)
+ print()
sys.exit(1)
finally:
common.Cleanup()
diff --git a/tools/releasetools/rangelib.py b/tools/releasetools/rangelib.py
index 373bbed..cbb34b9 100644
--- a/tools/releasetools/rangelib.py
+++ b/tools/releasetools/rangelib.py
@@ -43,6 +43,8 @@ class RangeSet(object):
return self.data != other.data
def __nonzero__(self):
return bool(self.data)
+ def __bool__(self):
+ return self.__nonzero__()
def __str__(self):
if not self.data:
diff --git a/tools/releasetools/sign_target_files_apks.py b/tools/releasetools/sign_target_files_apks.py
index 60d62c2..abdb845 100755
--- a/tools/releasetools/sign_target_files_apks.py
+++ b/tools/releasetools/sign_target_files_apks.py
@@ -65,14 +65,15 @@ Usage: sign_target_files_apks [flags] input_target_files output_target_files
"""
+from __future__ import print_function
+
import sys
if sys.hexversion < 0x02070000:
- print >> sys.stderr, "Python 2.7 or newer is required."
+ print("Python 2.7 or newer is required.", file=sys.stderr)
sys.exit(1)
import base64
-import cStringIO
import copy
import errno
import os
@@ -82,6 +83,11 @@ import subprocess
import tempfile
import zipfile
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from io import StringIO
+
import add_img_to_target_files
import common
@@ -98,11 +104,11 @@ def GetApkCerts(tf_zip):
certmap = common.ReadApkCerts(tf_zip)
# apply the key remapping to the contents of the file
- for apk, cert in certmap.iteritems():
+ for apk, cert in certmap.items():
certmap[apk] = OPTIONS.key_map.get(cert, cert)
# apply all the -e options, overriding anything in the file
- for apk, cert in OPTIONS.extra_apks.iteritems():
+ for apk, cert in OPTIONS.extra_apks.items():
if not cert:
cert = "PRESIGNED"
certmap[apk] = OPTIONS.key_map.get(cert, cert)
@@ -120,10 +126,10 @@ def CheckAllApksSigned(input_tf_zip, apk_key_map):
if name not in apk_key_map:
unknown_apks.append(name)
if unknown_apks:
- print "ERROR: no key specified for:\n\n ",
- print "\n ".join(unknown_apks)
- print "\nUse '-e <apkname>=' to specify a key (which may be an"
- print "empty string to not sign this apk)."
+ print("ERROR: no key specified for:\n\n ", end=' ')
+ print("\n ".join(unknown_apks))
+ print("\nUse '-e <apkname>=' to specify a key (which may be an")
+ print("empty string to not sign this apk).")
sys.exit(1)
@@ -172,6 +178,9 @@ def ProcessTargetFiles(input_tf_zip, output_tf_zip, misc_info,
if info.filename.startswith("IMAGES/"):
continue
+ if info.filename.startswith("BOOTABLE_IMAGES/"):
+ continue
+
data = input_tf_zip.read(info.filename)
out_info = copy.copy(info)
@@ -194,25 +203,29 @@ def ProcessTargetFiles(input_tf_zip, output_tf_zip, misc_info,
name = os.path.basename(info.filename)
key = apk_key_map[name]
if key not in common.SPECIAL_CERT_STRINGS:
- print " signing: %-*s (%s)" % (maxsize, name, key)
+ print(" signing: %-*s (%s)" % (maxsize, name, key))
signed_data = SignApk(data, key, key_passwords[key])
common.ZipWriteStr(output_tf_zip, out_info, signed_data)
else:
# an APK we're not supposed to sign.
- print "NOT signing: %s" % (name,)
+ print("NOT signing: %s" % name)
common.ZipWriteStr(output_tf_zip, out_info, data)
elif info.filename in ("SYSTEM/build.prop",
"VENDOR/build.prop",
"BOOT/RAMDISK/default.prop",
"RECOVERY/RAMDISK/default.prop"):
- print "rewriting %s:" % (info.filename,)
+ print("rewriting %s:" % info.filename)
new_data = RewriteProps(data, misc_info)
common.ZipWriteStr(output_tf_zip, out_info, new_data)
if info.filename in ("BOOT/RAMDISK/default.prop",
"RECOVERY/RAMDISK/default.prop"):
write_to_temp(info.filename, info.external_attr, new_data)
elif info.filename.endswith("mac_permissions.xml"):
- print "rewriting %s with new keys." % (info.filename,)
+ print("rewriting %s with new keys." % info.filename)
+ new_data = ReplaceCerts(data)
+ common.ZipWriteStr(output_tf_zip, out_info, new_data)
+ elif info.filename.startswith("SYSTEM/etc/permissions/"):
+ print("rewriting %s with new keys." % info.filename)
new_data = ReplaceCerts(data)
common.ZipWriteStr(output_tf_zip, out_info, new_data)
elif info.filename in ("SYSTEM/recovery-from-boot.p",
@@ -257,10 +270,10 @@ def ReplaceCerts(data):
"""Given a string of data, replace all occurences of a set
of X509 certs with a newer set of X509 certs and return
the updated data string."""
- for old, new in OPTIONS.key_map.iteritems():
+ for old, new in OPTIONS.key_map.items():
try:
if OPTIONS.verbose:
- print " Replacing %s.x509.pem with %s.x509.pem" % (old, new)
+ print(" Replacing %s.x509.pem with %s.x509.pem" % (old, new))
f = open(old + ".x509.pem")
old_cert16 = base64.b16encode(common.ParseCertificate(f.read())).lower()
f.close()
@@ -271,14 +284,14 @@ def ReplaceCerts(data):
pattern = "\\b"+old_cert16+"\\b"
(data, num) = re.subn(pattern, new_cert16, data, flags=re.IGNORECASE)
if OPTIONS.verbose:
- print " Replaced %d occurence(s) of %s.x509.pem with " \
- "%s.x509.pem" % (num, old, new)
+ print(" Replaced %d occurence(s) of %s.x509.pem with "
+ "%s.x509.pem" % (num, old, new))
except IOError as e:
if e.errno == errno.ENOENT and not OPTIONS.verbose:
continue
- print " Error accessing %s. %s. Skip replacing %s.x509.pem " \
- "with %s.x509.pem." % (e.filename, e.strerror, old, new)
+ print(" Error accessing %s. %s. Skip replacing %s.x509.pem "
+ "with %s.x509.pem." % (e.filename, e.strerror, old, new))
return data
@@ -331,8 +344,8 @@ def RewriteProps(data, misc_info):
value = " ".join(value)
line = key + "=" + value
if line != original_line:
- print " replace: ", original_line
- print " with: ", line
+ print(" replace: ", original_line)
+ print(" with: ", line)
output.append(line)
return "\n".join(output) + "\n"
@@ -348,7 +361,7 @@ def ReplaceOtaKeys(input_tf_zip, output_tf_zip, misc_info):
extra_recovery_keys = [OPTIONS.key_map.get(k, k) + ".x509.pem"
for k in extra_recovery_keys.split()]
if extra_recovery_keys:
- print "extra recovery-only key(s): " + ", ".join(extra_recovery_keys)
+ print("extra recovery-only key(s): " + ", ".join(extra_recovery_keys))
else:
extra_recovery_keys = []
@@ -362,14 +375,14 @@ def ReplaceOtaKeys(input_tf_zip, output_tf_zip, misc_info):
mapped_keys.append(OPTIONS.key_map.get(k, k) + ".x509.pem")
if mapped_keys:
- print "using:\n ", "\n ".join(mapped_keys)
- print "for OTA package verification"
+ print("using:\n ", "\n ".join(mapped_keys))
+ print("for OTA package verification")
else:
devkey = misc_info.get("default_system_dev_certificate",
"build/target/product/security/testkey")
mapped_keys.append(
OPTIONS.key_map.get(devkey, devkey) + ".x509.pem")
- print "META/otakeys.txt has no keys; using", mapped_keys[0]
+ print("META/otakeys.txt has no keys; using", mapped_keys[0])
# recovery uses a version of the key that has been slightly
# predigested (by DumpPublicKey.java) and put in res/keys.
@@ -385,11 +398,19 @@ def ReplaceOtaKeys(input_tf_zip, output_tf_zip, misc_info):
common.ZipWriteStr(output_tf_zip, "RECOVERY/RAMDISK/res/keys",
new_recovery_keys)
+ # Save the base64 key representation in the update for key-change
+ # validations
+ p = common.Run(["python", "build/tools/getb64key.py", mapped_keys[0]],
+ stdout=subprocess.PIPE)
+ data, _ = p.communicate()
+ if p.returncode == 0:
+ common.ZipWriteStr(output_tf_zip, "META/releasekey.txt", data)
+
# SystemUpdateActivity uses the x509.pem version of the keys, but
# put into a zipfile system/etc/security/otacerts.zip.
# We DO NOT include the extra_recovery_keys (if any) here.
- temp_file = cStringIO.StringIO()
+ temp_file = StringIO()
certs_zip = zipfile.ZipFile(temp_file, "w")
for k in mapped_keys:
certs_zip.write(k)
@@ -400,7 +421,7 @@ def ReplaceOtaKeys(input_tf_zip, output_tf_zip, misc_info):
return new_recovery_keys
def ReplaceVerityPublicKey(targetfile_zip, key_path):
- print "Replacing verity public key with %s" % key_path
+ print("Replacing verity public key with %s" % key_path)
with open(key_path) as f:
data = f.read()
common.ZipWriteStr(targetfile_zip, "BOOT/RAMDISK/verity_key", data)
@@ -408,7 +429,7 @@ def ReplaceVerityPublicKey(targetfile_zip, key_path):
def ReplaceVerityPrivateKey(targetfile_input_zip, targetfile_output_zip,
misc_info, key_path):
- print "Replacing verity private key with %s" % key_path
+ print("Replacing verity private key with %s" % key_path)
current_key = misc_info["verity_key"]
original_misc_info = targetfile_input_zip.read("META/misc_info.txt")
new_misc_info = original_misc_info.replace(current_key, key_path)
@@ -499,14 +520,14 @@ def main(argv):
add_img_to_target_files.AddImagesToTargetFiles(args[1])
- print "done."
+ print("done.")
if __name__ == '__main__':
try:
main(sys.argv[1:])
- except common.ExternalError, e:
- print
- print " ERROR: %s" % (e,)
- print
+ except common.ExternalError as e:
+ print()
+ print(" ERROR: %s" % e)
+ print()
sys.exit(1)
diff --git a/tools/releasetools/sparse_img.py b/tools/releasetools/sparse_img.py
index 07f3c1c..fa4406c 100644
--- a/tools/releasetools/sparse_img.py
+++ b/tools/releasetools/sparse_img.py
@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from __future__ import print_function
+
import bisect
import os
import struct
@@ -210,6 +212,16 @@ class SparseImage(object):
nonzero_blocks = []
reference = '\0' * self.blocksize
+ # Workaround for bug 23227672. For squashfs, we don't have a system.map. So
+ # the whole system image will be treated as a single file. But for some
+ # unknown bug, the updater will be killed due to OOM when writing back the
+ # patched image to flash (observed on lenok-userdebug MEA49). Prior to
+ # getting a real fix, we evenly divide the non-zero blocks into smaller
+ # groups (currently 1024 blocks or 4MB per group).
+ # Bug: 23227672
+ MAX_BLOCKS_PER_GROUP = 1024
+ nonzero_groups = []
+
f = self.simg_f
for s, e in remaining:
for b in range(s, e):
@@ -232,12 +244,22 @@ class SparseImage(object):
nonzero_blocks.append(b)
nonzero_blocks.append(b+1)
- assert zero_blocks or nonzero_blocks or clobbered_blocks
+ if len(nonzero_blocks) >= MAX_BLOCKS_PER_GROUP:
+ nonzero_groups.append(nonzero_blocks)
+ # Clear the list.
+ nonzero_blocks = []
+
+ if nonzero_blocks:
+ nonzero_groups.append(nonzero_blocks)
+ nonzero_blocks = []
+
+ assert zero_blocks or nonzero_groups or clobbered_blocks
if zero_blocks:
out["__ZERO"] = rangelib.RangeSet(data=zero_blocks)
- if nonzero_blocks:
- out["__NONZERO"] = rangelib.RangeSet(data=nonzero_blocks)
+ if nonzero_groups:
+ for i, blocks in enumerate(nonzero_groups):
+ out["__NONZERO-%d" % i] = rangelib.RangeSet(data=blocks)
if clobbered_blocks:
out["__COPY"] = clobbered_blocks
diff --git a/tools/repopick.py b/tools/repopick.py
new file mode 100755
index 0000000..a53c043
--- /dev/null
+++ b/tools/repopick.py
@@ -0,0 +1,377 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2013-15 The CyanogenMod Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# Run repopick.py -h for a description of this utility.
+#
+
+from __future__ import print_function
+
+import sys
+import json
+import os
+import subprocess
+import re
+import argparse
+import textwrap
+from xml.etree import ElementTree
+
+try:
+ # For python3
+ import urllib.error
+ import urllib.request
+except ImportError:
+ # For python2
+ import imp
+ import urllib2
+ urllib = imp.new_module('urllib')
+ urllib.error = urllib2
+ urllib.request = urllib2
+
+
+# Verifies whether path a is a subdirectory (or the same) as path b
+def is_subdir(a, b):
+ a = os.path.realpath(a) + '/'
+ b = os.path.realpath(b) + '/'
+ return b == a[:len(b)]
+
+
+def fetch_query_via_ssh(remote_url, query):
+ """Given a remote_url and a query, return the list of changes that fit it
+ This function is slightly messy - the ssh api does not return data in the same structure as the HTTP REST API
+    We have to get the data, then transform it to match what we're expecting from the HTTP REST API"""
+ if remote_url.count(':') == 2:
+ (uri, userhost, port) = remote_url.split(':')
+ userhost = userhost[2:]
+ elif remote_url.count(':') == 1:
+ (uri, userhost) = remote_url.split(':')
+ userhost = userhost[2:]
+ port = 29418
+ else:
+ raise Exception('Malformed URI: Expecting ssh://[user@]host[:port]')
+
+
+ out = subprocess.check_output(['ssh', '-x', '-p{0}'.format(port), userhost, 'gerrit', 'query', '--format=JSON --patch-sets --current-patch-set', query])
+ if not hasattr(out, 'encode'):
+ out = out.decode()
+ reviews = []
+ for line in out.split('\n'):
+ try:
+ data = json.loads(line)
+ # make our data look like the http rest api data
+ review = {
+ 'branch': data['branch'],
+ 'change_id': data['id'],
+ 'current_revision': data['currentPatchSet']['revision'],
+ 'number': int(data['number']),
+ 'revisions': {patch_set['revision']: {
+ 'number': int(patch_set['number']),
+ 'fetch': {
+ 'ssh': {
+ 'ref': patch_set['ref'],
+ 'url': 'ssh://{0}:{1}/{2}'.format(userhost, port, data['project'])
+ }
+ }
+ } for patch_set in data['patchSets']},
+ 'subject': data['subject'],
+ 'project': data['project'],
+ 'status': data['status']
+ }
+ reviews.append(review)
+ except:
+ pass
+ args.quiet or print('Found {0} reviews'.format(len(reviews)))
+ return reviews
+
+
+def fetch_query_via_http(remote_url, query):
+
+ """Given a query, fetch the change numbers via http"""
+ url = '{0}/changes/?q={1}&o=CURRENT_REVISION&o=ALL_REVISIONS'.format(remote_url, query)
+ data = urllib.request.urlopen(url).read().decode('utf-8')
+ reviews = json.loads(data[5:])
+
+ for review in reviews:
+ review['number'] = review.pop('_number')
+
+ return reviews
+
+
+def fetch_query(remote_url, query):
+ """Wrapper for fetch_query_via_proto functions"""
+ if remote_url[0:3] == 'ssh':
+ return fetch_query_via_ssh(remote_url, query)
+ elif remote_url[0:4] == 'http':
+ return fetch_query_via_http(remote_url, query.replace(' ', '+'))
+ else:
+ raise Exception('Gerrit URL should be in the form http[s]://hostname/ or ssh://[user@]host[:port]')
+
+if __name__ == '__main__':
+ # Default to CyanogenMod Gerrit
+ default_gerrit = 'http://review.cyanogenmod.org'
+
+ parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent('''\
+ repopick.py is a utility to simplify the process of cherry picking
+ patches from CyanogenMod's Gerrit instance (or any gerrit instance of your choosing)
+
+ Given a list of change numbers, repopick will cd into the project path
+ and cherry pick the latest patch available.
+
+ With the --start-branch argument, the user can specify that a branch
+ should be created before cherry picking. This is useful for
+ cherry-picking many patches into a common branch which can be easily
+ abandoned later (good for testing other's changes.)
+
+ The --abandon-first argument, when used in conjunction with the
+ --start-branch option, will cause repopick to abandon the specified
+ branch in all repos first before performing any cherry picks.'''))
+ parser.add_argument('change_number', nargs='*', help='change number to cherry pick. Use {change number}/{patchset number} to get a specific revision.')
+ parser.add_argument('-i', '--ignore-missing', action='store_true', help='do not error out if a patch applies to a missing directory')
+ parser.add_argument('-s', '--start-branch', nargs=1, help='start the specified branch before cherry picking')
+ parser.add_argument('-a', '--abandon-first', action='store_true', help='before cherry picking, abandon the branch specified in --start-branch')
+ parser.add_argument('-b', '--auto-branch', action='store_true', help='shortcut to "--start-branch auto --abandon-first --ignore-missing"')
+ parser.add_argument('-q', '--quiet', action='store_true', help='print as little as possible')
+ parser.add_argument('-v', '--verbose', action='store_true', help='print extra information to aid in debug')
+ parser.add_argument('-f', '--force', action='store_true', help='force cherry pick even if change is closed')
+ parser.add_argument('-p', '--pull', action='store_true', help='execute pull instead of cherry-pick')
+ parser.add_argument('-P', '--path', help='use the specified path for the change')
+ parser.add_argument('-t', '--topic', help='pick all commits from a specified topic')
+ parser.add_argument('-Q', '--query', help='pick all commits using the specified query')
+ parser.add_argument('-g', '--gerrit', default=default_gerrit, help='Gerrit Instance to use. Form proto://[user@]host[:port]')
+ parser.add_argument('-e', '--exclude', nargs=1, help='exclude a list of commit numbers separated by a ,')
+ args = parser.parse_args()
+ if not args.start_branch and args.abandon_first:
+ parser.error('if --abandon-first is set, you must also give the branch name with --start-branch')
+ if args.auto_branch:
+ args.abandon_first = True
+ args.ignore_missing = True
+ if not args.start_branch:
+ args.start_branch = ['auto']
+ if args.quiet and args.verbose:
+ parser.error('--quiet and --verbose cannot be specified together')
+
+ if (1 << bool(args.change_number) << bool(args.topic) << bool(args.query)) != 2:
+ parser.error('One (and only one) of change_number, topic, and query are allowed')
+
+ # Change current directory to the top of the tree
+ if 'ANDROID_BUILD_TOP' in os.environ:
+ top = os.environ['ANDROID_BUILD_TOP']
+
+ if not is_subdir(os.getcwd(), top):
+ sys.stderr.write('ERROR: You must run this tool from within $ANDROID_BUILD_TOP!\n')
+ sys.exit(1)
+ os.chdir(os.environ['ANDROID_BUILD_TOP'])
+
+ # Sanity check that we are being run from the top level of the tree
+ if not os.path.isdir('.repo'):
+ sys.stderr.write('ERROR: No .repo directory found. Please run this from the top of your tree.\n')
+ sys.exit(1)
+
+ # If --abandon-first is given, abandon the branch before starting
+ if args.abandon_first:
+ # Determine if the branch already exists; skip the abandon if it does not
+ plist = subprocess.check_output(['repo', 'info'])
+ if not hasattr(plist, 'encode'):
+ plist = plist.decode()
+ needs_abandon = False
+ for pline in plist.splitlines():
+ matchObj = re.match(r'Local Branches.*\[(.*)\]', pline)
+ if matchObj:
+ local_branches = re.split('\s*,\s*', matchObj.group(1))
+ if any(args.start_branch[0] in s for s in local_branches):
+ needs_abandon = True
+
+ if needs_abandon:
+ # Perform the abandon only if the branch already exists
+ if not args.quiet:
+ print('Abandoning branch: %s' % args.start_branch[0])
+ subprocess.check_output(['repo', 'abandon', args.start_branch[0]])
+ if not args.quiet:
+ print('')
+
+ # Get the master manifest from repo
+ # - convert project name and revision to a path
+ project_name_to_data = {}
+ manifest = subprocess.check_output(['repo', 'manifest'])
+ xml_root = ElementTree.fromstring(manifest)
+ projects = xml_root.findall('project')
+ default_revision = xml_root.findall('default')[0].get('revision').split('/')[-1]
+
+    #dump project data into a list of dicts with the following data:
+ #{project: {path, revision}}
+
+ for project in projects:
+ name = project.get('name')
+ path = project.get('path')
+ revision = project.get('revision')
+ if revision is None:
+ revision = default_revision
+
+ if not name in project_name_to_data:
+ project_name_to_data[name] = {}
+ project_name_to_data[name][revision] = path
+
+ # get data on requested changes
+ reviews = []
+ change_numbers = []
+ if args.topic:
+ reviews = fetch_query(args.gerrit, 'topic:{0}'.format(args.topic))
+ change_numbers = sorted([str(r['number']) for r in reviews])
+ if args.query:
+ reviews = fetch_query(args.gerrit, args.query)
+ change_numbers = sorted([str(r['number']) for r in reviews])
+ if args.change_number:
+ for c in args.change_number:
+ if '-' in c:
+ templist = c.split('-')
+ for i in range(int(templist[0]), int(templist[1]) + 1):
+ change_numbers.append(str(i))
+ else:
+ change_numbers.append(c)
+ reviews = fetch_query(args.gerrit, ' OR '.join('change:{0}'.format(x.split('/')[0]) for x in change_numbers))
+
+ # make list of things to actually merge
+ mergables = []
+
+ # If --exclude is given, create the list of commits to ignore
+ exclude = []
+ if args.exclude:
+ exclude = args.exclude[0].split(',')
+
+ for change in change_numbers:
+ patchset = None
+ if '/' in change:
+ (change, patchset) = change.split('/')
+
+ if change in exclude:
+ continue
+
+ change = int(change)
+ review = next((x for x in reviews if x['number'] == change), None)
+ if review is None:
+ print('Change %d not found, skipping' % change)
+ continue
+
+ mergables.append({
+ 'subject': review['subject'],
+ 'project': review['project'],
+ 'branch': review['branch'],
+ 'change_number': review['number'],
+ 'status': review['status'],
+ 'fetch': None
+ })
+ mergables[-1]['fetch'] = review['revisions'][review['current_revision']]['fetch']
+ mergables[-1]['id'] = change
+ if patchset:
+ try:
+ mergables[-1]['fetch'] = [x['fetch'] for x in review['revisions'] if x['_number'] == patchset][0]
+ mergables[-1]['id'] = '{0}/{1}'.format(change, patchset)
+ except (IndexError, ValueError):
+ args.quiet or print('ERROR: The patch set {0}/{1} could not be found, using CURRENT_REVISION instead.'.format(change, patchset))
+
+ for item in mergables:
+ args.quiet or print('Applying change number {0}...'.format(item['id']))
+ # Check if change is open and exit if it's not, unless -f is specified
+ if (item['status'] != 'OPEN' and item['status'] != 'NEW') and not args.query:
+ if args.force:
+ print('!! Force-picking a closed change !!\n')
+ else:
+ print('Change status is ' + item['status'] + '. Skipping the cherry pick.\nUse -f to force this pick.')
+ continue
+
+ # Convert the project name to a project path
+ # - check that the project path exists
+ project_path = None
+
+ if item['project'] in project_name_to_data and item['branch'] in project_name_to_data[item['project']]:
+ project_path = project_name_to_data[item['project']][item['branch']]
+ elif args.path:
+ project_path = args.path
+ elif args.ignore_missing:
+ print('WARNING: Skipping {0} since there is no project directory for: {1}\n'.format(item['id'], item['project']))
+ continue
+ else:
+ sys.stderr.write('ERROR: For {0}, could not determine the project path for project {1}\n'.format(item['id'], item['project']))
+ sys.exit(1)
+
+ # If --start-branch is given, create the branch (more than once per path is okay; repo ignores gracefully)
+ if args.start_branch:
+ subprocess.check_output(['repo', 'start', args.start_branch[0], project_path])
+
+ # Print out some useful info
+ if not args.quiet:
+ print('--> Subject: "{0}"'.format(item['subject']))
+ print('--> Project path: {0}'.format(project_path))
+ print('--> Change number: {0} (Patch Set {0})'.format(item['id']))
+
+ if 'anonymous http' in item['fetch']:
+ method = 'anonymous http'
+ else:
+ method = 'ssh'
+
+ # Try fetching from GitHub first if using default gerrit
+ if args.gerrit == default_gerrit:
+ if args.verbose:
+ print('Trying to fetch the change from GitHub')
+
+ if args.pull:
+ cmd = ['git pull --no-edit github', item['fetch'][method]['ref']]
+ else:
+ cmd = ['git fetch github', item['fetch'][method]['ref']]
+ if args.quiet:
+ cmd.append('--quiet')
+ else:
+ print(cmd)
+ result = subprocess.call([' '.join(cmd)], cwd=project_path, shell=True)
+ FETCH_HEAD = '{0}/.git/FETCH_HEAD'.format(project_path)
+ if result != 0 and os.stat(FETCH_HEAD).st_size != 0:
+ print('ERROR: git command failed')
+ sys.exit(result)
+ # Check if it worked
+ if args.gerrit != default_gerrit or os.stat(FETCH_HEAD).st_size == 0:
+ # If not using the default gerrit or github failed, fetch from gerrit.
+ if args.verbose:
+ if args.gerrit == default_gerrit:
+ print('Fetching from GitHub didn\'t work, trying to fetch the change from Gerrit')
+ else:
+ print('Fetching from {0}'.format(args.gerrit))
+
+ if args.pull:
+ cmd = ['git pull --no-edit', item['fetch'][method]['url'], item['fetch'][method]['ref']]
+ else:
+ cmd = ['git fetch', item['fetch'][method]['url'], item['fetch'][method]['ref']]
+ if args.quiet:
+ cmd.append('--quiet')
+ else:
+ print(cmd)
+ result = subprocess.call([' '.join(cmd)], cwd=project_path, shell=True)
+ if result != 0:
+ print('ERROR: git command failed')
+ sys.exit(result)
+ # Perform the cherry-pick
+ if not args.pull:
+ cmd = ['git cherry-pick FETCH_HEAD']
+ if args.quiet:
+ cmd_out = open(os.devnull, 'wb')
+ else:
+ cmd_out = None
+ result = subprocess.call(cmd, cwd=project_path, shell=True, stdout=cmd_out, stderr=cmd_out)
+ if result != 0:
+ print('ERROR: git command failed')
+ sys.exit(result)
+ if not args.quiet:
+ print('')
diff --git a/tools/roomservice.py b/tools/roomservice.py
new file mode 100755
index 0000000..a1b69cd
--- /dev/null
+++ b/tools/roomservice.py
@@ -0,0 +1,297 @@
+#!/usr/bin/env python
+# Copyright (C) 2012-2013, The CyanogenMod Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import base64
+import json
+import netrc
+import os
+import re
+import sys
+try:
+ # For python3
+ import urllib.error
+ import urllib.parse
+ import urllib.request
+except ImportError:
+ # For python2
+ import imp
+ import urllib2
+ import urlparse
+ urllib = imp.new_module('urllib')
+ urllib.error = urllib2
+ urllib.parse = urlparse
+ urllib.request = urllib2
+
+from xml.etree import ElementTree
+
+product = sys.argv[1]
+
+if len(sys.argv) > 2:
+ depsonly = sys.argv[2]
+else:
+ depsonly = None
+
+try:
+ device = product[product.index("_") + 1:]
+except:
+ device = product
+
+if not depsonly:
+ print("Device %s not found. Attempting to retrieve device repository from CyanogenMod Github (http://github.com/CyanogenMod)." % device)
+
+repositories = []
+
+try:
+ authtuple = netrc.netrc().authenticators("api.github.com")
+
+ if authtuple:
+ auth_string = ('%s:%s' % (authtuple[0], authtuple[2])).encode()
+ githubauth = base64.encodestring(auth_string).decode().replace('\n', '')
+ else:
+ githubauth = None
+except:
+ githubauth = None
+
+def add_auth(githubreq):
+ if githubauth:
+ githubreq.add_header("Authorization","Basic %s" % githubauth)
+
+if not depsonly:
+ githubreq = urllib.request.Request("https://api.github.com/search/repositories?q=%s+user:CyanogenMod+in:name+fork:true" % device)
+ add_auth(githubreq)
+ try:
+ result = json.loads(urllib.request.urlopen(githubreq).read().decode())
+ except urllib.error.URLError:
+ print("Failed to search GitHub")
+ sys.exit()
+ except ValueError:
+ print("Failed to parse return data from GitHub")
+ sys.exit()
+ for res in result.get('items', []):
+ repositories.append(res)
+
+local_manifests = r'.repo/local_manifests'
+if not os.path.exists(local_manifests): os.makedirs(local_manifests)
+
+def exists_in_tree(lm, path):
+ for child in lm.getchildren():
+ if child.attrib['path'] == path:
+ return True
+ return False
+
+# in-place prettyprint formatter
+def indent(elem, level=0):
+ i = "\n" + level*" "
+ if len(elem):
+ if not elem.text or not elem.text.strip():
+ elem.text = i + " "
+ if not elem.tail or not elem.tail.strip():
+ elem.tail = i
+ for elem in elem:
+ indent(elem, level+1)
+ if not elem.tail or not elem.tail.strip():
+ elem.tail = i
+ else:
+ if level and (not elem.tail or not elem.tail.strip()):
+ elem.tail = i
+
+def get_default_revision():
+ m = ElementTree.parse(".repo/manifest.xml")
+ d = m.findall('default')[0]
+ r = d.get('revision')
+ return r.replace('refs/heads/', '').replace('refs/tags/', '')
+
+def get_from_manifest(devicename):
+ try:
+ lm = ElementTree.parse(".repo/local_manifests/roomservice.xml")
+ lm = lm.getroot()
+ except:
+ lm = ElementTree.Element("manifest")
+
+ for localpath in lm.findall("project"):
+ if re.search("android_device_.*_%s$" % device, localpath.get("name")):
+ return localpath.get("path")
+
+ # Devices originally from AOSP are in the main manifest...
+ try:
+ mm = ElementTree.parse(".repo/manifest.xml")
+ mm = mm.getroot()
+ except:
+ mm = ElementTree.Element("manifest")
+
+ for localpath in mm.findall("project"):
+ if re.search("android_device_.*_%s$" % device, localpath.get("name")):
+ return localpath.get("path")
+
+ return None
+
+def is_in_manifest(projectpath):
+ try:
+ lm = ElementTree.parse(".repo/local_manifests/roomservice.xml")
+ lm = lm.getroot()
+ except:
+ lm = ElementTree.Element("manifest")
+
+ for localpath in lm.findall("project"):
+ if localpath.get("path") == projectpath:
+ return True
+
+ ## Search in main manifest, too
+ try:
+ lm = ElementTree.parse(".repo/manifest.xml")
+ lm = lm.getroot()
+ except:
+ lm = ElementTree.Element("manifest")
+
+ for localpath in lm.findall("project"):
+ if localpath.get("path") == projectpath:
+ return True
+
+ return False
+
+def add_to_manifest(repositories, fallback_branch = None):
+ try:
+ lm = ElementTree.parse(".repo/local_manifests/roomservice.xml")
+ lm = lm.getroot()
+ except:
+ lm = ElementTree.Element("manifest")
+
+ for repository in repositories:
+ repo_name = repository['repository']
+ repo_target = repository['target_path']
+ print('Checking if %s is fetched from %s' % (repo_target, repo_name))
+ if is_in_manifest(repo_target):
+ print('CyanogenMod/%s already fetched to %s' % (repo_name, repo_target))
+ continue
+
+ print('Adding dependency: CyanogenMod/%s -> %s' % (repo_name, repo_target))
+ project = ElementTree.Element("project", attrib = { "path": repo_target,
+ "remote": "github", "name": "CyanogenMod/%s" % repo_name })
+
+ if 'branch' in repository:
+ project.set('revision',repository['branch'])
+ elif fallback_branch:
+ print("Using fallback branch %s for %s" % (fallback_branch, repo_name))
+ project.set('revision', fallback_branch)
+ else:
+ print("Using default branch for %s" % repo_name)
+
+ lm.append(project)
+
+ indent(lm, 0)
+ raw_xml = ElementTree.tostring(lm).decode()
+ raw_xml = '<?xml version="1.0" encoding="UTF-8"?>\n' + raw_xml
+
+ f = open('.repo/local_manifests/roomservice.xml', 'w')
+ f.write(raw_xml)
+ f.close()
+
+def fetch_dependencies(repo_path, fallback_branch = None):
+ print('Looking for dependencies')
+ dependencies_path = repo_path + '/cm.dependencies'
+ syncable_repos = []
+
+ if os.path.exists(dependencies_path):
+ dependencies_file = open(dependencies_path, 'r')
+ dependencies = json.loads(dependencies_file.read())
+ fetch_list = []
+
+ for dependency in dependencies:
+ if not is_in_manifest(dependency['target_path']):
+ fetch_list.append(dependency)
+ syncable_repos.append(dependency['target_path'])
+
+ dependencies_file.close()
+
+ if len(fetch_list) > 0:
+ print('Adding dependencies to manifest')
+ add_to_manifest(fetch_list, fallback_branch)
+ else:
+ print('Dependencies file not found, bailing out.')
+
+ if len(syncable_repos) > 0:
+ print('Syncing dependencies')
+ os.system('repo sync --force-sync %s' % ' '.join(syncable_repos))
+
+ for deprepo in syncable_repos:
+ fetch_dependencies(deprepo)
+
+def has_branch(branches, revision):
+ return revision in [branch['name'] for branch in branches]
+
+if depsonly:
+ repo_path = get_from_manifest(device)
+ if repo_path:
+ fetch_dependencies(repo_path)
+ else:
+ print("Trying dependencies-only mode on a non-existing device tree?")
+
+ sys.exit()
+
+else:
+ for repository in repositories:
+ repo_name = repository['name']
+ if repo_name.startswith("android_device_") and repo_name.endswith("_" + device):
+ print("Found repository: %s" % repository['name'])
+
+ manufacturer = repo_name.replace("android_device_", "").replace("_" + device, "")
+
+ default_revision = get_default_revision()
+ print("Default revision: %s" % default_revision)
+ print("Checking branch info")
+ githubreq = urllib.request.Request(repository['branches_url'].replace('{/branch}', ''))
+ add_auth(githubreq)
+ result = json.loads(urllib.request.urlopen(githubreq).read().decode())
+
+ ## Try tags, too, since that's what releases use
+ if not has_branch(result, default_revision):
+ githubreq = urllib.request.Request(repository['tags_url'].replace('{/tag}', ''))
+ add_auth(githubreq)
+ result.extend (json.loads(urllib.request.urlopen(githubreq).read().decode()))
+
+ repo_path = "device/%s/%s" % (manufacturer, device)
+ adding = {'repository':repo_name,'target_path':repo_path}
+
+ fallback_branch = None
+ if not has_branch(result, default_revision):
+ if os.getenv('ROOMSERVICE_BRANCHES'):
+ fallbacks = list(filter(bool, os.getenv('ROOMSERVICE_BRANCHES').split(' ')))
+ for fallback in fallbacks:
+ if has_branch(result, fallback):
+ print("Using fallback branch: %s" % fallback)
+ fallback_branch = fallback
+ break
+
+ if not fallback_branch:
+ print("Default revision %s not found in %s. Bailing." % (default_revision, repo_name))
+ print("Branches found:")
+ for branch in [branch['name'] for branch in result]:
+ print(branch)
+ print("Use the ROOMSERVICE_BRANCHES environment variable to specify a list of fallback branches.")
+ sys.exit()
+
+ add_to_manifest([adding], fallback_branch)
+
+ print("Syncing repository to retrieve project.")
+ os.system('repo sync --force-sync %s' % repo_path)
+ print("Repository synced!")
+
+ fetch_dependencies(repo_path, fallback_branch)
+ print("Done")
+ sys.exit()
+
+print("Repository for %s not found in the CyanogenMod Github repository list. If this is in error, you may need to manually add it to your local_manifests/roomservice.xml." % device)
diff --git a/tools/signapk/SignApk.java b/tools/signapk/SignApk.java
index 88f486a..3ddab11 100644
--- a/tools/signapk/SignApk.java
+++ b/tools/signapk/SignApk.java
@@ -167,18 +167,29 @@ class SignApk {
}
/**
- * Reads the password from console and returns it as a string.
+ * If a console doesn't exist, reads the password from stdin
+ * If a console exists, reads the password from console and returns it as a string.
*
* @param keyFile The file containing the private key. Used to prompt the user.
*/
private static String readPassword(File keyFile) {
Console console;
char[] pwd;
- if((console = System.console()) != null &&
- (pwd = console.readPassword("[%s]", "Enter password for " + keyFile)) != null){
- return String.valueOf(pwd);
+ if ((console = System.console()) == null) {
+ System.out.print("Enter password for " + keyFile + " (password will not be hidden): ");
+ System.out.flush();
+ BufferedReader stdin = new BufferedReader(new InputStreamReader(System.in));
+ try {
+ return stdin.readLine();
+ } catch (IOException ex) {
+ return null;
+ }
} else {
- return null;
+ if ((pwd = console.readPassword("[%s]", "Enter password for " + keyFile)) != null) {
+ return String.valueOf(pwd);
+ } else {
+ return null;
+ }
}
}
diff --git a/tools/warn.py b/tools/warn.py
index 8097123..b5a49f6 100755
--- a/tools/warn.py
+++ b/tools/warn.py
@@ -1,11 +1,13 @@
#!/usr/bin/env python
# This file uses the following encoding: utf-8
+from __future__ import print_function
+
import sys
import re
if len(sys.argv) == 1:
- print 'usage: ' + sys.argv[0] + ' <build.log>'
+ print('usage: ' + sys.argv[0] + ' <build.log>')
sys.exit()
# if you add another level, don't forget to give it a color below
@@ -399,7 +401,7 @@ cur_row_color = 0
row_colors = [ 'e0e0e0', 'd0d0d0' ]
def output(text):
- print text,
+ print(text, end=' ')
def htmlbig(param):
return '<font size="+2">' + param + '</font>'