Merge "Remove extra JNI libs from prebuilt APKs"
diff --git a/core/Makefile b/core/Makefile
index a2d9339..517410a 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -2916,7 +2916,7 @@
ifeq (true,$(PRODUCT_BUILD_SUPER_PARTITION))
# BOARD_SUPER_PARTITION_SIZE must be defined to build super image.
-ifdef BOARD_SUPER_PARTITION_SIZE
+ifneq ($(BOARD_SUPER_PARTITION_SIZE),)
INSTALLED_SUPERIMAGE_TARGET := $(PRODUCT_OUT)/super.img
INSTALLED_SUPERIMAGE_EMPTY_TARGET := $(PRODUCT_OUT)/super_empty.img
@@ -2940,9 +2940,9 @@
--metadata-slots $(if $(1),2,1) \
--device-size $(BOARD_SUPER_PARTITION_SIZE) \
$(foreach name,$(BOARD_SUPER_PARTITION_PARTITION_LIST), \
- --partition $(name)$(1):$$($(UUIDGEN) $(name)$(1)):readonly:$(if $(2),$(call read-size-of-partitions,$(name)),0) \
+ --partition $(name)$(1):readonly:$(if $(2),$(call read-size-of-partitions,$(name)),0) \
$(if $(2), --image $(name)$(1)=$(call images-for-partitions,$(name))) \
- $(if $(1), --partition $(name)_b:$$($(UUIDGEN) $(name)_b):readonly:0) \
+ $(if $(1), --partition $(name)_b:readonly:0) \
)
endef
@@ -2977,32 +2977,61 @@
# Do not check for apps-only build
ifeq (true,$(PRODUCT_BUILD_SUPER_PARTITION))
-ifdef BOARD_SUPER_PARTITION_SIZE
-ifdef BOARD_SUPER_PARTITION_PARTITION_LIST
-droid_targets: check_android_partition_sizes
+droid_targets: check-all-partition-sizes
-.PHONY: check_android_partition_sizes
+.PHONY: check-all-partition-sizes check-all-partition-sizes-nodeps
# Add image dependencies so that generated_*_image_info.txt are written before checking.
-check_android_partition_sizes: $(call images-for-partitions,$(BOARD_SUPER_PARTITION_PARTITION_LIST))
+check-all-partition-sizes: $(call images-for-partitions,$(BOARD_SUPER_PARTITION_PARTITION_LIST))
-check_android_partition_sizes:
- partition_size_list="$(call read-size-of-partitions,$(BOARD_SUPER_PARTITION_PARTITION_LIST))"; \
- sum_sizes_expr=$$(sed -e 's/ /+/g' <<< "$${partition_size_list}"); \
- max_size_tail=$(if $(filter true,$(AB_OTA_UPDATER))," / 2"); \
- max_size_expr=$(BOARD_SUPER_PARTITION_SIZE)$${max_size_tail}; \
- if [ $$(( $${sum_sizes_expr} )) -gt $$(( $${max_size_expr} )) ]; then \
- echo "The sum of sizes of all logical partitions is larger than BOARD_SUPER_PARTITION_SIZE$${max_size_tail}:"; \
- echo $${sum_sizes_expr} '==' $$(( $${sum_sizes_expr} )) '>' $${max_size_expr} '==' $$(( $${max_size_expr} )); \
- exit 1; \
- else \
- echo "The sum of sizes of all logical partitions is within BOARD_SUPER_PARTITION_SIZE$${max_size_tail}:"; \
- echo $${sum_sizes_expr} '==' $$(( $${sum_sizes_expr} )) '<=' $${max_size_expr} '==' $$(( $${max_size_expr} )); \
- fi
+# $(1): human-readable max size string
+# $(2): max size expression
+# $(3): list of partition names
+define check-sum-of-partition-sizes
+ partition_size_list="$(call read-size-of-partitions,$(3))"; \
+ sum_sizes_expr=$$(sed -e 's/ /+/g' <<< "$${partition_size_list}"); \
+ if [ $$(( $${sum_sizes_expr} )) -gt $$(( $(2) )) ]; then \
+ echo "The sum of sizes of [$(strip $(3))] is larger than $(strip $(1)):"; \
+ echo $${sum_sizes_expr} '==' $$(( $${sum_sizes_expr} )) '>' "$(2)" '==' $$(( $(2) )); \
+ exit 1; \
+ else \
+ echo "The sum of sizes of [$(strip $(3))] is within $(strip $(1)):"; \
+ echo $${sum_sizes_expr} '==' $$(( $${sum_sizes_expr} )) '<=' "$(2)" '==' $$(( $(2) )); \
+ fi;
+endef
-endif # BOARD_SUPER_PARTITION_PARTITION_LIST
-endif # BOARD_SUPER_PARTITION_SIZE
+define check-all-partition-sizes-target
+ # Check sum(all partitions) <= super partition (/ 2 for A/B)
+ $(if $(BOARD_SUPER_PARTITION_SIZE),$(if $(BOARD_SUPER_PARTITION_PARTITION_LIST), \
+ $(call check-sum-of-partition-sizes,BOARD_SUPER_PARTITION_SIZE$(if $(filter true,$(AB_OTA_UPDATER)), / 2), \
+ $(BOARD_SUPER_PARTITION_SIZE)$(if $(filter true,$(AB_OTA_UPDATER)), / 2),$(BOARD_SUPER_PARTITION_PARTITION_LIST))))
+
+ # For each group, check sum(partitions in group) <= group size
+ $(foreach group,$(call to-upper,$(BOARD_SUPER_PARTITION_GROUPS)), \
+ $(if $(BOARD_$(group)_SIZE),$(if $(BOARD_$(group)_PARTITION_LIST), \
+ $(call check-sum-of-partition-sizes,BOARD_$(group)_SIZE,$(BOARD_$(group)_SIZE),$(BOARD_$(group)_PARTITION_LIST)))))
+
+ # Check sum(all group sizes) <= super partition (/ 2 for A/B)
+ if [[ ! -z $(BOARD_SUPER_PARTITION_SIZE) ]]; then \
+ group_size_list="$(foreach group,$(call to-upper,$(BOARD_SUPER_PARTITION_GROUPS)),$(BOARD_$(group)_SIZE))"; \
+ sum_sizes_expr=$$(sed -e 's/ /+/g' <<< "$${group_size_list}"); \
+ max_size_tail=$(if $(filter true,$(AB_OTA_UPDATER))," / 2"); \
+ max_size_expr="$(BOARD_SUPER_PARTITION_SIZE)$${max_size_tail}"; \
+ if [ $$(( $${sum_sizes_expr} )) -gt $$(( $${max_size_expr} )) ]; then \
+ echo "The sum of sizes of [$(strip $(BOARD_SUPER_PARTITION_GROUPS))] is larger than BOARD_SUPER_PARTITION_SIZE$${max_size_tail}:"; \
+ echo $${sum_sizes_expr} '==' $$(( $${sum_sizes_expr} )) '>' $${max_size_expr} '==' $$(( $${max_size_expr} )); \
+ exit 1; \
+ else \
+ echo "The sum of sizes of [$(strip $(BOARD_SUPER_PARTITION_GROUPS))] is within BOARD_SUPER_PARTITION_SIZE$${max_size_tail}:"; \
+ echo $${sum_sizes_expr} '==' $$(( $${sum_sizes_expr} )) '<=' $${max_size_expr} '==' $$(( $${max_size_expr} )); \
+ fi; \
+ fi
+endef
+
+check-all-partition-sizes check-all-partition-sizes-nodeps:
+ $(call check-all-partition-sizes-target)
+
endif # PRODUCT_BUILD_SUPER_PARTITION
endif # TARGET_BUILD_APPS
@@ -3602,7 +3631,7 @@
ifdef BUILT_VENDOR_MATRIX
$(hide) cp $(BUILT_VENDOR_MATRIX) $(zip_root)/META/vendor_matrix.xml
endif
-ifdef BOARD_SUPER_PARTITION_SIZE
+ifneq ($(BOARD_SUPER_PARTITION_SIZE),)
$(hide) echo "super_size=$(BOARD_SUPER_PARTITION_SIZE)" >> $(zip_root)/META/misc_info.txt
$(hide) echo "lpmake=$(notdir $(LPMAKE))" >> $(zip_root)/META/misc_info.txt
$(hide) echo -n "lpmake_args=" >> $(zip_root)/META/misc_info.txt
diff --git a/core/base_rules.mk b/core/base_rules.mk
index fcc8ede..57fd818 100644
--- a/core/base_rules.mk
+++ b/core/base_rules.mk
@@ -759,8 +759,6 @@
ALL_MODULES.$(my_register_name).MODULE_NAME := $(LOCAL_MODULE)
ALL_MODULES.$(my_register_name).COMPATIBILITY_SUITES := $(LOCAL_COMPATIBILITY_SUITE)
ALL_MODULES.$(my_register_name).TEST_CONFIG := $(test_config)
-ALL_MODULES.$(my_register_name).SRCS := \
- $(ALL_MODULES.$(my_register_name).SRCS) $(LOCAL_SRC_FILES)
test_config :=
INSTALLABLE_FILES.$(LOCAL_INSTALLED_MODULE).MODULE := $(my_register_name)
diff --git a/core/binary.mk b/core/binary.mk
index b8ee423..07fb48a 100644
--- a/core/binary.mk
+++ b/core/binary.mk
@@ -46,8 +46,8 @@
my_cflags := $(LOCAL_CFLAGS)
my_conlyflags := $(LOCAL_CONLYFLAGS)
my_cppflags := $(LOCAL_CPPFLAGS)
-my_cflags_no_override := $(GLOBAL_CFLAGS_NO_OVERRIDE)
-my_cppflags_no_override := $(GLOBAL_CPPFLAGS_NO_OVERRIDE)
+my_cflags_no_override := $(GLOBAL_CLANG_CFLAGS_NO_OVERRIDE)
+my_cppflags_no_override := $(GLOBAL_CLANG_CPPFLAGS_NO_OVERRIDE)
my_ldflags := $(LOCAL_LDFLAGS)
my_ldlibs := $(LOCAL_LDLIBS)
my_asflags := $(LOCAL_ASFLAGS)
@@ -626,8 +626,6 @@
# actually used (although they are usually empty).
arm_objects_cflags := $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)$(arm_objects_mode)_CFLAGS)
normal_objects_cflags := $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)$(normal_objects_mode)_CFLAGS)
-arm_objects_cflags := $(call convert-to-clang-flags,$(arm_objects_cflags))
-normal_objects_cflags := $(call convert-to-clang-flags,$(normal_objects_cflags))
else
arm_objects_mode :=
@@ -1561,8 +1559,6 @@
my_cflags += $(LOCAL_CLANG_CFLAGS)
my_conlyflags += $(LOCAL_CLANG_CONLYFLAGS)
my_cppflags += $(LOCAL_CLANG_CPPFLAGS)
-my_cflags_no_override += $(GLOBAL_CLANG_CFLAGS_NO_OVERRIDE)
-my_cppflags_no_override += $(GLOBAL_CLANG_CPPFLAGS_NO_OVERRIDE)
my_asflags += $(LOCAL_CLANG_ASFLAGS)
my_ldflags += $(LOCAL_CLANG_LDFLAGS)
my_cflags += $(LOCAL_CLANG_CFLAGS_$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)) $(LOCAL_CLANG_CFLAGS_$(my_32_64_bit_suffix))
diff --git a/core/clear_vars.mk b/core/clear_vars.mk
index 5aa27ca..07e34e1 100644
--- a/core/clear_vars.mk
+++ b/core/clear_vars.mk
@@ -318,6 +318,7 @@
LOCAL_PREBUILT_JNI_LIBS_$(TARGET_ARCH):=
LOCAL_REQUIRED_MODULES_$(TARGET_ARCH):=
LOCAL_SHARED_LIBRARIES_$(TARGET_ARCH):=
+LOCAL_SOONG_JNI_LIBS_$(TARGET_ARCH):=
LOCAL_SRC_FILES_EXCLUDE_$(TARGET_ARCH):=
LOCAL_SRC_FILES_$(TARGET_ARCH):=
LOCAL_STATIC_LIBRARIES_$(TARGET_ARCH):=
@@ -340,6 +341,7 @@
LOCAL_PREBUILT_JNI_LIBS_$(TARGET_2ND_ARCH):=
LOCAL_REQUIRED_MODULES_$(TARGET_2ND_ARCH):=
LOCAL_SHARED_LIBRARIES_$(TARGET_2ND_ARCH):=
+LOCAL_SOONG_JNI_LIBS_$(TARGET_2ND_ARCH):=
LOCAL_SRC_FILES_EXCLUDE_$(TARGET_2ND_ARCH):=
LOCAL_SRC_FILES_$(TARGET_2ND_ARCH):=
LOCAL_STATIC_LIBRARIES_$(TARGET_2ND_ARCH):=
diff --git a/core/config.mk b/core/config.mk
index 483bc77..b9174b3 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -99,6 +99,15 @@
TARGET_CLANG_SUPPORTED 2ND_TARGET_CLANG_SUPPORTED \
TARGET_CC 2ND_TARGET_CC \
TARGET_CXX 2ND_TARGET_CXX \
+ TARGET_TOOLCHAIN_ROOT 2ND_TARGET_TOOLCHAIN_ROOT \
+ HOST_TOOLCHAIN_ROOT 2ND_HOST_TOOLCHAIN_ROOT \
+ HOST_CROSS_TOOLCHAIN_ROOT 2ND_HOST_CROSS_TOOLCHAIN_ROOT \
+ HOST_TOOLS_PREFIX 2ND_HOST_TOOLS_PREFIX \
+ HOST_CROSS_TOOLS_PREFIX 2ND_HOST_CROSS_TOOLS_PREFIX \
+ HOST_GCC_VERSION 2ND_HOST_GCC_VERSION \
+ HOST_CROSS_GCC_VERSION 2ND_HOST_CROSS_GCC_VERSION \
+ TARGET_NDK_GCC_VERSION 2ND_TARGET_NDK_GCC_VERSION \
+ GLOBAL_CFLAGS_NO_OVERRIDE GLOBAL_CPPFLAGS_NO_OVERRIDE \
,GCC support has been removed. Use Clang instead)
# This is marked as obsolete in envsetup.mk after reading the BoardConfig.mk
@@ -705,7 +714,6 @@
DATA_BINDING_COMPILER := $(HOST_OUT_JAVA_LIBRARIES)/databinding-compiler.jar
FAT16COPY := build/make/tools/fat16copy.py
CHECK_LINK_TYPE := build/make/tools/check_link_type.py
-UUIDGEN := build/make/tools/uuidgen.py
LPMAKE := $(HOST_OUT_EXECUTABLES)/lpmake$(HOST_EXECUTABLE_SUFFIX)
PROGUARD := external/proguard/bin/proguard.sh
@@ -1000,16 +1008,42 @@
endif # PRODUCT_USE_DYNAMIC_PARTITION_SIZE
ifeq ($(PRODUCT_BUILD_SUPER_PARTITION),true)
-ifdef BOARD_SUPER_PARTITION_PARTITION_LIST
-# BOARD_SUPER_PARTITION_PARTITION_LIST: a list of the following tokens
+
+# BOARD_SUPER_PARTITION_GROUPS defines a list of "updatable groups". Each updatable group is a
+# group of partitions that share the same pool of free spaces.
+# For each group in BOARD_SUPER_PARTITION_GROUPS, a BOARD_{GROUP}_SIZE and
+# BOARD_{GROUP}_PARTITION_LIST may be defined.
+# - BOARD_{GROUP}_SIZE: The maximum sum of sizes of all partitions in the group.
+# If empty, no limit is enforced on the sum of sizes for this group.
+# - BOARD_{GROUP}_PARTITION_LIST: the list of partitions that belong to this group.
+# If empty, no partitions belong to this group, and the sum of sizes is effectively 0.
+$(foreach group,$(call to-upper,$(BOARD_SUPER_PARTITION_GROUPS)), \
+ $(eval BOARD_$(group)_SIZE ?=) \
+ $(eval .KATI_READONLY := BOARD_$(group)_SIZE) \
+ $(eval BOARD_$(group)_PARTITION_LIST ?=) \
+ $(eval .KATI_READONLY := BOARD_$(group)_PARTITION_LIST) \
+)
+
+# BOARD_*_PARTITION_LIST: a list of the following tokens
valid_super_partition_list := system vendor product product_services
-ifneq (,$(filter-out $(valid_super_partition_list),$(BOARD_SUPER_PARTITION_PARTITION_LIST)))
-$(error BOARD_SUPER_PARTITION_PARTITION_LIST contains invalid partition name \
- ($(filter-out $(valid_super_partition_list),$(BOARD_SUPER_PARTITION_PARTITION_LIST))). \
- Valid names are $(valid_super_partition_list))
-endif
+$(foreach group,$(call to-upper,$(BOARD_SUPER_PARTITION_GROUPS)), \
+ $(if $(filter-out $(valid_super_partition_list),$(BOARD_$(group)_PARTITION_LIST)), \
+ $(error BOARD_$(group)_PARTITION_LIST contains invalid partition name \
+ $(filter-out $(valid_super_partition_list),$(BOARD_$(group)_PARTITION_LIST)). \
+ Valid names are $(valid_super_partition_list))))
valid_super_partition_list :=
-endif # BOARD_SUPER_PARTITION_PARTITION_LIST
+
+
+# Define BOARD_SUPER_PARTITION_PARTITION_LIST, the sum of all BOARD_*_PARTITION_LIST
+ifdef BOARD_SUPER_PARTITION_PARTITION_LIST
+$(error BOARD_SUPER_PARTITION_PARTITION_LIST should not be defined, but computed from \
+ BOARD_SUPER_PARTITION_GROUPS and BOARD_*_PARTITION_LIST)
+endif
+BOARD_SUPER_PARTITION_PARTITION_LIST := \
+ $(foreach group,$(call to-upper,$(BOARD_SUPER_PARTITION_GROUPS)), \
+ $(BOARD_$(group)_PARTITION_LIST))
+.KATI_READONLY := BOARD_SUPER_PARTITION_PARTITION_LIST
+
endif # PRODUCT_BUILD_SUPER_PARTITION
# ###############################################################
diff --git a/core/definitions.mk b/core/definitions.mk
index 5f73fb5..5a14826 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -2484,7 +2484,8 @@
$(eval _cmf_tuple := $(subst :, ,$(f))) \
$(eval _cmf_src := $(word 1,$(_cmf_tuple))) \
$(eval _cmf_dest := $(word 2,$(_cmf_tuple))) \
- $(eval $(call copy-one-file,$(_cmf_src),$(_cmf_dest))) \
+ $(if $(filter-out $(_cmf_src), $(_cmf_dest)), \
+ $(eval $(call copy-one-file,$(_cmf_src),$(_cmf_dest)))) \
$(_cmf_dest)))
endef
diff --git a/core/envsetup.mk b/core/envsetup.mk
index 7128e3a..96e7e2c 100644
--- a/core/envsetup.mk
+++ b/core/envsetup.mk
@@ -756,11 +756,11 @@
TARGET_OUT_VENDOR := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_VENDOR)
.KATI_READONLY := TARGET_OUT_VENDOR
ifneq ($(filter address,$(SANITIZE_TARGET)),)
-target_out_vendor_shared_libraries_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/vendor
+target_out_vendor_shared_libraries_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/$(TARGET_COPY_OUT_VENDOR)
ifeq ($(SANITIZE_LITE),true)
# When using SANITIZE_LITE, APKs must not be packaged with sanitized libraries, as they will not
# work with unsanitized app_process. For simplicity, generate APKs into /data/asan/.
-target_out_vendor_app_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/vendor
+target_out_vendor_app_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/$(TARGET_COPY_OUT_VENDOR)
else
target_out_vendor_app_base := $(TARGET_OUT_VENDOR)
endif
@@ -839,11 +839,11 @@
TARGET_OUT_ODM := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ODM)
ifneq ($(filter address,$(SANITIZE_TARGET)),)
-target_out_odm_shared_libraries_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/odm
+target_out_odm_shared_libraries_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/$(TARGET_COPY_OUT_ODM)
ifeq ($(SANITIZE_LITE),true)
# When using SANITIZE_LITE, APKs must not be packaged with sanitized libraries, as they will not
# work with unsanitized app_process. For simplicity, generate APKs into /data/asan/.
-target_out_odm_app_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/odm
+target_out_odm_app_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/$(TARGET_COPY_OUT_ODM)
else
target_out_odm_app_base := $(TARGET_OUT_ODM)
endif
@@ -895,11 +895,11 @@
TARGET_OUT_PRODUCT_EXECUTABLES := $(TARGET_OUT_PRODUCT)/bin
.KATI_READONLY := TARGET_OUT_PRODUCT
ifneq ($(filter address,$(SANITIZE_TARGET)),)
-target_out_product_shared_libraries_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/product
+target_out_product_shared_libraries_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/$(TARGET_COPY_OUT_PRODUCT)
ifeq ($(SANITIZE_LITE),true)
# When using SANITIZE_LITE, APKs must not be packaged with sanitized libraries, as they will not
# work with unsanitized app_process. For simplicity, generate APKs into /data/asan/.
-target_out_product_app_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/product
+target_out_product_app_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/$(TARGET_COPY_OUT_PRODUCT)
else
target_out_product_app_base := $(TARGET_OUT_PRODUCT)
endif
@@ -941,11 +941,11 @@
TARGET_OUT_PRODUCT_SERVICES := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_PRODUCT_SERVICES)
ifneq ($(filter address,$(SANITIZE_TARGET)),)
-target_out_product_services_shared_libraries_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/product_services
+target_out_product_services_shared_libraries_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/$(TARGET_COPY_OUT_PRODUCT_SERVICES)
ifeq ($(SANITIZE_LITE),true)
# When using SANITIZE_LITE, APKs must not be packaged with sanitized libraries, as they will not
# work with unsanitized app_process. For simplicity, generate APKs into /data/asan/.
-target_out_product_services_app_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/product_services
+target_out_product_services_app_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/$(TARGET_COPY_OUT_PRODUCT_SERVICES)
else
target_out_product_services_app_base := $(TARGET_OUT_PRODUCT_SERVICES)
endif
diff --git a/core/java.mk b/core/java.mk
index 6ca2904..c015e4a 100644
--- a/core/java.mk
+++ b/core/java.mk
@@ -170,6 +170,7 @@
$(filter %.java,$(LOCAL_GENERATED_SOURCES))
java_intermediate_sources := $(addprefix $(TARGET_OUT_COMMON_INTERMEDIATES)/, $(filter %.java,$(LOCAL_INTERMEDIATE_SOURCES)))
all_java_sources := $(java_sources) $(java_intermediate_sources)
+ALL_MODULES.$(my_register_name).SRCS := $(ALL_MODULES.$(my_register_name).SRCS) $(all_java_sources)
include $(BUILD_SYSTEM)/java_common.mk
diff --git a/core/product.mk b/core/product.mk
index 8c8246e..d1c74e7 100644
--- a/core/product.mk
+++ b/core/product.mk
@@ -408,7 +408,7 @@
BOARD_PRODUCTIMAGE_PARTITION_RESERVED_SIZE \
BOARD_PRODUCT_SERVICESIMAGE_PARTITION_RESERVED_SIZE \
BOARD_SUPER_PARTITION_SIZE \
- BOARD_SUPER_PARTITION_PARTITION_LIST \
+ BOARD_SUPER_PARTITION_GROUPS \
#
# Mark the variables in _product_stash_var_list as readonly
diff --git a/core/soong_app_prebuilt.mk b/core/soong_app_prebuilt.mk
index d02cba6..d34f367 100644
--- a/core/soong_app_prebuilt.mk
+++ b/core/soong_app_prebuilt.mk
@@ -74,6 +74,25 @@
$(eval $(call copy-one-file,$(LOCAL_PREBUILT_MODULE_FILE),$(LOCAL_BUILT_MODULE)))
endif
+# embedded JNI will already have been handled by soong
+my_embed_jni :=
+my_prebuilt_jni_libs :=
+ifdef LOCAL_SOONG_JNI_LIBS_$(TARGET_ARCH)
+ my_2nd_arch_prefix :=
+ LOCAL_JNI_SHARED_LIBRARIES := $(LOCAL_SOONG_JNI_LIBS_$(TARGET_ARCH))
+ include $(BUILD_SYSTEM)/install_jni_libs_internal.mk
+endif
+ifdef TARGET_2ND_ARCH
+ ifdef LOCAL_SOONG_JNI_LIBS_$(TARGET_2ND_ARCH)
+ my_2nd_arch_prefix := $(TARGET_2ND_ARCH_VAR_PREFIX)
+ LOCAL_JNI_SHARED_LIBRARIES := $(LOCAL_SOONG_JNI_LIBS_$(TARGET_2ND_ARCH))
+ include $(BUILD_SYSTEM)/install_jni_libs_internal.mk
+ endif
+endif
+LOCAL_JNI_SHARED_LIBRARIES :=
+my_embed_jni :=
+my_prebuilt_jni_libs :=
+my_2nd_arch_prefix :=
PACKAGES := $(PACKAGES) $(LOCAL_MODULE)
ifdef LOCAL_CERTIFICATE
diff --git a/envsetup.sh b/envsetup.sh
index 5cbd9eb..4579bef 100644
--- a/envsetup.sh
+++ b/envsetup.sh
@@ -1594,6 +1594,25 @@
esac
}
+function acloud()
+{
+ # Let's use the built version over the prebuilt.
+ local built_acloud=${ANDROID_HOST_OUT}/bin/acloud
+ if [ -f $built_acloud ]; then
+ $built_acloud "$@"
+ return $?
+ fi
+
+ local host_os_arch=$(get_build_var HOST_PREBUILT_TAG)
+ case $host_os_arch in
+ linux-x86) "$(gettop)"/prebuilts/asuite/acloud/linux-x86/acloud "$@"
+ ;;
+ *)
+ echo "acloud is not supported on your host arch: $host_os_arch"
+ ;;
+ esac
+}
+
# Execute the contents of any vendorsetup.sh files we can find.
function source_vendorsetup() {
for dir in device vendor product; do
diff --git a/target/product/base_system.mk b/target/product/base_system.mk
index b282048..11f5fe4 100644
--- a/target/product/base_system.mk
+++ b/target/product/base_system.mk
@@ -90,6 +90,7 @@
init.rc \
input \
installd \
+ iorapd \
ip \
ip6tables \
iptables \
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index d7d1bc8..2fa5f52 100755
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -49,7 +49,6 @@
import os
import shlex
import shutil
-import subprocess
import sys
import uuid
import zipfile
@@ -259,10 +258,11 @@
args = OPTIONS.info_dict.get("avb_dtbo_add_hash_footer_args")
if args and args.strip():
cmd.extend(shlex.split(args))
- p = common.Run(cmd, stdout=subprocess.PIPE)
- p.communicate()
- assert p.returncode == 0, \
- "avbtool add_hash_footer of %s failed" % (img.name,)
+ proc = common.Run(cmd)
+ output, _ = proc.communicate()
+ assert proc.returncode == 0, \
+ "Failed to call 'avbtool add_hash_footer' for {}:\n{}".format(
+ img.name, output)
img.Write()
return img.name
@@ -451,9 +451,9 @@
assert found, 'Failed to find {}'.format(image_path)
cmd.extend(split_args)
- p = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- stdoutdata, _ = p.communicate()
- assert p.returncode == 0, \
+ proc = common.Run(cmd)
+ stdoutdata, _ = proc.communicate()
+ assert proc.returncode == 0, \
"avbtool make_vbmeta_image failed:\n{}".format(stdoutdata)
img.Write()
@@ -481,9 +481,9 @@
if args:
cmd.extend(shlex.split(args))
- p = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- stdoutdata, _ = p.communicate()
- assert p.returncode == 0, \
+ proc = common.Run(cmd)
+ stdoutdata, _ = proc.communicate()
+ assert proc.returncode == 0, \
"bpttool make_table failed:\n{}".format(stdoutdata)
img.Write()
@@ -600,12 +600,10 @@
temp_care_map = common.MakeTempFile(prefix="caremap-", suffix=".pb")
care_map_gen_cmd = ["care_map_generator", temp_care_map_text, temp_care_map]
- p = common.Run(care_map_gen_cmd, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- output, _ = p.communicate()
- if OPTIONS.verbose:
- print(output.rstrip())
- assert p.returncode == 0, "Failed to generate the care_map proto message."
+ proc = common.Run(care_map_gen_cmd)
+ output, _ = proc.communicate()
+ assert proc.returncode == 0, \
+ "Failed to generate the care_map proto message:\n{}".format(output)
care_map_path = "META/care_map.pb"
if output_zip and care_map_path not in output_zip.namelist():
@@ -656,9 +654,9 @@
cmd += shlex.split(OPTIONS.info_dict.get('lpmake_args').strip())
cmd += ['--output', img.name]
- p = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- stdoutdata, _ = p.communicate()
- assert p.returncode == 0, \
+ proc = common.Run(cmd)
+ stdoutdata, _ = proc.communicate()
+ assert proc.returncode == 0, \
"lpmake tool failed:\n{}".format(stdoutdata)
img.Write()
diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index aeb4379..189dba2 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -23,7 +23,6 @@
import os
import os.path
import re
-import subprocess
import sys
import threading
from collections import deque, OrderedDict
@@ -43,11 +42,10 @@
# Don't dump the bsdiff/imgdiff commands, which are not useful for the case
# here, since they contain temp filenames only.
- p = common.Run(cmd, verbose=False, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- output, _ = p.communicate()
+ proc = common.Run(cmd, verbose=False)
+ output, _ = proc.communicate()
- if p.returncode != 0:
+ if proc.returncode != 0:
raise ValueError(output)
with open(patchfile, 'rb') as f:
@@ -1494,9 +1492,9 @@
"--block-limit={}".format(max_blocks_per_transfer),
"--split-info=" + patch_info_file,
src_file, tgt_file, patch_file]
- p = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- imgdiff_output, _ = p.communicate()
- assert p.returncode == 0, \
+ proc = common.Run(cmd)
+ imgdiff_output, _ = proc.communicate()
+ assert proc.returncode == 0, \
"Failed to create imgdiff patch between {} and {}:\n{}".format(
src_name, tgt_name, imgdiff_output)
diff --git a/tools/releasetools/check_ota_package_signature.py b/tools/releasetools/check_ota_package_signature.py
index 3cac90a..a580709 100755
--- a/tools/releasetools/check_ota_package_signature.py
+++ b/tools/releasetools/check_ota_package_signature.py
@@ -24,7 +24,6 @@
import re
import subprocess
import sys
-import tempfile
import zipfile
from hashlib import sha1
@@ -165,11 +164,11 @@
cmd = ['delta_generator',
'--in_file=' + payload_file,
'--public_key=' + pubkey]
- proc = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ proc = common.Run(cmd)
stdoutdata, _ = proc.communicate()
assert proc.returncode == 0, \
- 'Failed to verify payload with delta_generator: %s\n%s' % (package,
- stdoutdata)
+ 'Failed to verify payload with delta_generator: {}\n{}'.format(
+ package, stdoutdata)
common.ZipClose(package_zip)
# Verified successfully upon reaching here.
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 4e2346c..e381676 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -121,15 +121,26 @@
def Run(args, verbose=None, **kwargs):
- """Create and return a subprocess.Popen object.
+ """Creates and returns a subprocess.Popen object.
- Caller can specify if the command line should be printed. The global
- OPTIONS.verbose will be used if not specified.
+ Args:
+ args: The command represented as a list of strings.
+ verbose: Whether the commands should be shown (default to OPTIONS.verbose
+ if unspecified).
+ kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
+ stdin, etc. stdout and stderr will default to subprocess.PIPE and
+ subprocess.STDOUT respectively unless caller specifies any of them.
+
+ Returns:
+ A subprocess.Popen object.
"""
if verbose is None:
verbose = OPTIONS.verbose
+ if 'stdout' not in kwargs and 'stderr' not in kwargs:
+ kwargs['stdout'] = subprocess.PIPE
+ kwargs['stderr'] = subprocess.STDOUT
if verbose:
- print(" running: ", " ".join(args))
+ print(" Running: \"{}\"".format(" ".join(args)))
return subprocess.Popen(args, **kwargs)
@@ -443,8 +454,7 @@
avbtool = os.getenv('AVBTOOL') or info_dict["avb_avbtool"]
pubkey_path = MakeTempFile(prefix="avb-", suffix=".pubkey")
proc = Run(
- [avbtool, "extract_public_key", "--key", key, "--output", pubkey_path],
- stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ [avbtool, "extract_public_key", "--key", key, "--output", pubkey_path])
stdoutdata, _ = proc.communicate()
assert proc.returncode == 0, \
"Failed to extract pubkey for {}:\n{}".format(
@@ -551,9 +561,10 @@
fn = os.path.join(sourcedir, "recovery_dtbo")
cmd.extend(["--recovery_dtbo", fn])
- p = Run(cmd, stdout=subprocess.PIPE)
- p.communicate()
- assert p.returncode == 0, "mkbootimg of %s image failed" % (partition_name,)
+ proc = Run(cmd)
+ output, _ = proc.communicate()
+ assert proc.returncode == 0, \
+ "Failed to run mkbootimg of {}:\n{}".format(partition_name, output)
if (info_dict.get("boot_signer") == "true" and
info_dict.get("verity_key")):
@@ -568,9 +579,10 @@
cmd.extend([path, img.name,
info_dict["verity_key"] + ".pk8",
info_dict["verity_key"] + ".x509.pem", img.name])
- p = Run(cmd, stdout=subprocess.PIPE)
- p.communicate()
- assert p.returncode == 0, "boot_signer of %s image failed" % path
+ proc = Run(cmd)
+ output, _ = proc.communicate()
+ assert proc.returncode == 0, \
+ "Failed to run boot_signer of {} image:\n{}".format(path, output)
# Sign the image if vboot is non-empty.
elif info_dict.get("vboot"):
@@ -588,9 +600,10 @@
info_dict["vboot_subkey"] + ".vbprivk",
img_keyblock.name,
img.name]
- p = Run(cmd, stdout=subprocess.PIPE)
- p.communicate()
- assert p.returncode == 0, "vboot_signer of %s image failed" % path
+ proc = Run(cmd)
+ output, _ = proc.communicate()
+ assert proc.returncode == 0, \
+ "Failed to run vboot_signer of {} image:\n{}".format(path, output)
# Clean up the temp files.
img_unsigned.close()
@@ -607,10 +620,11 @@
args = info_dict.get("avb_" + partition_name + "_add_hash_footer_args")
if args and args.strip():
cmd.extend(shlex.split(args))
- p = Run(cmd, stdout=subprocess.PIPE)
- p.communicate()
- assert p.returncode == 0, "avbtool add_hash_footer of %s failed" % (
- partition_name,)
+ proc = Run(cmd)
+ output, _ = proc.communicate()
+ assert proc.returncode == 0, \
+ "Failed to run 'avbtool add_hash_footer' of {}:\n{}".format(
+ partition_name, output)
img.seek(os.SEEK_SET, 0)
data = img.read()
@@ -682,9 +696,9 @@
cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
if pattern is not None:
cmd.extend(pattern)
- p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- stdoutdata, _ = p.communicate()
- if p.returncode != 0:
+ proc = Run(cmd)
+ stdoutdata, _ = proc.communicate()
+ if proc.returncode != 0:
raise ExternalError(
"Failed to unzip input target-files \"{}\":\n{}".format(
filename, stdoutdata))
@@ -926,15 +940,14 @@
key + OPTIONS.private_key_suffix,
input_name, output_name])
- p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
+ proc = Run(cmd, stdin=subprocess.PIPE)
if password is not None:
password += "\n"
- stdoutdata, _ = p.communicate(password)
- if p.returncode != 0:
+ stdoutdata, _ = proc.communicate(password)
+ if proc.returncode != 0:
raise ExternalError(
"Failed to run signapk.jar: return code {}:\n{}".format(
- p.returncode, stdoutdata))
+ proc.returncode, stdoutdata))
def CheckSize(data, target, info_dict):
@@ -1267,8 +1280,7 @@
first_line = i + 4
f.close()
- p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
- _, _ = p.communicate()
+ Run([self.editor, "+%d" % (first_line,), self.pwfile], stdout=None).communicate()
return self.ReadFile()
@@ -1396,10 +1408,10 @@
if isinstance(entries, basestring):
entries = [entries]
cmd = ["zip", "-d", zip_filename] + entries
- proc = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ proc = Run(cmd)
stdoutdata, _ = proc.communicate()
- assert proc.returncode == 0, "Failed to delete %s:\n%s" % (entries,
- stdoutdata)
+ assert proc.returncode == 0, \
+ "Failed to delete {}:\n{}".format(entries, stdoutdata)
def ZipClose(zip_file):
@@ -1860,9 +1872,9 @@
'--output={}.new.dat.br'.format(self.path),
'{}.new.dat'.format(self.path)]
print("Compressing {}.new.dat with brotli".format(self.partition))
- p = Run(brotli_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- stdoutdata, _ = p.communicate()
- assert p.returncode == 0, \
+ proc = Run(brotli_cmd)
+ stdoutdata, _ = proc.communicate()
+ assert proc.returncode == 0, \
'Failed to compress {}.new.dat with brotli:\n{}'.format(
self.partition, stdoutdata)
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index 755eda9..7ea53f8 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -394,8 +394,7 @@
signing_key = common.MakeTempFile(prefix="key-", suffix=".key")
cmd.extend(["-out", signing_key])
- get_signing_key = common.Run(cmd, verbose=False, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
+ get_signing_key = common.Run(cmd, verbose=False)
stdoutdata, _ = get_signing_key.communicate()
assert get_signing_key.returncode == 0, \
"Failed to get signing key: {}".format(stdoutdata)
@@ -411,7 +410,7 @@
"""Signs the given input file. Returns the output filename."""
out_file = common.MakeTempFile(prefix="signed-", suffix=".bin")
cmd = [self.signer] + self.signer_args + ['-in', in_file, '-out', out_file]
- signing = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ signing = common.Run(cmd)
stdoutdata, _ = signing.communicate()
assert signing.returncode == 0, \
"Failed to sign the input file: {}".format(stdoutdata)
diff --git a/tools/releasetools/test_add_img_to_target_files.py b/tools/releasetools/test_add_img_to_target_files.py
index a73746e..cc7b887 100644
--- a/tools/releasetools/test_add_img_to_target_files.py
+++ b/tools/releasetools/test_add_img_to_target_files.py
@@ -16,7 +16,6 @@
import os
import os.path
-import subprocess
import unittest
import zipfile
@@ -45,9 +44,11 @@
# Calls an external binary to convert the proto message.
cmd = ["care_map_generator", "--parse_proto", file_name, text_file]
- p = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- p.communicate()
- self.assertEqual(0, p.returncode)
+ proc = common.Run(cmd)
+ output, _ = proc.communicate()
+ self.assertEqual(
+ 0, proc.returncode,
+ "Failed to run care_map_generator:\n{}".format(output))
with open(text_file, 'r') as verify_fp:
plain_text = verify_fp.read()
diff --git a/tools/releasetools/test_ota_from_target_files.py b/tools/releasetools/test_ota_from_target_files.py
index 1d8a786..29e0d83 100644
--- a/tools/releasetools/test_ota_from_target_files.py
+++ b/tools/releasetools/test_ota_from_target_files.py
@@ -17,7 +17,6 @@
import copy
import os
import os.path
-import subprocess
import unittest
import zipfile
@@ -1024,11 +1023,11 @@
'--signature_size', str(self.SIGNATURE_SIZE),
'--metadata_hash_file', metadata_sig_file,
'--payload_hash_file', payload_sig_file]
- proc = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ proc = common.Run(cmd)
stdoutdata, _ = proc.communicate()
self.assertEqual(
0, proc.returncode,
- 'Failed to run brillo_update_payload: {}'.format(stdoutdata))
+ 'Failed to run brillo_update_payload:\n{}'.format(stdoutdata))
signed_metadata_sig_file = payload_signer.Sign(metadata_sig_file)
diff --git a/tools/releasetools/test_validate_target_files.py b/tools/releasetools/test_validate_target_files.py
index 0aaf069..ecb7fde 100644
--- a/tools/releasetools/test_validate_target_files.py
+++ b/tools/releasetools/test_validate_target_files.py
@@ -21,7 +21,6 @@
import os
import os.path
import shutil
-import subprocess
import unittest
import build_image
@@ -44,7 +43,7 @@
kernel_fp.write(os.urandom(10))
cmd = ['mkbootimg', '--kernel', kernel, '-o', output_file]
- proc = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ proc = common.Run(cmd)
stdoutdata, _ = proc.communicate()
self.assertEqual(
0, proc.returncode,
@@ -53,7 +52,7 @@
cmd = ['boot_signer', '/boot', output_file,
os.path.join(self.testdata_dir, 'testkey.pk8'),
os.path.join(self.testdata_dir, 'testkey.x509.pem'), output_file]
- proc = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ proc = common.Run(cmd)
stdoutdata, _ = proc.communicate()
self.assertEqual(
0, proc.returncode,
@@ -123,7 +122,7 @@
system_root = common.MakeTempDir()
cmd = ['mkuserimg_mke2fs', '-s', system_root, output_file, 'ext4',
'/system', str(image_size), '-j', '0']
- proc = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ proc = common.Run(cmd)
stdoutdata, _ = proc.communicate()
self.assertEqual(
0, proc.returncode,
diff --git a/tools/releasetools/validate_target_files.py b/tools/releasetools/validate_target_files.py
index 09f800f..1cc4a60 100755
--- a/tools/releasetools/validate_target_files.py
+++ b/tools/releasetools/validate_target_files.py
@@ -35,7 +35,6 @@
import logging
import os.path
import re
-import subprocess
import zipfile
import common
@@ -256,7 +255,7 @@
continue
cmd = ['boot_signer', '-verify', image_path, '-certificate', verity_key]
- proc = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ proc = common.Run(cmd)
stdoutdata, _ = proc.communicate()
assert proc.returncode == 0, \
'Failed to verify {} with boot_signer:\n{}'.format(image, stdoutdata)
@@ -299,7 +298,7 @@
continue
cmd = ['verity_verifier', image_path, '-mincrypt', verity_key_mincrypt]
- proc = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ proc = common.Run(cmd)
stdoutdata, _ = proc.communicate()
assert proc.returncode == 0, \
'Failed to verify {} with verity_verifier (key: {}):\n{}'.format(
@@ -328,7 +327,7 @@
partition, info_dict, options[key_name])
cmd.extend(["--expected_chain_partition", chained_partition_arg])
- proc = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ proc = common.Run(cmd)
stdoutdata, _ = proc.communicate()
assert proc.returncode == 0, \
'Failed to verify {} with verity_verifier (key: {}):\n{}'.format(
diff --git a/tools/uuidgen.py b/tools/uuidgen.py
deleted file mode 100755
index d3091a7..0000000
--- a/tools/uuidgen.py
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import print_function
-import sys
-import uuid
-
-def uuidgen(name):
- return uuid.uuid5(uuid.uuid5(uuid.NAMESPACE_URL, "android.com"), name)
-
-if __name__ == "__main__":
- if len(sys.argv) < 2:
- print("Usage: uuidgen.py <name>")
- sys.exit(1)
- name = sys.argv[1]
- print(uuidgen(name))
diff --git a/tools/zipalign/ZipAlign.cpp b/tools/zipalign/ZipAlign.cpp
index af04a34..eea1749 100644
--- a/tools/zipalign/ZipAlign.cpp
+++ b/tools/zipalign/ZipAlign.cpp
@@ -111,7 +111,7 @@
status = pZout->add(pZin, pEntry, padding, &pNewEntry);
}
- if (status != NO_ERROR)
+ if (status != OK)
return 1;
bias += padding;
//printf(" added '%s' at %ld (pad=%d)\n",
@@ -146,13 +146,13 @@
return 1;
}
- if (zin.open(inFileName, ZipFile::kOpenReadOnly) != NO_ERROR) {
+ if (zin.open(inFileName, ZipFile::kOpenReadOnly) != OK) {
fprintf(stderr, "Unable to open '%s' as zip archive\n", inFileName);
return 1;
}
if (zout.open(outFileName,
ZipFile::kOpenReadWrite|ZipFile::kOpenCreate|ZipFile::kOpenTruncate)
- != NO_ERROR)
+ != OK)
{
fprintf(stderr, "Unable to open '%s' as zip archive\n", outFileName);
return 1;
@@ -178,7 +178,7 @@
if (verbose)
printf("Verifying alignment of %s (%d)...\n", fileName, alignment);
- if (zipFile.open(fileName, ZipFile::kOpenReadOnly) != NO_ERROR) {
+ if (zipFile.open(fileName, ZipFile::kOpenReadOnly) != OK) {
fprintf(stderr, "Unable to open '%s' for verification\n", fileName);
return 1;
}
diff --git a/tools/zipalign/ZipEntry.cpp b/tools/zipalign/ZipEntry.cpp
index c3c833e..810d74a 100644
--- a/tools/zipalign/ZipEntry.cpp
+++ b/tools/zipalign/ZipEntry.cpp
@@ -48,7 +48,7 @@
/* read the CDE */
result = mCDE.read(fp);
- if (result != NO_ERROR) {
+ if (result != OK) {
ALOGD("mCDE.read failed\n");
return result;
}
@@ -64,7 +64,7 @@
}
result = mLFH.read(fp);
- if (result != NO_ERROR) {
+ if (result != OK) {
ALOGD("mLFH.read failed\n");
return result;
}
@@ -103,7 +103,7 @@
* can defer worrying about that to when we're extracting data.
*/
- return NO_ERROR;
+ return OK;
}
/*
@@ -189,7 +189,7 @@
mLFH.mExtraFieldLength+1);
}
- return NO_ERROR;
+ return OK;
}
/*
@@ -225,7 +225,7 @@
mLFH.mExtraFieldLength = padding;
}
- return NO_ERROR;
+ return OK;
}
/*
@@ -403,7 +403,7 @@
*/
status_t ZipEntry::LocalFileHeader::read(FILE* fp)
{
- status_t result = NO_ERROR;
+ status_t result = OK;
uint8_t buf[kLFHLen];
assert(mFileName == NULL);
@@ -499,7 +499,7 @@
return UNKNOWN_ERROR;
}
- return NO_ERROR;
+ return OK;
}
@@ -537,7 +537,7 @@
*/
status_t ZipEntry::CentralDirEntry::read(FILE* fp)
{
- status_t result = NO_ERROR;
+ status_t result = OK;
uint8_t buf[kCDELen];
/* no re-use */
@@ -669,7 +669,7 @@
return UNKNOWN_ERROR;
}
- return NO_ERROR;
+ return OK;
}
/*
diff --git a/tools/zipalign/ZipFile.cpp b/tools/zipalign/ZipFile.cpp
index 9e44956..63fb962 100644
--- a/tools/zipalign/ZipFile.cpp
+++ b/tools/zipalign/ZipFile.cpp
@@ -120,7 +120,7 @@
* have a need for empty zip files.)
*/
mNeedCDRewrite = true;
- result = NO_ERROR;
+ result = OK;
}
if (flags & kOpenReadOnly)
@@ -205,7 +205,7 @@
*/
status_t ZipFile::readCentralDir(void)
{
- status_t result = NO_ERROR;
+ status_t result = OK;
uint8_t* buf = NULL;
off_t fileLength, seekStart;
long readAmount;
@@ -267,7 +267,7 @@
/* extract eocd values */
result = mEOCD.readBuf(buf + i, readAmount - i);
- if (result != NO_ERROR) {
+ if (result != OK) {
ALOGD("Failure reading %ld bytes of EOCD values", readAmount - i);
goto bail;
}
@@ -311,7 +311,7 @@
ZipEntry* pEntry = new ZipEntry;
result = pEntry->initFromCDE(mZipFp);
- if (result != NO_ERROR) {
+ if (result != OK) {
ALOGD("initFromCDE failed\n");
delete pEntry;
goto bail;
@@ -361,7 +361,7 @@
const char* storageName, int compressionMethod, ZipEntry** ppEntry)
{
ZipEntry* pEntry = NULL;
- status_t result = NO_ERROR;
+ status_t result = OK;
long lfhPosn, startPosn, endPosn, uncompressedLen;
FILE* inputFp = NULL;
uint32_t crc;
@@ -415,7 +415,7 @@
if (compressionMethod == ZipEntry::kCompressDeflated) {
bool failed = false;
result = compressFpToFp(mZipFp, inputFp, data, size, &crc);
- if (result != NO_ERROR) {
+ if (result != OK) {
ALOGD("compression failed, storing\n");
failed = true;
} else {
@@ -447,7 +447,7 @@
} else {
result = copyDataToFp(mZipFp, data, size, &crc);
}
- if (result != NO_ERROR) {
+ if (result != OK) {
// don't need to truncate; happens in CDE rewrite
ALOGD("failed copying data in\n");
goto bail;
@@ -535,11 +535,11 @@
}
result = pEntry->initFromExternal(pSourceEntry);
- if (result != NO_ERROR)
+ if (result != OK)
goto bail;
if (padding != 0) {
result = pEntry->addPadding(padding);
- if (result != NO_ERROR)
+ if (result != OK)
goto bail;
}
@@ -574,7 +574,7 @@
copyLen += ZipEntry::kDataDescriptorLen;
if (copyPartialFpToFp(mZipFp, pSourceZip->mZipFp, copyLen, NULL)
- != NO_ERROR)
+ != OK)
{
ALOGW("copy of '%s' failed\n", pEntry->mCDE.mFileName);
result = UNKNOWN_ERROR;
@@ -603,7 +603,7 @@
*ppEntry = pEntry;
pEntry = NULL;
- result = NO_ERROR;
+ result = OK;
bail:
delete pEntry;
@@ -642,7 +642,7 @@
}
result = pEntry->initFromExternal(pSourceEntry);
- if (result != NO_ERROR)
+ if (result != OK)
goto bail;
/*
@@ -682,7 +682,7 @@
}
long startPosn = ftell(mZipFp);
uint32_t crc;
- if (compressFpToFp(mZipFp, NULL, buf, uncompressedLen, &crc) != NO_ERROR) {
+ if (compressFpToFp(mZipFp, NULL, buf, uncompressedLen, &crc) != OK) {
ALOGW("recompress of '%s' failed\n", pEntry->mCDE.mFileName);
result = UNKNOWN_ERROR;
free(buf);
@@ -699,7 +699,7 @@
copyLen += ZipEntry::kDataDescriptorLen;
if (copyPartialFpToFp(mZipFp, pSourceZip->mZipFp, copyLen, NULL)
- != NO_ERROR)
+ != OK)
{
ALOGW("copy of '%s' failed\n", pEntry->mCDE.mFileName);
result = UNKNOWN_ERROR;
@@ -738,7 +738,7 @@
*ppEntry = pEntry;
pEntry = NULL;
- result = NO_ERROR;
+ result = OK;
bail:
delete pEntry;
@@ -773,7 +773,7 @@
}
}
- return NO_ERROR;
+ return OK;
}
/*
@@ -793,7 +793,7 @@
}
}
- return NO_ERROR;
+ return OK;
}
/*
@@ -837,7 +837,7 @@
length -= readSize;
}
- return NO_ERROR;
+ return OK;
}
/*
@@ -849,7 +849,7 @@
status_t ZipFile::compressFpToFp(FILE* dstFp, FILE* srcFp,
const void* data, size_t size, uint32_t* pCRC32)
{
- status_t result = NO_ERROR;
+ status_t result = OK;
const size_t kBufSize = 1024 * 1024;
uint8_t* inBuf = NULL;
uint8_t* outBuf = NULL;
@@ -933,7 +933,7 @@
/* mark entry as deleted, and mark archive as dirty */
pEntry->setDeleted();
mNeedCDRewrite = true;
- return NO_ERROR;
+ return OK;
}
/*
@@ -944,19 +944,19 @@
*/
status_t ZipFile::flush(void)
{
- status_t result = NO_ERROR;
+ status_t result = OK;
long eocdPosn;
int i, count;
if (mReadOnly)
return INVALID_OPERATION;
if (!mNeedCDRewrite)
- return NO_ERROR;
+ return OK;
assert(mZipFp != NULL);
result = crunchArchive();
- if (result != NO_ERROR)
+ if (result != OK)
return result;
if (fseek(mZipFp, mEOCD.mCentralDirOffset, SEEK_SET) != 0)
@@ -986,7 +986,7 @@
/* should we clear the "newly added" flag in all entries now? */
mNeedCDRewrite = false;
- return NO_ERROR;
+ return OK;
}
/*
@@ -997,7 +997,7 @@
*/
status_t ZipFile::crunchArchive(void)
{
- status_t result = NO_ERROR;
+ status_t result = OK;
int i, count;
long delCount, adjust;
@@ -1065,7 +1065,7 @@
// pEntry->getFileName(), adjust);
result = filemove(mZipFp, pEntry->getLFHOffset() - adjust,
pEntry->getLFHOffset(), span);
- if (result != NO_ERROR) {
+ if (result != OK) {
/* this is why you use a temp file */
ALOGE("error during crunch - archive is toast\n");
return result;
@@ -1097,7 +1097,7 @@
status_t ZipFile::filemove(FILE* fp, off_t dst, off_t src, size_t n)
{
if (dst == src || n <= 0)
- return NO_ERROR;
+ return OK;
uint8_t readBuf[32768];
@@ -1140,7 +1140,7 @@
return UNKNOWN_ERROR;
}
- return NO_ERROR;
+ return OK;
}
@@ -1355,7 +1355,7 @@
memcpy(mComment, buf + kEOCDLen, mCommentLen);
}
- return NO_ERROR;
+ return OK;
}
/*
@@ -1382,7 +1382,7 @@
return UNKNOWN_ERROR;
}
- return NO_ERROR;
+ return OK;
}
/*