Merge commit 'f9f41d0dfdac6deb819f0cc0cb0270f504dbd4ed' into manualmergeDroiddoc
diff --git a/cleanspec.mk b/cleanspec.mk
index 22d9fe1..0cd4fed 100644
--- a/cleanspec.mk
+++ b/cleanspec.mk
@@ -75,6 +75,15 @@
$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/APPS/PinyinIMEGoogleService_intermediates)
$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/JAVA_LIBRARIES/com.android.inputmethod.pinyin.lib_intermediates)
$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/APPS/PinyinIMEGoogleService_intermediates)
+$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/JAVA_LIBRARIES/framework_intermediates/src/telephony)
+$(call add-clean-step, rm -rf $(OUT_DIR)/target/product/*/obj)
+$(call add-clean-step, rm -f $(PRODUCT_OUT)/system/bin/tcpdump)
+$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/JAVA_LIBRARIES/framework_intermediates/src/location)
+
+$(call add-clean-step, rm -rf $(OUT_DIR)/product/*/obj/SHARED_LIBRARIES/lib?camera_intermediates)
+$(call add-clean-step, rm -rf $(OUT_DIR)/product/*/obj/STATIC_LIBRARIES/lib?camera_intermediates)
+$(call add-clean-step, rm -rf $(OUT_DIR)/target/product/*/obj/SHARED_LIBRARIES/libwebcore_intermediates)
+$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/JAVA_LIBRARIES/framework_intermediates)
# ************************************************
# NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
diff --git a/core/Makefile b/core/Makefile
index 58a9695..107d0c1 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -62,9 +62,6 @@
# Apps are always signed with test keys, and may be re-signed in a post-build
# step. If that happens, the "test-keys" tag will be removed by that step.
BUILD_VERSION_TAGS += test-keys
-ifndef INCLUDE_TEST_OTA_KEYS
- BUILD_VERSION_TAGS += ota-rel-keys
-endif
BUILD_VERSION_TAGS := $(subst $(space),$(comma),$(sort $(BUILD_VERSION_TAGS)))
# A human-readable string that descibes this build in detail.
@@ -129,10 +126,12 @@
BUILD_NUMBER="$(BUILD_NUMBER)" \
PLATFORM_VERSION="$(PLATFORM_VERSION)" \
PLATFORM_SDK_VERSION="$(PLATFORM_SDK_VERSION)" \
+ PLATFORM_VERSION_CODENAME="$(PLATFORM_VERSION_CODENAME)" \
BUILD_VERSION_TAGS="$(BUILD_VERSION_TAGS)" \
TARGET_BOOTLOADER_BOARD_NAME="$(TARGET_BOOTLOADER_BOARD_NAME)" \
BUILD_FINGERPRINT="$(BUILD_FINGERPRINT)" \
TARGET_BOARD_PLATFORM="$(TARGET_BOARD_PLATFORM)" \
+ TARGET_CPU_ABI="$(TARGET_CPU_ABI)" \
bash $(BUILDINFO_SH) > $@
$(hide) if [ -f $(TARGET_DEVICE_DIR)/system.prop ]; then \
cat $(TARGET_DEVICE_DIR)/system.prop >> $@; \
@@ -227,6 +226,15 @@
"INSTALLED=\"$(strip $(ALL_MODULES.$(m).INSTALLED))\"" >> $(MODULE_INFO_FILE)))
endif
+# -----------------------------------------------------------------
+
+# The test key is used to sign this package, and as the key required
+# for future OTA packages installed by this system. Actual product
+# deliverables will be re-signed by hand. We expect this file to
+# exist with the suffixes ".x509.pem" and ".pk8".
+DEFAULT_KEY_CERT_PAIR := $(SRC_TARGET_DIR)/product/security/testkey
+
+
# Rules that need to be present for the simulator, even
# if they don't do anything.
.PHONY: systemimage
@@ -250,9 +258,9 @@
# We just build this directly to the install location.
INSTALLED_RAMDISK_TARGET := $(BUILT_RAMDISK_TARGET)
-$(INSTALLED_RAMDISK_TARGET): $(MKBOOTFS) $(INTERNAL_RAMDISK_FILES)
+$(INSTALLED_RAMDISK_TARGET): $(MKBOOTFS) $(INTERNAL_RAMDISK_FILES) | $(MINIGZIP)
$(call pretty,"Target ram disk: $@")
- $(hide) $(MKBOOTFS) $(TARGET_ROOT_OUT) | gzip > $@
+ $(hide) $(MKBOOTFS) $(TARGET_ROOT_OUT) | $(MINIGZIP) > $@
ifneq ($(strip $(TARGET_NO_KERNEL)),true)
@@ -271,6 +279,11 @@
INTERNAL_BOOTIMAGE_ARGS += --cmdline "$(BOARD_KERNEL_CMDLINE)"
endif
+BOARD_KERNEL_BASE := $(strip $(BOARD_KERNEL_BASE))
+ifdef BOARD_KERNEL_BASE
+ INTERNAL_BOOTIMAGE_ARGS += --base $(BOARD_KERNEL_BASE)
+endif
+
INSTALLED_BOOTIMAGE_TARGET := $(PRODUCT_OUT)/boot.img
ifeq ($(TARGET_BOOTIMAGE_USE_EXT2),true)
@@ -434,12 +447,16 @@
# the module processing has already been done -- in fact, we used the
# fact that all that has been done to get the list of modules that we
# need notice files for.
-$(target_notice_file_html_gz): $(target_notice_file_html)
- gzip -c $< > $@
+$(target_notice_file_html_gz): $(target_notice_file_html) | $(MINIGZIP)
+ $(hide) $(MINIGZIP) -9 < $< > $@
installed_notice_html_gz := $(TARGET_OUT)/etc/NOTICE.html.gz
$(installed_notice_html_gz): $(target_notice_file_html_gz) | $(ACP)
$(copy-file-to-target)
+
+# if we've been run my mm, mmm, etc, don't reinstall this every time
+ifeq ($(ONE_SHOT_MAKEFILE),)
ALL_DEFAULT_INSTALLED_MODULES += $(installed_notice_html_gz)
+endif
# The kernel isn't really a module, so to get its module file in there, we
# make the target NOTICE files depend on this particular file too, which will
@@ -452,6 +469,23 @@
$(hide) $(ACP) $< $@
+# -----------------------------------------------------------------
+# Build a keystore with the authorized keys in it, used to verify the
+# authenticity of downloaded OTA packages.
+#
+# This rule adds to ALL_DEFAULT_INSTALLED_MODULES, so it needs to come
+# before the rules that use that variable to build the image.
+ALL_DEFAULT_INSTALLED_MODULES += $(TARGET_OUT_ETC)/security/otacerts.zip
+$(TARGET_OUT_ETC)/security/otacerts.zip: KEY_CERT_PAIR := $(DEFAULT_KEY_CERT_PAIR)
+$(TARGET_OUT_ETC)/security/otacerts.zip: $(addsuffix .x509.pem,$(DEFAULT_KEY_CERT_PAIR))
+ $(hide) rm -f $@
+ $(hide) mkdir -p $(dir $@)
+ $(hide) zip -qj $@ $<
+
+.PHONY: otacerts
+otacerts: $(TARGET_OUT_ETC)/security/otacerts.zip
+
+
# #################################################################
# Targets for user images
# #################################################################
@@ -464,6 +498,95 @@
endif
# -----------------------------------------------------------------
+# Recovery image
+
+# If neither TARGET_NO_KERNEL nor TARGET_NO_RECOVERY are true
+ifeq (,$(filter true, $(TARGET_NO_KERNEL) $(TARGET_NO_RECOVERY)))
+
+INSTALLED_RECOVERYIMAGE_TARGET := $(PRODUCT_OUT)/recovery.img
+
+recovery_initrc := $(call include-path-for, recovery)/etc/init.rc
+recovery_kernel := $(INSTALLED_KERNEL_TARGET) # same as a non-recovery system
+recovery_ramdisk := $(PRODUCT_OUT)/ramdisk-recovery.img
+recovery_build_prop := $(INSTALLED_BUILD_PROP_TARGET)
+recovery_binary := $(call intermediates-dir-for,EXECUTABLES,recovery)/recovery
+recovery_resources_common := $(call include-path-for, recovery)/res
+recovery_resources_private := $(strip $(wildcard $(TARGET_DEVICE_DIR)/recovery/res))
+recovery_resource_deps := $(shell find $(recovery_resources_common) \
+ $(recovery_resources_private) -type f)
+
+ifeq ($(recovery_resources_private),)
+ $(info No private recovery resources for TARGET_DEVICE $(TARGET_DEVICE))
+endif
+
+INTERNAL_RECOVERYIMAGE_ARGS := \
+ $(addprefix --second ,$(INSTALLED_2NDBOOTLOADER_TARGET)) \
+ --kernel $(recovery_kernel) \
+ --ramdisk $(recovery_ramdisk)
+
+# Assumes this has already been stripped
+ifdef BOARD_KERNEL_CMDLINE
+ INTERNAL_RECOVERYIMAGE_ARGS += --cmdline "$(BOARD_KERNEL_CMDLINE)"
+endif
+ifdef BOARD_KERNEL_BASE
+ INTERNAL_RECOVERYIMAGE_ARGS += --base $(BOARD_KERNEL_BASE)
+endif
+
+# Keys authorized to sign OTA packages this build will accept. The
+# build always uses test-keys for this; release packaging tools will
+# substitute other keys for this one.
+OTA_PUBLIC_KEYS := $(SRC_TARGET_DIR)/product/security/testkey.x509.pem
+
+# Generate a file containing the keys that will be read by the
+# recovery binary.
+RECOVERY_INSTALL_OTA_KEYS := \
+ $(call intermediates-dir-for,PACKAGING,ota_keys)/keys
+DUMPKEY_JAR := $(HOST_OUT_JAVA_LIBRARIES)/dumpkey.jar
+$(RECOVERY_INSTALL_OTA_KEYS): PRIVATE_OTA_PUBLIC_KEYS := $(OTA_PUBLIC_KEYS)
+$(RECOVERY_INSTALL_OTA_KEYS): $(OTA_PUBLIC_KEYS) $(DUMPKEY_JAR)
+ @echo "DumpPublicKey: $@ <= $(PRIVATE_OTA_PUBLIC_KEYS)"
+ @rm -rf $@
+ @mkdir -p $(dir $@)
+ java -jar $(DUMPKEY_JAR) $(PRIVATE_OTA_PUBLIC_KEYS) > $@
+
+$(INSTALLED_RECOVERYIMAGE_TARGET): $(MKBOOTFS) $(MKBOOTIMG) $(MINIGZIP) \
+ $(INSTALLED_RAMDISK_TARGET) \
+ $(INSTALLED_BOOTIMAGE_TARGET) \
+ $(recovery_binary) \
+ $(recovery_initrc) $(recovery_kernel) \
+ $(INSTALLED_2NDBOOTLOADER_TARGET) \
+ $(recovery_build_prop) $(recovery_resource_deps) \
+ $(RECOVERY_INSTALL_OTA_KEYS)
+ @echo ----- Making recovery image ------
+ rm -rf $(TARGET_RECOVERY_OUT)
+ mkdir -p $(TARGET_RECOVERY_OUT)
+ mkdir -p $(TARGET_RECOVERY_ROOT_OUT)
+ mkdir -p $(TARGET_RECOVERY_ROOT_OUT)/etc
+ mkdir -p $(TARGET_RECOVERY_ROOT_OUT)/tmp
+ echo Copying baseline ramdisk...
+ cp -R $(TARGET_ROOT_OUT) $(TARGET_RECOVERY_OUT)
+ echo Modifying ramdisk contents...
+ cp -f $(recovery_initrc) $(TARGET_RECOVERY_ROOT_OUT)/
+ cp -f $(recovery_binary) $(TARGET_RECOVERY_ROOT_OUT)/sbin/
+ cp -rf $(recovery_resources_common) $(TARGET_RECOVERY_ROOT_OUT)/
+ $(foreach item,$(recovery_resources_private), \
+ cp -rf $(item) $(TARGET_RECOVERY_ROOT_OUT)/)
+ cp $(RECOVERY_INSTALL_OTA_KEYS) $(TARGET_RECOVERY_ROOT_OUT)/res/keys
+ cat $(INSTALLED_DEFAULT_PROP_TARGET) $(recovery_build_prop) \
+ > $(TARGET_RECOVERY_ROOT_OUT)/default.prop
+ $(MKBOOTFS) $(TARGET_RECOVERY_ROOT_OUT) | $(MINIGZIP) > $(recovery_ramdisk)
+ $(MKBOOTIMG) $(INTERNAL_RECOVERYIMAGE_ARGS) --output $@
+ @echo ----- Made recovery image -------- $@
+ $(hide) $(call assert-max-file-size,$@,$(BOARD_RECOVERYIMAGE_MAX_SIZE))
+
+else
+INSTALLED_RECOVERYIMAGE_TARGET :=
+endif
+
+.PHONY: recoveryimage
+recoveryimage: $(INSTALLED_RECOVERYIMAGE_TARGET)
+
+# -----------------------------------------------------------------
# system yaffs image
#
# First, the "unoptimized" image, which contains .apk/.jar files
@@ -518,10 +641,10 @@
SYSTEMIMAGE_SOURCE_DIR := $(TARGET_OUT)
endif
-$(INSTALLED_SYSTEMIMAGE): $(BUILT_SYSTEMIMAGE) | $(ACP)
+$(INSTALLED_SYSTEMIMAGE): $(BUILT_SYSTEMIMAGE) $(INSTALLED_RECOVERYIMAGE_TARGET) | $(ACP)
@echo "Install system fs image: $@"
$(copy-file-to-target)
- $(hide) $(call assert-max-file-size,$@,$(BOARD_SYSTEMIMAGE_MAX_SIZE))
+ $(hide) $(call assert-max-file-size,$@ $(INSTALLED_RECOVERYIMAGE_TARGET),$(BOARD_SYSTEMIMAGE_MAX_SIZE))
systemimage: $(INSTALLED_SYSTEMIMAGE)
@@ -614,72 +737,6 @@
$(build-userdatatarball-target)
-# If neither TARGET_NO_KERNEL nor TARGET_NO_RECOVERY are true
-ifeq (,$(filter true, $(TARGET_NO_KERNEL) $(TARGET_NO_RECOVERY)))
-
-# -----------------------------------------------------------------
-# Recovery image
-INSTALLED_RECOVERYIMAGE_TARGET := $(PRODUCT_OUT)/recovery.img
-
-recovery_initrc := $(call include-path-for, recovery)/etc/init.rc
-recovery_kernel := $(INSTALLED_KERNEL_TARGET) # same as a non-recovery system
-recovery_ramdisk := $(PRODUCT_OUT)/ramdisk-recovery.img
-recovery_build_prop := $(INSTALLED_BUILD_PROP_TARGET)
-recovery_binary := $(call intermediates-dir-for,EXECUTABLES,recovery)/recovery
-recovery_resources_common := $(call include-path-for, recovery)/res
-recovery_resources_private := $(strip $(wildcard $(TARGET_DEVICE_DIR)/recovery/res))
-recovery_resource_deps := $(shell find $(recovery_resources_common) \
- $(recovery_resources_private) -type f)
-
-ifeq ($(recovery_resources_private),)
- $(info No private recovery resources for TARGET_DEVICE $(TARGET_DEVICE))
-endif
-
-INTERNAL_RECOVERYIMAGE_ARGS := \
- $(addprefix --second ,$(INSTALLED_2NDBOOTLOADER_TARGET)) \
- --kernel $(recovery_kernel) \
- --ramdisk $(recovery_ramdisk)
-
-# Assumes this has already been stripped
-ifdef BOARD_KERNEL_CMDLINE
- INTERNAL_RECOVERYIMAGE_ARGS += --cmdline "$(BOARD_KERNEL_CMDLINE)"
-endif
-
-$(INSTALLED_RECOVERYIMAGE_TARGET): $(MKBOOTFS) $(MKBOOTIMG) \
- $(INSTALLED_RAMDISK_TARGET) \
- $(INSTALLED_BOOTIMAGE_TARGET) \
- $(recovery_binary) \
- $(recovery_initrc) $(recovery_kernel) \
- $(INSTALLED_2NDBOOTLOADER_TARGET) \
- $(recovery_build_prop) $(recovery_resource_deps)
- @echo ----- Making recovery image ------
- rm -rf $(TARGET_RECOVERY_OUT)
- mkdir -p $(TARGET_RECOVERY_OUT)
- mkdir -p $(TARGET_RECOVERY_ROOT_OUT)
- mkdir -p $(TARGET_RECOVERY_ROOT_OUT)/etc
- mkdir -p $(TARGET_RECOVERY_ROOT_OUT)/tmp
- echo Copying baseline ramdisk...
- cp -R $(TARGET_ROOT_OUT) $(TARGET_RECOVERY_OUT)
- echo Modifying ramdisk contents...
- cp -f $(recovery_initrc) $(TARGET_RECOVERY_ROOT_OUT)/
- cp -f $(recovery_binary) $(TARGET_RECOVERY_ROOT_OUT)/sbin/
- cp -rf $(recovery_resources_common) $(TARGET_RECOVERY_ROOT_OUT)/
- $(foreach item,$(recovery_resources_private), \
- cp -rf $(item) $(TARGET_RECOVERY_ROOT_OUT)/)
- cat $(INSTALLED_DEFAULT_PROP_TARGET) $(recovery_build_prop) \
- > $(TARGET_RECOVERY_ROOT_OUT)/default.prop
- $(MKBOOTFS) $(TARGET_RECOVERY_ROOT_OUT) | gzip > $(recovery_ramdisk)
- $(MKBOOTIMG) $(INTERNAL_RECOVERYIMAGE_ARGS) --output $@
- @echo ----- Made recovery image -------- $@
- $(hide) $(call assert-max-file-size,$@,$(BOARD_RECOVERYIMAGE_MAX_SIZE))
-
-else
-INSTALLED_RECOVERYIMAGE_TARGET :=
-endif
-
-.PHONY: recoveryimage
-recoveryimage: $(INSTALLED_RECOVERYIMAGE_TARGET)
-
# -----------------------------------------------------------------
# bring in the installer image generation defines if necessary
ifeq ($(TARGET_USE_DISKINSTALLER),true)
@@ -687,123 +744,20 @@
endif
# -----------------------------------------------------------------
-# OTA update package
-name := $(TARGET_PRODUCT)
-ifeq ($(TARGET_BUILD_TYPE),debug)
- name := $(name)_debug
-endif
-name := $(name)-ota-$(FILE_NAME_TAG)
+# host tools needed to build OTA packages
-INTERNAL_OTA_PACKAGE_TARGET := $(PRODUCT_OUT)/$(name).zip
-INTERNAL_OTA_INTERMEDIATES_DIR := $(call intermediates-dir-for,PACKAGING,ota)
-
-# If neither TARGET_NO_KERNEL nor TARGET_NO_RECOVERY are true
-ifeq (,$(filter true, $(TARGET_NO_KERNEL) $(TARGET_NO_RECOVERY)))
-INTERNAL_OTA_RECOVERYIMAGE_TARGET := $(INTERNAL_OTA_INTERMEDIATES_DIR)/system/recovery.img
-else
-INTERNAL_OTA_RECOVERYIMAGE_TARGET :=
-endif
-INTERNAL_OTA_SCRIPT_TARGET := $(INTERNAL_OTA_INTERMEDIATES_DIR)/META-INF/com/google/android/update-script
-
-# Sign OTA packages with the test key by default.
-# Actual product deliverables will be re-signed by hand.
-private_key := $(SRC_TARGET_DIR)/product/security/testkey.pk8
-certificate := $(SRC_TARGET_DIR)/product/security/testkey.x509.pem
-$(INTERNAL_OTA_PACKAGE_TARGET): $(private_key) $(certificate) $(SIGNAPK_JAR)
-$(INTERNAL_OTA_PACKAGE_TARGET): PRIVATE_PRIVATE_KEY := $(private_key)
-$(INTERNAL_OTA_PACKAGE_TARGET): PRIVATE_CERTIFICATE := $(certificate)
-
-# Depending on INSTALLED_SYSTEMIMAGE guarantees that SYSTEMIMAGE_SOURCE_DIR
-# is up-to-date. We use jar instead of zip so that we can use the -C
-# switch to avoid cd-ing all over the place.
-# TODO: Make our own jar-creation tool to avoid all these shenanigans.
-$(INTERNAL_OTA_PACKAGE_TARGET): \
- $(INTERNAL_OTA_SCRIPT_TARGET) \
- $(INTERNAL_OTA_RECOVERYIMAGE_TARGET) \
- $(INSTALLED_BOOTIMAGE_TARGET) \
- $(INSTALLED_RADIOIMAGE_TARGET) \
- $(INSTALLED_ANDROID_INFO_TXT_TARGET) \
- $(INSTALLED_SYSTEMIMAGE)
- @echo "Package OTA: $@"
- $(hide) rm -rf $@
- $(hide) jar cf $@ \
- $(foreach item, \
- $(INSTALLED_BOOTIMAGE_TARGET) \
- $(INSTALLED_RADIOIMAGE_TARGET) \
- $(INSTALLED_ANDROID_INFO_TXT_TARGET), \
- -C $(dir $(item)) $(notdir $(item))) \
- -C $(INTERNAL_OTA_INTERMEDIATES_DIR) .
- $(hide) find $(SYSTEMIMAGE_SOURCE_DIR) -type f -print | \
- sed 's|^$(dir $(SYSTEMIMAGE_SOURCE_DIR))|-C & |' | \
- xargs jar uf $@
- $(hide) if jar tf $@ | egrep '.{65}' >&2; then \
- echo "Path too long (>64 chars) for OTA update" >&2; \
- exit 1; \
- fi
- $(sign-package)
-
-$(INTERNAL_OTA_SCRIPT_TARGET): \
- $(HOST_OUT_EXECUTABLES)/make-update-script \
- $(INSTALLED_ANDROID_INFO_TXT_TARGET) \
- $(INSTALLED_SYSTEMIMAGE)
- @mkdir -p $(dir $@)
- @rm -rf $@
- @echo "Update script: $@"
- $(hide) TARGET_DEVICE=$(TARGET_DEVICE) \
- $< $(SYSTEMIMAGE_SOURCE_DIR) \
- $(INSTALLED_ANDROID_INFO_TXT_TARGET) \
- > $@
-
-ifneq (,$(INTERNAL_OTA_RECOVERYIMAGE_TARGET))
-# This copy is so recovery.img can be in /system within the OTA package.
-# That way it gets installed into the system image, which in turn installs it.
-$(INTERNAL_OTA_RECOVERYIMAGE_TARGET): $(INSTALLED_RECOVERYIMAGE_TARGET) | $(ACP)
- @mkdir -p $(dir $@)
- $(hide) $(ACP) $< $@
-endif
-
-.PHONY: otapackage
-otapackage: $(INTERNAL_OTA_PACKAGE_TARGET)
-
-# Keys authorized to sign OTA packages this build will accept.
-ifeq ($(INCLUDE_TEST_OTA_KEYS),true)
- OTA_PUBLIC_KEYS := \
- $(sort $(SRC_TARGET_DIR)/product/security/testkey.x509.pem $(OTA_PUBLIC_KEYS))
-endif
-
-ifeq ($(OTA_PUBLIC_KEYS),)
- $(error No OTA_PUBLIC_KEYS defined)
-endif
-
-# Build a keystore with the authorized keys in it.
-# java/android/android/server/checkin/UpdateVerifier.java uses this.
-ALL_DEFAULT_INSTALLED_MODULES += $(TARGET_OUT_ETC)/security/otacerts.zip
-$(TARGET_OUT_ETC)/security/otacerts.zip: $(OTA_PUBLIC_KEYS)
- $(hide) rm -f $@
- $(hide) mkdir -p $(dir $@)
- zip -qj $@ $(OTA_PUBLIC_KEYS)
-
-# The device does not support JKS.
-# $(hide) for f in $(OTA_PUBLIC_KEYS); do \
-# echo "keytool: $@ <= $$f" && \
-# keytool -keystore $@ -storepass $(notdir $@) -noprompt \
-# -import -file $$f -alias $(notdir $$f) || exit 1; \
-# done
-
-ifdef RECOVERY_INSTALL_OTA_KEYS_INC
-# Generate a C-includable file containing the keys.
-# RECOVERY_INSTALL_OTA_KEYS_INC is defined by recovery/Android.mk.
-# *** THIS IS A TOTAL HACK; EXECUTABLES MUST NOT CHANGE BETWEEN DIFFERENT
-# PRODUCTS/BUILD TYPES. ***
-# TODO: make recovery read the keys from an external file.
-DUMPKEY_JAR := $(HOST_OUT_JAVA_LIBRARIES)/dumpkey.jar
-$(RECOVERY_INSTALL_OTA_KEYS_INC): PRIVATE_OTA_PUBLIC_KEYS := $(OTA_PUBLIC_KEYS)
-$(RECOVERY_INSTALL_OTA_KEYS_INC): $(OTA_PUBLIC_KEYS) $(DUMPKEY_JAR)
- @echo "DumpPublicKey: $@ <= $(PRIVATE_OTA_PUBLIC_KEYS)"
- @rm -rf $@
- @mkdir -p $(dir $@)
- $(hide) java -jar $(DUMPKEY_JAR) $(PRIVATE_OTA_PUBLIC_KEYS) > $@
-endif
+.PHONY: otatools
+otatools: $(HOST_OUT_EXECUTABLES)/minigzip \
+ $(HOST_OUT_EXECUTABLES)/mkbootfs \
+ $(HOST_OUT_EXECUTABLES)/mkbootimg \
+ $(HOST_OUT_EXECUTABLES)/fs_config \
+ $(HOST_OUT_EXECUTABLES)/mkyaffs2image \
+ $(HOST_OUT_EXECUTABLES)/zipalign \
+ $(HOST_OUT_EXECUTABLES)/aapt \
+ $(HOST_OUT_EXECUTABLES)/bsdiff \
+ $(HOST_OUT_EXECUTABLES)/imgdiff \
+ $(HOST_OUT_JAVA_LIBRARIES)/dumpkey.jar \
+ $(HOST_OUT_JAVA_LIBRARIES)/signapk.jar
# -----------------------------------------------------------------
# A zip of the directories that map to the target filesystem.
@@ -833,21 +787,22 @@
endef
built_ota_tools := \
- $(call intermediates-dir-for,EXECUTABLES,applypatch)/applypatch \
- $(call intermediates-dir-for,EXECUTABLES,check_prereq)/check_prereq
+ $(call intermediates-dir-for,EXECUTABLES,applypatch)/applypatch \
+ $(call intermediates-dir-for,EXECUTABLES,check_prereq)/check_prereq \
+ $(call intermediates-dir-for,EXECUTABLES,updater)/updater
$(BUILT_TARGET_FILES_PACKAGE): PRIVATE_OTA_TOOLS := $(built_ota_tools)
+$(BUILT_TARGET_FILES_PACKAGE): PRIVATE_RECOVERY_API_VERSION := $(RECOVERY_API_VERSION)
+
# Depending on the various images guarantees that the underlying
# directories are up-to-date.
$(BUILT_TARGET_FILES_PACKAGE): \
- $(INTERNAL_OTA_SCRIPT_TARGET) \
$(INSTALLED_BOOTIMAGE_TARGET) \
$(INSTALLED_RADIOIMAGE_TARGET) \
$(INSTALLED_RECOVERYIMAGE_TARGET) \
- $(BUILT_SYSTEMIMAGE) \
+ $(INSTALLED_SYSTEMIMAGE) \
$(INSTALLED_USERDATAIMAGE_TARGET) \
$(INSTALLED_ANDROID_INFO_TXT_TARGET) \
- $(INTERNAL_OTA_SCRIPT_TARGET) \
$(built_ota_tools) \
$(APKCERTS_FILE) \
| $(ACP)
@@ -895,20 +850,54 @@
$(TARGET_OUT_DATA),$(zip_root)/DATA)
@# Extra contents of the OTA package
$(hide) mkdir -p $(zip_root)/OTA/bin
- $(hide) $(call package_files-copy-root, \
- $(INTERNAL_OTA_INTERMEDIATES_DIR),$(zip_root)/OTA)
$(hide) $(ACP) $(INSTALLED_ANDROID_INFO_TXT_TARGET) $(zip_root)/OTA/
$(hide) $(ACP) $(PRIVATE_OTA_TOOLS) $(zip_root)/OTA/bin/
- @# Files that don't end up in any images, but are necessary to
+ @# Files that do not end up in any images, but are necessary to
@# build them.
$(hide) mkdir -p $(zip_root)/META
$(hide) $(ACP) $(APKCERTS_FILE) $(zip_root)/META/apkcerts.txt
+ $(hide) echo "$(PRODUCT_OTA_PUBLIC_KEYS)" > $(zip_root)/META/otakeys.txt
+ $(hide) echo "$(PRIVATE_RECOVERY_API_VERSION)" > $(zip_root)/META/recovery-api-version.txt
+ $(hide) echo "blocksize $(BOARD_FLASH_BLOCK_SIZE)" > $(zip_root)/META/imagesizes.txt
+ $(hide) echo "boot $(BOARD_BOOTIMAGE_MAX_SIZE)" >> $(zip_root)/META/imagesizes.txt
+ $(hide) echo "recovery $(BOARD_RECOVERYIMAGE_MAX_SIZE)" >> $(zip_root)/META/imagesizes.txt
+ $(hide) echo "system $(BOARD_SYSTEMIMAGE_MAX_SIZE)" >> $(zip_root)/META/imagesizes.txt
+ $(hide) echo "userdata $(BOARD_USERDATAIMAGE_MAX_SIZE)" >> $(zip_root)/META/imagesizes.txt
@# Zip everything up, preserving symlinks
$(hide) (cd $(zip_root) && zip -qry ../$(notdir $@) .)
target-files-package: $(BUILT_TARGET_FILES_PACKAGE)
# -----------------------------------------------------------------
+# OTA update package
+
+ifneq ($(TARGET_SIMULATOR),true)
+ifneq ($(TARGET_PRODUCT),sdk)
+
+name := $(TARGET_PRODUCT)
+ifeq ($(TARGET_BUILD_TYPE),debug)
+ name := $(name)_debug
+endif
+name := $(name)-ota-$(FILE_NAME_TAG)
+
+INTERNAL_OTA_PACKAGE_TARGET := $(PRODUCT_OUT)/$(name).zip
+
+$(INTERNAL_OTA_PACKAGE_TARGET): KEY_CERT_PAIR := $(DEFAULT_KEY_CERT_PAIR)
+
+$(INTERNAL_OTA_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) otatools
+ @echo "Package OTA: $@"
+ $(hide) ./build/tools/releasetools/ota_from_target_files \
+ -p $(HOST_OUT) \
+ -k $(KEY_CERT_PAIR) \
+ $(BUILT_TARGET_FILES_PACKAGE) $@
+
+.PHONY: otapackage
+otapackage: $(INTERNAL_OTA_PACKAGE_TARGET)
+
+endif # TARGET_PRODUCT != sdk
+endif # TARGET_SIMULATOR != true
+
+# -----------------------------------------------------------------
# installed file list
# Depending on $(INSTALLED_SYSTEMIMAGE) ensures that it
# gets the DexOpt one if we're doing that.
@@ -1009,14 +998,8 @@
# -----------------------------------------------------------------
# The update package
-INTERNAL_UPDATE_PACKAGE_FILES += \
- $(INSTALLED_BOOTIMAGE_TARGET) \
- $(INSTALLED_RECOVERYIMAGE_TARGET) \
- $(INSTALLED_SYSTEMIMAGE) \
- $(INSTALLED_USERDATAIMAGE_TARGET) \
- $(INSTALLED_ANDROID_INFO_TXT_TARGET)
-
-ifneq ($(strip $(INTERNAL_UPDATE_PACKAGE_FILES)),)
+ifneq ($(TARGET_SIMULATOR),true)
+ifneq ($(TARGET_PRODUCT),sdk)
name := $(TARGET_PRODUCT)
ifeq ($(TARGET_BUILD_TYPE),debug)
@@ -1026,13 +1009,17 @@
INTERNAL_UPDATE_PACKAGE_TARGET := $(PRODUCT_OUT)/$(name).zip
-$(INTERNAL_UPDATE_PACKAGE_TARGET): $(INTERNAL_UPDATE_PACKAGE_FILES)
+$(INTERNAL_UPDATE_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) otatools
@echo "Package: $@"
- $(hide) zip -qj $@ $(INTERNAL_UPDATE_PACKAGE_FILES)
+ $(hide) ./build/tools/releasetools/img_from_target_files \
+ -p $(HOST_OUT) \
+ $(BUILT_TARGET_FILES_PACKAGE) $@
-else
-INTERNAL_UPDATE_PACKAGE_TARGET :=
-endif
+.PHONY: updatepackage
+updatepackage: $(INTERNAL_UPDATE_PACKAGE_TARGET)
+
+endif # TARGET_PRODUCT != sdk
+endif # TARGET_SIMULATOR != true
# -----------------------------------------------------------------
# The emulator package
@@ -1116,7 +1103,9 @@
$(target_notice_file_txt) \
$(tools_notice_file_txt) \
$(OUT_DOCS)/offline-sdk-timestamp \
- $(INTERNAL_UPDATE_PACKAGE_TARGET) \
+ $(INSTALLED_SYSTEMIMAGE) \
+ $(INSTALLED_USERDATAIMAGE_TARGET) \
+ $(INSTALLED_RAMDISK_TARGET) \
$(INSTALLED_SDK_BUILD_PROP_TARGET) \
$(ATREE_FILES) \
$(atree_dir)/sdk.atree \
diff --git a/core/apicheck_msg_current.txt b/core/apicheck_msg_current.txt
index c277ecd..d723a19 100644
--- a/core/apicheck_msg_current.txt
+++ b/core/apicheck_msg_current.txt
@@ -6,12 +6,11 @@
1) You can add "@hide" javadoc comments to the methods, etc. listed in the
errors above.
- 2) You can update current.xml by executing the following commands:
+ 2) You can update current.xml by executing the following command:
- p4 edit frameworks/base/api/current.xml
make update-api
- To check in the revised current.xml, you will need OWNERS approval.
+ To check in the revised current.xml, you will need approval from the android API council.
******************************
diff --git a/core/base_rules.mk b/core/base_rules.mk
index 4ee2985..a6bf504 100644
--- a/core/base_rules.mk
+++ b/core/base_rules.mk
@@ -267,7 +267,6 @@
JAVA_LIBRARIES,$(lib),$(LOCAL_IS_HOST_MODULE))/javalib.jar)
$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_INSTALL_DIR := $(dir $(LOCAL_INSTALLED_MODULE))
-$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_INTERMEDIATES_DIR := $(intermediates)
$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_CLASS_INTERMEDIATES_DIR := $(intermediates)/classes
$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_SOURCE_INTERMEDIATES_DIR := $(intermediates)/src
$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_JAVA_SOURCES := $(all_java_sources)
@@ -357,6 +356,8 @@
$(LOCAL_INTERMEDIATE_TARGETS) : PRIVATE_IS_HOST_MODULE := $(LOCAL_IS_HOST_MODULE)
$(LOCAL_INTERMEDIATE_TARGETS) : PRIVATE_HOST:= $(my_host)
+$(LOCAL_INTERMEDIATE_TARGETS) : PRIVATE_INTERMEDIATES_DIR:= $(intermediates)
+
# Tell the module and all of its sub-modules who it is.
$(LOCAL_INTERMEDIATE_TARGETS) : PRIVATE_MODULE:= $(LOCAL_MODULE)
@@ -391,6 +392,25 @@
endif # !LOCAL_UNINSTALLABLE_MODULE
+
+###########################################################
+## CHECK_BUILD goals
+###########################################################
+
+# If nobody has defined a more specific module for the
+# checked modules, use LOCAL_BUILT_MODULE. This was old
+# behavior, so it should be a safe default.
+ifndef LOCAL_CHECKED_MODULE
+ LOCAL_CHECKED_MODULE := $(LOCAL_BUILT_MODULE)
+endif
+
+# If they request that this module not be checked, then don't.
+# PLEASE DON'T SET THIS. ANY PLACES THAT SET THIS WITHOUT
+# GOOD REASON WILL HAVE IT REMOVED.
+ifdef LOCAL_DONT_CHECK_MODULE
+ LOCAL_CHECKED_MODULE :=
+endif
+
###########################################################
## Register with ALL_MODULES
###########################################################
@@ -403,6 +423,8 @@
$(ALL_MODULES.$(LOCAL_MODULE).PATH) $(LOCAL_PATH)
ALL_MODULES.$(LOCAL_MODULE).TAGS := \
$(ALL_MODULES.$(LOCAL_MODULE).TAGS) $(LOCAL_MODULE_TAGS)
+ALL_MODULES.$(LOCAL_MODULE).CHECKED := \
+ $(ALL_MODULES.$(LOCAL_MODULE).CHECKED) $(LOCAL_CHECKED_MODULE)
ALL_MODULES.$(LOCAL_MODULE).BUILT := \
$(ALL_MODULES.$(LOCAL_MODULE).BUILT) $(LOCAL_BUILT_MODULE)
ALL_MODULES.$(LOCAL_MODULE).INSTALLED := \
@@ -429,9 +451,6 @@
$(foreach tag,$(LOCAL_MODULE_TAGS),\
$(eval ALL_MODULE_NAME_TAGS.$(tag) += $(LOCAL_MODULE)))
-# Always build everything, but only install a subset.
-ALL_BUILT_MODULES += $(LOCAL_BUILT_MODULE)
-
###########################################################
## NOTICE files
###########################################################
diff --git a/core/binary.mk b/core/binary.mk
index 0f35d3f..ddcdc6f 100644
--- a/core/binary.mk
+++ b/core/binary.mk
@@ -47,11 +47,11 @@
arm_objects_mode := $(if $(LOCAL_ARM_MODE),$(LOCAL_ARM_MODE),arm)
normal_objects_mode := $(if $(LOCAL_ARM_MODE),$(LOCAL_ARM_MODE),thumb)
-# Read the values from something like TARGET_arm_release_CFLAGS or
-# TARGET_thumb_debug_CFLAGS. HOST_(arm|thumb)_(release|debug)_CFLAGS
-# values aren't actually used (although they are usually empty).
-arm_objects_cflags := $($(my_prefix)$(arm_objects_mode)_$($(my_prefix)BUILD_TYPE)_CFLAGS)
-normal_objects_cflags := $($(my_prefix)$(normal_objects_mode)_$($(my_prefix)BUILD_TYPE)_CFLAGS)
+# Read the values from something like TARGET_arm_CFLAGS or
+# TARGET_thumb_CFLAGS. HOST_(arm|thumb)_CFLAGS values aren't
+# actually used (although they are usually empty).
+arm_objects_cflags := $($(my_prefix)$(arm_objects_mode)_CFLAGS)
+normal_objects_cflags := $($(my_prefix)$(normal_objects_mode)_CFLAGS)
###########################################################
## Define per-module debugging flags. Users can turn on
@@ -212,6 +212,19 @@
endif
###########################################################
+## ObjC: Compile .m files to .o
+###########################################################
+
+objc_sources := $(filter %.m,$(LOCAL_SRC_FILES))
+objc_objects := $(addprefix $(intermediates)/,$(objc_sources:.m=.o))
+
+ifneq ($(strip $(objc_objects)),)
+$(objc_objects): $(intermediates)/%.o: $(TOPDIR)$(LOCAL_PATH)/%.m $(yacc_cpps) $(PRIVATE_ADDITIONAL_DEPENDENCIES)
+ $(transform-$(PRIVATE_HOST)m-to-o)
+-include $(objc_objects:%.o=%.P)
+endif
+
+###########################################################
## AS: Compile .S files to .o.
###########################################################
diff --git a/core/build_id.mk b/core/build_id.mk
index cb18bc4..060c9b5 100644
--- a/core/build_id.mk
+++ b/core/build_id.mk
@@ -23,7 +23,7 @@
# (like "TC1-RC5"). It must be a single word, and is
# capitalized by convention.
#
-BUILD_ID := CUPCAKE
+BUILD_ID := Donut
# DISPLAY_BUILD_NUMBER should only be set for development branches,
# If set, the BUILD_NUMBER (cl) is appended to the BUILD_ID for
diff --git a/core/clear_vars.mk b/core/clear_vars.mk
index b2e95b4..031bc0b 100644
--- a/core/clear_vars.mk
+++ b/core/clear_vars.mk
@@ -5,6 +5,8 @@
LOCAL_MODULE:=
LOCAL_MODULE_PATH:=
LOCAL_MODULE_STEM:=
+LOCAL_DONT_CHECK_MODULE:=
+LOCAL_CHECKED_MODULE:=
LOCAL_BUILT_MODULE:=
LOCAL_BUILT_MODULE_STEM:=
OVERRIDE_BUILT_MODULE_PATH:=
diff --git a/core/combo/linux-arm.mk b/core/combo/linux-arm.mk
index fa5f67e..edd2df4 100644
--- a/core/combo/linux-arm.mk
+++ b/core/combo/linux-arm.mk
@@ -15,32 +15,31 @@
$(combo_target)NO_UNDEFINED_LDFLAGS := -Wl,--no-undefined
-TARGET_arm_release_CFLAGS := -O2 \
- -fomit-frame-pointer \
- -fstrict-aliasing \
- -funswitch-loops \
- -finline-limit=300
+TARGET_arm_CFLAGS := -O2 \
+ -fomit-frame-pointer \
+ -fstrict-aliasing \
+ -funswitch-loops \
+ -finline-limit=300
-TARGET_thumb_release_CFLAGS := -mthumb \
- -Os \
- -fomit-frame-pointer \
- -fno-strict-aliasing \
- -finline-limit=64
+TARGET_thumb_CFLAGS := -mthumb \
+ -Os \
+ -fomit-frame-pointer \
+ -fno-strict-aliasing \
+ -finline-limit=64
-# When building for debug, compile everything as arm.
-TARGET_arm_debug_CFLAGS := $(TARGET_arm_release_CFLAGS) -fno-omit-frame-pointer -fno-strict-aliasing
-TARGET_thumb_debug_CFLAGS := $(TARGET_thumb_release_CFLAGS) -marm -fno-omit-frame-pointer
-
-# NOTE: if you try to build a debug build with thumb, several
+# Set FORCE_ARM_DEBUGGING to "true" in your buildspec.mk
+# or in your environment to force a full arm build, even for
+# files that are normally built as thumb; this can make
+# gdb debugging easier. Don't forget to do a clean build.
+#
+# NOTE: if you try to build a -O0 build with thumb, several
# of the libraries (libpv, libwebcore, libkjs) need to be built
# with -mlong-calls. When built at -O0, those libraries are
# too big for a thumb "BL <label>" to go from one end to the other.
-
-## As hopefully a temporary hack,
-## use this to force a full ARM build (for easier debugging in gdb)
-## (don't forget to do a clean build)
-##TARGET_arm_release_CFLAGS := $(TARGET_arm_release_CFLAGS) -fno-omit-frame-pointer
-##TARGET_thumb_release_CFLAGS := $(TARGET_thumb_release_CFLAGS) -marm -fno-omit-frame-pointer
+ifeq ($(FORCE_ARM_DEBUGGING),true)
+ TARGET_arm_CFLAGS += -fno-omit-frame-pointer
+ TARGET_thumb_CFLAGS += -marm -fno-omit-frame-pointer
+endif
## on some hosts, the target cross-compiler is not available so do not run this command
ifneq ($(wildcard $($(combo_target)CC)),)
diff --git a/core/combo/linux-x86.mk b/core/combo/linux-x86.mk
index 372c63e..f466147 100644
--- a/core/combo/linux-x86.mk
+++ b/core/combo/linux-x86.mk
@@ -10,7 +10,7 @@
ifeq ($(combo_target),HOST_)
# $(1): The file to check
define get-file-size
-stat --format "%s" "$(1)"
+stat --format "%s" "$(1)" | tr -d '\n'
endef
endif
diff --git a/core/combo/select.mk b/core/combo/select.mk
index c54da22..273b660 100644
--- a/core/combo/select.mk
+++ b/core/combo/select.mk
@@ -7,7 +7,6 @@
# $(combo_target)OS -- standard name for this host (LINUX, DARWIN, etc.)
# $(combo_target)ARCH -- standard name for process architecture (powerpc, x86, etc.)
# $(combo_target)GLOBAL_CFLAGS -- C compiler flags to use for everything
-# $(combo_target)DEBUG_CFLAGS -- additional C compiler flags for debug builds
# $(combo_target)RELEASE_CFLAGS -- additional C compiler flags for release builds
# $(combo_target)GLOBAL_ARFLAGS -- flags to use for static linking everything
# $(combo_target)SHLIB_SUFFIX -- suffix of shared libraries
@@ -39,7 +38,6 @@
# These flags might (will) be overridden by the target makefiles
$(combo_target)GLOBAL_CFLAGS := -fno-exceptions -Wno-multichar
-$(combo_target)DEBUG_CFLAGS := -O0 -g
$(combo_target)RELEASE_CFLAGS := -O2 -g -fno-strict-aliasing
$(combo_target)GLOBAL_ARFLAGS := crs
diff --git a/core/config.mk b/core/config.mk
index 90a40a7..b705de5 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -76,12 +76,10 @@
# These can be changed to modify both host and device modules.
COMMON_GLOBAL_CFLAGS:= -DANDROID -fmessage-length=0 -W -Wall -Wno-unused
-COMMON_DEBUG_CFLAGS:=
COMMON_RELEASE_CFLAGS:= -DNDEBUG -UDEBUG
-COMMON_GLOBAL_CPPFLAGS:=
-COMMON_DEBUG_CPPFLAGS:=
-COMMON_RELEASE_CPPFLAGS:=
+COMMON_GLOBAL_CPPFLAGS:= -DANDROID -fmessage-length=0 -W -Wall -Wno-unused -Wnon-virtual-dtor
+COMMON_RELEASE_CPPFLAGS:= -DNDEBUG -UDEBUG
# Set the extensions used for various packages
COMMON_PACKAGE_SUFFIX := .zip
@@ -158,6 +156,7 @@
ICUDATA := $(HOST_OUT_EXECUTABLES)/icudata$(HOST_EXECUTABLE_SUFFIX)
SIGNAPK_JAR := $(HOST_OUT_JAVA_LIBRARIES)/signapk$(COMMON_JAVA_PACKAGE_SUFFIX)
MKBOOTFS := $(HOST_OUT_EXECUTABLES)/mkbootfs$(HOST_EXECUTABLE_SUFFIX)
+MINIGZIP := $(HOST_OUT_EXECUTABLES)/minigzip$(HOST_EXECUTABLE_SUFFIX)
MKBOOTIMG := $(HOST_OUT_EXECUTABLES)/mkbootimg$(HOST_EXECUTABLE_SUFFIX)
MKYAFFS2 := $(HOST_OUT_EXECUTABLES)/mkyaffs2image$(HOST_EXECUTABLE_SUFFIX)
APICHECK := $(HOST_OUT_EXECUTABLES)/apicheck$(HOST_EXECUTABLE_SUFFIX)
@@ -227,19 +226,15 @@
# ###############################################################
HOST_GLOBAL_CFLAGS += $(COMMON_GLOBAL_CFLAGS)
-HOST_DEBUG_CFLAGS += $(COMMON_DEBUG_CFLAGS)
HOST_RELEASE_CFLAGS += $(COMMON_RELEASE_CFLAGS)
HOST_GLOBAL_CPPFLAGS += $(COMMON_GLOBAL_CPPFLAGS)
-HOST_DEBUG_CPPFLAGS += $(COMMON_DEBUG_CPPFLAGS)
HOST_RELEASE_CPPFLAGS += $(COMMON_RELEASE_CPPFLAGS)
TARGET_GLOBAL_CFLAGS += $(COMMON_GLOBAL_CFLAGS)
-TARGET_DEBUG_CFLAGS += $(COMMON_DEBUG_CFLAGS)
TARGET_RELEASE_CFLAGS += $(COMMON_RELEASE_CFLAGS)
TARGET_GLOBAL_CPPFLAGS += $(COMMON_GLOBAL_CPPFLAGS)
-TARGET_DEBUG_CPPFLAGS += $(COMMON_DEBUG_CPPFLAGS)
TARGET_RELEASE_CPPFLAGS += $(COMMON_RELEASE_CPPFLAGS)
HOST_GLOBAL_LD_DIRS += -L$(HOST_OUT_INTERMEDIATE_LIBRARIES)
@@ -250,7 +245,7 @@
# Many host compilers don't support these flags, so we have to make
# sure to only specify them for the target compilers checked in to
-# the source tree. The simulator uses the target flags but the
+# the source tree. The simulator passes the target flags to the
# host compiler, so only set them for the target when the target
# is not the simulator.
ifneq ($(TARGET_SIMULATOR),true)
@@ -258,21 +253,11 @@
TARGET_GLOBAL_CPPFLAGS += $(TARGET_ERROR_FLAGS)
endif
-ifeq ($(HOST_BUILD_TYPE),release)
-HOST_GLOBAL_CFLAGS+= $(HOST_RELEASE_CFLAGS)
-HOST_GLOBAL_CPPFLAGS+= $(HOST_RELEASE_CPPFLAGS)
-else
-HOST_GLOBAL_CFLAGS+= $(HOST_DEBUG_CFLAGS)
-HOST_GLOBAL_CPPFLAGS+= $(HOST_DEBUG_CPPFLAGS)
-endif
+HOST_GLOBAL_CFLAGS += $(HOST_RELEASE_CFLAGS)
+HOST_GLOBAL_CPPFLAGS += $(HOST_RELEASE_CPPFLAGS)
-ifeq ($(TARGET_BUILD_TYPE),release)
-TARGET_GLOBAL_CFLAGS+= $(TARGET_RELEASE_CFLAGS)
-TARGET_GLOBAL_CPPFLAGS+= $(TARGET_RELEASE_CPPFLAGS)
-else
-TARGET_GLOBAL_CFLAGS+= $(TARGET_DEBUG_CFLAGS)
-TARGET_GLOBAL_CPPFLAGS+= $(TARGET_DEBUG_CPPFLAGS)
-endif
+TARGET_GLOBAL_CFLAGS += $(TARGET_RELEASE_CFLAGS)
+TARGET_GLOBAL_CPPFLAGS += $(TARGET_RELEASE_CPPFLAGS)
# TODO: do symbol compression
TARGET_COMPRESS_MODULE_SYMBOLS := false
@@ -290,7 +275,7 @@
# The 'current' version is whatever this source tree is. Once the apicheck
# tool can generate the stubs from the xml files, we'll use that to be
# able to build back-versions. In the meantime, 'current' is the only
-# one supported.
+# one supported.
#
# sgrax is the opposite of xargs. It takes the list of args and puts them
# on each line for sort to process.
diff --git a/core/definitions.mk b/core/definitions.mk
index 17ec646..c84cbd8 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -41,9 +41,6 @@
# set of installed targets.
ALL_DEFAULT_INSTALLED_MODULES:=
-# Full paths to all targets that will be built.
-ALL_BUILT_MODULES:=
-
# The list of tags that have been defined by
# LOCAL_MODULE_TAGS. Each word in this variable maps
# to a corresponding ALL_MODULE_TAGS.<tagname> variable
@@ -794,6 +791,22 @@
endef
###########################################################
+## Commands for running gcc to compile an Objective-C file
+## This should never happen for target builds, but if it
+## does, this rule will produce an error at build time.
+###########################################################
+
+define transform-m-to-o-no-deps
+@echo "target ObjC: $(PRIVATE_MODULE) <= $<"
+$(call transform-c-or-s-to-o-no-deps)
+endef
+
+define transform-m-to-o
+$(transform-m-to-o-no-deps)
+$(hide) $(transform-d-to-p)
+endef
+
+###########################################################
## Commands for running gcc to compile a host C++ file
###########################################################
@@ -871,15 +884,45 @@
endef
###########################################################
+## Commands for running gcc to compile a host Objective-C file
+###########################################################
+
+define transform-host-m-to-o-no-deps
+@echo "host ObjC: $(PRIVATE_MODULE) <= $<"
+$(call transform-host-c-or-s-to-o-no-deps)
+endef
+
+define transform-host-m-to-o
+$(transform-host-m-to-o-no-deps)
+$(transform-d-to-p)
+endef
+
+###########################################################
## Commands for running ar
###########################################################
+define extract-and-include-whole-static-libs
+$(foreach lib,$(PRIVATE_ALL_WHOLE_STATIC_LIBRARIES), \
+ @echo "preparing StaticLib: $(PRIVATE_MODULE) [including $(lib)]"; \
+ ldir=$(PRIVATE_INTERMEDIATES_DIR)/WHOLE/$(basename $(notdir $(lib)))_objs;\
+ rm -rf $$ldir; \
+ mkdir -p $$ldir; \
+ filelist=; \
+ for f in `$(TARGET_AR) t $(lib)`; do \
+ $(TARGET_AR) p $(lib) $$f > $$ldir/$$f; \
+ filelist="$$filelist $$ldir/$$f"; \
+ done ; \
+ $(TARGET_AR) $(TARGET_GLOBAL_ARFLAGS) $(PRIVATE_ARFLAGS) $@ $$filelist;\
+)
+endef
+
# Explicitly delete the archive first so that ar doesn't
# try to add to an existing archive.
define transform-o-to-static-lib
@mkdir -p $(dir $@)
-@echo "target StaticLib: $(PRIVATE_MODULE) ($@)"
@rm -f $@
+$(extract-and-include-whole-static-libs)
+@echo "target StaticLib: $(PRIVATE_MODULE) ($@)"
$(hide) $(TARGET_AR) $(TARGET_GLOBAL_ARFLAGS) $(PRIVATE_ARFLAGS) $@ $^
endef
@@ -1122,7 +1165,11 @@
$(addprefix -P , $(PRIVATE_RESOURCE_PUBLICS_OUTPUT)) \
$(addprefix -S , $(PRIVATE_RESOURCE_DIR)) \
$(addprefix -A , $(PRIVATE_ASSET_DIR)) \
- $(addprefix -I , $(PRIVATE_AAPT_INCLUDES))
+ $(addprefix -I , $(PRIVATE_AAPT_INCLUDES)) \
+ $(addprefix --min-sdk-version , $(DEFAULT_APP_TARGET_SDK)) \
+ $(addprefix --target-sdk-version , $(DEFAULT_APP_TARGET_SDK)) \
+ $(addprefix --version-code , $(PLATFORM_SDK_VERSION)) \
+ $(addprefix --version-name , $(PLATFORM_VERSION))
endef
ifeq ($(HOST_OS),windows)
@@ -1174,7 +1221,7 @@
echo Missing file $$f; \
exit 1; \
fi; \
- unzip -q $$f -d $(2); \
+ unzip -qo $$f -d $(2); \
(cd $(2) && rm -rf META-INF); \
done
endef
@@ -1189,21 +1236,21 @@
$(hide) mkdir -p $(PRIVATE_CLASS_INTERMEDIATES_DIR)
$(call unzip-jar-files,$(PRIVATE_STATIC_JAVA_LIBRARIES), \
$(PRIVATE_CLASS_INTERMEDIATES_DIR))
-$(call dump-words-to-file,$(PRIVATE_JAVA_SOURCES),$(PRIVATE_CLASS_INTERMEDIATES_DIR)/java-source-list)
+$(call dump-words-to-file,$(PRIVATE_JAVA_SOURCES),$(dir $(PRIVATE_CLASS_INTERMEDIATES_DIR))/java-source-list)
$(hide) if [ -d "$(PRIVATE_SOURCE_INTERMEDIATES_DIR)" ]; then \
- find $(PRIVATE_SOURCE_INTERMEDIATES_DIR) -name '*.java' >> $(PRIVATE_CLASS_INTERMEDIATES_DIR)/java-source-list; \
+ find $(PRIVATE_SOURCE_INTERMEDIATES_DIR) -name '*.java' >> $(dir $(PRIVATE_CLASS_INTERMEDIATES_DIR))/java-source-list; \
fi
-$(hide) tr ' ' '\n' < $(PRIVATE_CLASS_INTERMEDIATES_DIR)/java-source-list \
- | sort -u > $(PRIVATE_CLASS_INTERMEDIATES_DIR)/java-source-list-uniq
+$(hide) tr ' ' '\n' < $(dir $(PRIVATE_CLASS_INTERMEDIATES_DIR))/java-source-list \
+ | sort -u > $(dir $(PRIVATE_CLASS_INTERMEDIATES_DIR))/java-source-list-uniq
$(hide) $(TARGET_JAVAC) -encoding ascii $(PRIVATE_BOOTCLASSPATH) \
$(addprefix -classpath ,$(strip \
$(call normalize-path-list,$(PRIVATE_ALL_JAVA_LIBRARIES)))) \
$(strip $(PRIVATE_JAVAC_DEBUG_FLAGS)) $(xlint_unchecked) \
-extdirs "" -d $(PRIVATE_CLASS_INTERMEDIATES_DIR) \
- \@$(PRIVATE_CLASS_INTERMEDIATES_DIR)/java-source-list-uniq \
+ \@$(dir $(PRIVATE_CLASS_INTERMEDIATES_DIR))/java-source-list-uniq \
|| ( rm -rf $(PRIVATE_CLASS_INTERMEDIATES_DIR) ; exit 41 )
-$(hide) rm -f $(PRIVATE_CLASS_INTERMEDIATES_DIR)/java-source-list
-$(hide) rm -f $(PRIVATE_CLASS_INTERMEDIATES_DIR)/java-source-list-uniq
+$(hide) rm -f $(dir $(PRIVATE_CLASS_INTERMEDIATES_DIR))/java-source-list
+$(hide) rm -f $(dir $(PRIVATE_CLASS_INTERMEDIATES_DIR))/java-source-list-uniq
$(hide) mkdir -p $(dir $@)
$(hide) jar $(if $(strip $(PRIVATE_JAR_MANIFEST)),-cfm,-cf) \
$@ $(PRIVATE_JAR_MANIFEST) -C $(PRIVATE_CLASS_INTERMEDIATES_DIR) .
@@ -1250,6 +1297,9 @@
# A list of dynamic and static parameters; build layers for
# dynamic params that lay over the static ones.
#TODO: update the manifest to point to the package file
+#Note that the version numbers are given to aapt as simple default
+#values; applications can override these by explicitly stating
+#them in their manifest.
define add-assets-to-package
$(hide) $(AAPT) package -z -u $(PRIVATE_AAPT_FLAGS) \
$(addprefix -c , $(PRODUCT_AAPT_CONFIG)) \
@@ -1257,6 +1307,10 @@
$(addprefix -S , $(PRIVATE_RESOURCE_DIR)) \
$(addprefix -A , $(PRIVATE_ASSET_DIR)) \
$(addprefix -I , $(PRIVATE_AAPT_INCLUDES)) \
+ $(addprefix --min-sdk-version , $(DEFAULT_APP_TARGET_SDK)) \
+ $(addprefix --target-sdk-version , $(DEFAULT_APP_TARGET_SDK)) \
+ $(addprefix --version-code , $(PLATFORM_SDK_VERSION)) \
+ $(addprefix --version-name , $(PLATFORM_VERSION)) \
-F $@
endef
@@ -1328,14 +1382,16 @@
$(PRIVATE_CLASS_INTERMEDIATES_DIR))
$(call dump-words-to-file,$(sort\
$(PRIVATE_JAVA_SOURCES)),\
- $(PRIVATE_CLASS_INTERMEDIATES_DIR)/java-source-list-uniq)
+ $(PRIVATE_INTERMEDIATES_DIR)/java-source-list-uniq)
$(hide) $(HOST_JAVAC) -encoding ascii -g \
$(xlint_unchecked) \
$(addprefix -classpath ,$(strip \
$(call normalize-path-list,$(PRIVATE_ALL_JAVA_LIBRARIES)))) \
-extdirs "" -d $(PRIVATE_CLASS_INTERMEDIATES_DIR)\
- \@$(PRIVATE_CLASS_INTERMEDIATES_DIR)/java-source-list-uniq || \
+ \@$(PRIVATE_INTERMEDIATES_DIR)/java-source-list-uniq || \
( rm -rf $(PRIVATE_CLASS_INTERMEDIATES_DIR) ; exit 41 )
+$(hide) rm -f $(PRIVATE_INTERMEDIATES_DIR)/java-source-list
+$(hide) rm -f $(PRIVATE_INTERMEDIATES_DIR)/java-source-list-uniq
$(hide) jar $(if $(strip $(PRIVATE_JAR_MANIFEST)),-cfm,-cf) \
$@ $(PRIVATE_JAR_MANIFEST) $(PRIVATE_EXTRA_JAR_ARGS) \
-C $(PRIVATE_CLASS_INTERMEDIATES_DIR) .
@@ -1481,8 +1537,16 @@
$(error HOST_OS must define get-file-size)
endif
-# $(1): The file to check (often $@)
-# $(2): The maximum size, in decimal bytes
+# Convert a partition data size (eg, as reported in /proc/mtd) to the
+# size of the image used to flash that partition (which includes a
+# 64-byte spare area for each 2048-byte page).
+# $(1): the partition data size
+define image-size-from-data-size
+$(shell echo $$(($(1) / 2048 * (2048+64))))
+endef
+
+# $(1): The file(s) to check (often $@)
+# $(2): The maximum total image size, in decimal bytes
#
# If $(2) is empty, evaluates to "true"
#
@@ -1491,19 +1555,20 @@
# next whole flash block size.
define assert-max-file-size
$(if $(2), \
- fileSize=`$(call get-file-size,$(1))`; \
- maxSize=$(2); \
- onePct=`expr "(" $$maxSize + 99 ")" / 100`; \
- onePct=`expr "(" "(" $$onePct + $(BOARD_FLASH_BLOCK_SIZE) - 1 ")" / \
- $(BOARD_FLASH_BLOCK_SIZE) ")" "*" $(BOARD_FLASH_BLOCK_SIZE)`; \
- reserve=`expr 2 "*" $(BOARD_FLASH_BLOCK_SIZE)`; \
- if [ "$$onePct" -gt "$$reserve" ]; then \
- reserve="$$onePct"; \
- fi; \
- maxSize=`expr $$maxSize - $$reserve`; \
- if [ "$$fileSize" -gt "$$maxSize" ]; then \
- echo "error: $(1) too large ($$fileSize > [$(2) - $$reserve])"; \
- false; \
+ size=$$(for i in $(1); do $(call get-file-size,$$i); echo +; done; echo 0); \
+ total=$$(( $$( echo "$$size" ) )); \
+ printname=$$(echo -n "$(1)" | tr " " +); \
+ echo "$$printname total size is $$total"; \
+ img_blocksize=$(call image-size-from-data-size,$(BOARD_FLASH_BLOCK_SIZE)); \
+ twoblocks=$$((img_blocksize * 2)); \
+ onepct=$$((((($(2) / 100) - 1) / img_blocksize + 1) * img_blocksize)); \
+ reserve=$$((twoblocks > onepct ? twoblocks : onepct)); \
+ maxsize=$$(($(2) - reserve)); \
+ if [ "$$total" -gt "$$maxsize" ]; then \
+ echo "error: $$printname too large ($$total > [$(2) - $$reserve])"; \
+ false; \
+ elif [ "$$total" -gt $$((maxsize - 32768)) ]; then \
+ echo "WARNING: $$printname approaching size limit ($$total now; limit $$maxsize)"; \
fi \
, \
true \
diff --git a/core/droiddoc.mk b/core/droiddoc.mk
index 30bd918..03ffa55 100644
--- a/core/droiddoc.mk
+++ b/core/droiddoc.mk
@@ -127,7 +127,7 @@
$(HOST_OUT_JAVA_LIBRARIES)/clearsilver$(COMMON_JAVA_PACKAGE_SUFFIX) \
$(HOST_OUT_SHARED_LIBRARIES)/libclearsilver-jni$(HOST_JNILIB_SUFFIX)
-$(full_target): PRIVATE_DOCLETPATH := $(HOST_OUT_JAVA_LIBRARIES)/clearsilver$(COMMON_JAVA_PACKAGE_SUFFIX):$(HOST_OUT_JAVA_LIBRARIES)/droiddoc$(COMMON_JAVA_PACKAGE_SUFFIX)
+$(full_target): PRIVATE_DOCLETPATH := $(HOST_OUT_JAVA_LIBRARIES)/clearsilver$(COMMON_JAVA_PACKAGE_SUFFIX):$(HOST_OUT_JAVA_LIBRARIES)/droiddoc$(COMMON_JAVA_PACKAGE_SUFFIX):$(HOST_OUT_JAVA_LIBRARIES)/apicheck$(COMMON_JAVA_PACKAGE_SUFFIX)
$(full_target): PRIVATE_CURRENT_BUILD := -hdf page.build $(BUILD_ID)-$(BUILD_NUMBER)
$(full_target): PRIVATE_CURRENT_TIME := -hdf page.now "$(shell date "+%d %b %Y %k:%M")"
$(full_target): PRIVATE_TEMPLATE_DIR := $(LOCAL_DROIDDOC_TEMPLATE_DIR)
diff --git a/core/envsetup.mk b/core/envsetup.mk
index ba93549..31901e9 100644
--- a/core/envsetup.mk
+++ b/core/envsetup.mk
@@ -7,6 +7,9 @@
# OUT_DIR is also set to "out" if it's not already set.
# this allows you to set it to somewhere else if you like
+# Set up version information.
+include $(BUILD_SYSTEM)/version_defaults.mk
+
# ---------------------------------------------------------------
# If you update the build system such that the environment setup
# or buildspec.mk need to be updated, increment this number, and
@@ -319,6 +322,8 @@
ifneq ($(PRINT_BUILD_CONFIG),)
$(info ============================================)
+$(info PLATFORM_VERSION_CODENAME=$(PLATFORM_VERSION_CODENAME))
+$(info PLATFORM_VERSION=$(PLATFORM_VERSION))
$(info TARGET_PRODUCT=$(TARGET_PRODUCT))
$(info TARGET_BUILD_VARIANT=$(TARGET_BUILD_VARIANT))
$(info TARGET_SIMULATOR=$(TARGET_SIMULATOR))
diff --git a/core/java.mk b/core/java.mk
index 9150a5c..658b173 100644
--- a/core/java.mk
+++ b/core/java.mk
@@ -37,6 +37,54 @@
$(error LOCAL_BUILT_MODULE_STEM may not be "$(LOCAL_BUILT_MODULE_STEM)")
endif
+
+##############################################################################
+# Define the intermediate targets before including base_rules so they get
+# the correct environment.
+##############################################################################
+
+intermediates := $(call local-intermediates-dir)
+intermediates.COMMON := $(call local-intermediates-dir,COMMON)
+
+# This is cleared below, and re-set if we really need it.
+full_classes_jar := $(intermediates.COMMON)/classes.jar
+
+# Emma source code coverage
+ifneq ($(EMMA_INSTRUMENT),true)
+LOCAL_NO_EMMA_INSTRUMENT := true
+LOCAL_NO_EMMA_COMPILE := true
+endif
+
+# Choose leaf name for the compiled jar file.
+ifneq ($(LOCAL_NO_EMMA_COMPILE),true)
+full_classes_compiled_jar_leaf := classes-no-debug-var.jar
+else
+full_classes_compiled_jar_leaf := classes-full-debug.jar
+endif
+full_classes_compiled_jar := $(intermediates.COMMON)/$(full_classes_compiled_jar_leaf)
+
+emma_intermediates_dir := $(intermediates.COMMON)/emma_out
+# the 'lib/$(full_classes_compiled_jar_leaf)' portion of this path is fixed in
+# the emma tool
+full_classes_emma_jar := $(emma_intermediates_dir)/lib/$(full_classes_compiled_jar_leaf)
+full_classes_stubs_jar := $(intermediates.COMMON)/stubs.jar
+full_classes_jarjar_jar := $(full_classes_jar)
+built_dex := $(intermediates.COMMON)/classes.dex
+
+LOCAL_INTERMEDIATE_TARGETS += \
+ $(full_classes_jar) \
+ $(full_classes_compiled_jar) \
+ $(full_classes_emma_jar) \
+ $(full_classes_stubs_jar) \
+ $(full_classes_jarjar_jar) \
+ $(built_dex)
+
+
+# TODO: It looks like the only thing we need from base_rules is
+# all_java_sources. See if we can get that by adding a
+# common_java.mk, and moving the include of base_rules.mk to
+# after all the declarations.
+
#######################################
include $(BUILD_SYSTEM)/base_rules.mk
#######################################
@@ -65,6 +113,7 @@
# LOCAL_BUILT_MODULE, so it will inherit the necessary PRIVATE_*
# variable definitions.
full_classes_jar := $(intermediates.COMMON)/classes.jar
+built_dex := $(intermediates.COMMON)/classes.dex
# Droiddoc isn't currently able to generate stubs for modules, so we're just
# allowing it to use the classes.jar as the "stubs" that would be use to link
@@ -74,34 +123,28 @@
# it, so it's closest to what's on the device.
# - This extra copy, with the dependency on LOCAL_BUILT_MODULE allows the
# PRIVATE_ vars to be preserved.
-full_classes_stubs_jar := $(intermediates.COMMON)/stubs.jar
$(full_classes_stubs_jar): PRIVATE_SOURCE_FILE := $(full_classes_jar)
$(full_classes_stubs_jar) : $(LOCAL_BUILT_MODULE) | $(ACP)
@echo Copying $(PRIVATE_SOURCE_FILE)
$(hide) $(ACP) -fp $(PRIVATE_SOURCE_FILE) $@
ALL_MODULES.$(LOCAL_MODULE).STUBS := $(full_classes_stubs_jar)
-# Emma source code coverage
-ifneq ($(EMMA_INSTRUMENT),true)
-LOCAL_NO_EMMA_INSTRUMENT := true
-LOCAL_NO_EMMA_COMPILE := true
-endif
-
-# Choose leaf name for the compiled jar file.
-ifneq ($(LOCAL_NO_EMMA_COMPILE),true)
-full_classes_compiled_jar_leaf := classes-no-debug-var.jar
-else
-full_classes_compiled_jar_leaf := classes-full-debug.jar
-endif
-
# Compile the java files to a .jar file.
# This intentionally depends on java_sources, not all_java_sources.
# Deps for generated source files must be handled separately,
# via deps on the target that generates the sources.
-full_classes_compiled_jar := $(intermediates.COMMON)/$(full_classes_compiled_jar_leaf)
$(full_classes_compiled_jar): $(java_sources) $(full_java_lib_deps)
$(transform-java-to-classes.jar)
+# All of the rules after full_classes_compiled_jar are very unlikely
+# to fail except for bugs in their respective tools. If you would
+# like to run these rules, add the "all" modifier goal to the make
+# command line.
+# This overwrites the value defined in base_rules.mk. That's a little
+# dirty. It's preferable to set LOCAL_CHECKED_MODULE, but this has to
+# be done after the inclusion of base_rules.mk.
+ALL_MODULES.$(LOCAL_MODULE).CHECKED := $(full_classes_compiled_jar)
+
ifneq ($(LOCAL_NO_EMMA_COMPILE),true)
# If you instrument class files that have local variable debug information in
# them emma does not correctly maintain the local variable table.
@@ -115,11 +158,6 @@
$(full_classes_compiled_jar): PRIVATE_JAVAC_DEBUG_FLAGS := -g
endif
-emma_intermediates_dir := $(intermediates.COMMON)/emma_out
-# the 'lib/$(full_classes_compiled_jar_leaf)' portion of this path is fixed in
-# the emma tool
-full_classes_emma_jar := $(emma_intermediates_dir)/lib/$(full_classes_compiled_jar_leaf)
-
ifeq ($(LOCAL_IS_STATIC_JAVA_LIBRARY),true)
# Skip adding emma instrumentation to class files if this is a static library,
# since it will be instrumented by the package that includes it
@@ -142,7 +180,6 @@
# Run jarjar if necessary, otherwise just copy the file. This is the last
# part of this step, so the output of this command is full_classes_jar.
-full_classes_jarjar_jar := $(full_classes_jar)
ifneq ($(strip $(LOCAL_JARJAR_RULES)),)
$(full_classes_jarjar_jar): PRIVATE_JARJAR_RULES := $(LOCAL_JARJAR_RULES)
$(full_classes_jarjar_jar): $(full_classes_emma_jar) | jarjar
@@ -154,9 +191,6 @@
$(hide) $(ACP) $< $@
endif
-
-built_dex := $(intermediates.COMMON)/classes.dex
-
# Override PRIVATE_INTERMEDIATES_DIR so that install-dex-debug
# will work even when intermediates != intermediates.COMMON.
$(built_dex): PRIVATE_INTERMEDIATES_DIR := $(intermediates.COMMON)
@@ -188,9 +222,8 @@
$(LOCAL_MODULE)-findbugs : $(findbugs_html)
$(findbugs_html) : $(findbugs_xml)
@mkdir -p $(dir $@)
- @echo UnionBugs: $@
- $(hide) prebuilt/common/findbugs/bin/unionBugs $(PRIVATE_XML_FILE) \
- | prebuilt/common/findbugs/bin/convertXmlToText -html:fancy.xsl \
+ @echo ConvertXmlToText: $@
+ $(hide) prebuilt/common/findbugs/bin/convertXmlToText -html:fancy.xsl $(PRIVATE_XML_FILE) \
> $@
$(LOCAL_MODULE)-findbugs : $(findbugs_html)
diff --git a/core/main.mk b/core/main.mk
index fdf2567..2c7bb78 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -85,13 +85,48 @@
$(error Directory names containing spaces not supported)
endif
-# Set up version information.
-include $(BUILD_SYSTEM)/version_defaults.mk
+
+# The windows build server currently uses 1.6. This will be fixed.
+ifneq ($(HOST_OS),windows)
+
+# Check for the correct version of java
+java_version := $(shell java -version 2>&1 | head -n 1 | grep '[ "]1\.5[\. "$$]')
+ifeq ($(strip $(java_version)),)
+$(info ************************************************************)
+$(info You are attempting to build with the incorrect version)
+$(info of java.)
+$(info $(space))
+$(info Your version is: $(shell java -version 2>&1 | head -n 1).)
+$(info The correct version is: 1.5.)
+$(info $(space))
+$(info Please follow the machine setup instructions at)
+$(info $(space)$(space)$(space)$(space)http://source.android.com/download)
+$(info ************************************************************)
+$(error stop)
+endif
+
+# Check for the correct version of javac
+javac_version := $(shell javac -version 2>&1 | head -n 1 | grep '[ "]1\.5[\. "$$]')
+ifeq ($(strip $(javac_version)),)
+$(info ************************************************************)
+$(info You are attempting to build with the incorrect version)
+$(info of javac.)
+$(info $(space))
+$(info Your version is: $(shell javac -version 2>&1 | head -n 1).)
+$(info The correct version is: 1.5.)
+$(info $(space))
+$(info Please follow the machine setup instructions at)
+$(info $(space)$(space)$(space)$(space)http://source.android.com/download)
+$(info ************************************************************)
+$(error stop)
+endif
+
+endif # windows
# These are the modifier targets that don't do anything themselves, but
# change the behavior of the build.
# (must be defined before including definitions.make)
-INTERNAL_MODIFIER_TARGETS := showcommands
+INTERNAL_MODIFIER_TARGETS := showcommands checkbuild
# Bring in standard build system definitions.
include $(BUILD_SYSTEM)/definitions.mk
@@ -257,11 +292,11 @@
endif
-# If all they typed was make showcommands, we'll actually build
-# the default target.
-ifeq ($(MAKECMDGOALS),showcommands)
-.PHONY: showcommands
-showcommands: $(DEFAULT_GOAL)
+# If they only used the modifier goals (showcommands, checkbuild), we'll actually
+# build the default target.
+ifeq ($(filter-out $(INTERNAL_MODIFIER_TARGETS),$(MAKECMDGOALS)),)
+.PHONY: $(INTERNAL_MODIFIER_TARGETS)
+$(INTERNAL_MODIFIER_TARGETS): $(DEFAULT_GOAL)
endif
# These targets are going to delete stuff, don't bother including
@@ -301,7 +336,6 @@
dalvik/tools/dmtracedump \
dalvik/tools/hprof-conv \
development/emulator/mksdcard \
- development/tools/activitycreator \
development/tools/line_endings \
development/host \
external/expat \
@@ -328,6 +362,7 @@
dalvik/dx \
dalvik/libcore \
development/apps \
+ development/tools/archquery \
development/tools/androidprefs \
development/tools/apkbuilder \
development/tools/jarutils \
@@ -410,6 +445,10 @@
# Clean up/verify variables defined by the board config file.
TARGET_BOOTLOADER_BOARD_NAME := $(strip $(TARGET_BOOTLOADER_BOARD_NAME))
+TARGET_CPU_ABI := $(strip $(TARGET_CPU_ABI))
+ifeq ($(TARGET_CPU_ABI),)
+ $(error No TARGET_CPU_ABI defined by board config: $(board_config_mk))
+endif
#
# Include all of the makefiles in the system
@@ -491,7 +530,6 @@
# poisons the rest of the tags and shouldn't appear
# on any list.
Default_MODULES := $(sort $(ALL_DEFAULT_INSTALLED_MODULES) \
- $(ALL_BUILT_MODULES) \
$(CUSTOM_MODULES))
# TODO: Remove the 3 places in the tree that use
# ALL_DEFAULT_INSTALLED_MODULES and get rid of it from this list.
@@ -557,7 +595,7 @@
endif
-# config/Makefile contains extra stuff that we don't want to pollute this
+# build/core/Makefile contains extra stuff that we don't want to pollute this
# top-level makefile with. It expects that ALL_DEFAULT_INSTALLED_MODULES
# contains everything that's built during the current make, but it also further
# extends ALL_DEFAULT_INSTALLED_MODULES.
@@ -568,6 +606,20 @@
endif # dont_bother
+# These are additional goals that we build, in order to make sure that there
+# is as little code as possible in the tree that doesn't build.
+modules_to_check := $(foreach m,$(ALL_MODULES),$(ALL_MODULES.$(m).CHECKED))
+
+# If you would like to build all goals, and not skip any intermediate
+# steps, you can pass the "all" modifier goal on the commandline.
+ifneq ($(filter all,$(MAKECMDGOALS)),)
+modules_to_check += $(foreach m,$(ALL_MODULES),$(ALL_MODULES.$(m).BUILT))
+endif
+
+# for easier debugging
+modules_to_check := $(sort $(modules_to_check))
+#$(error modules_to_check $(modules_to_check))
+
# -------------------------------------------------------------------
# This is used to to get the ordering right, you can also use these,
# but they're considered undocumented, so don't complain if their
@@ -585,10 +637,16 @@
# All the droid stuff, in directories
.PHONY: files
-files: prebuilt $(modules_to_install) $(INSTALLED_ANDROID_INFO_TXT_TARGET)
+files: prebuilt \
+ $(modules_to_install) \
+ $(modules_to_check) \
+ $(INSTALLED_ANDROID_INFO_TXT_TARGET)
# -------------------------------------------------------------------
+.PHONY: checkbuild
+checkbuild: $(modules_to_check)
+
.PHONY: ramdisk
ramdisk: $(INSTALLED_RAMDISK_TARGET)
diff --git a/core/pathmap.mk b/core/pathmap.mk
index 13cb80d..e281b9d 100644
--- a/core/pathmap.mk
+++ b/core/pathmap.mk
@@ -83,6 +83,8 @@
sax \
telephony \
wifi \
+ vpn \
+ keystore \
)
#
diff --git a/core/prelink-linux-arm.map b/core/prelink-linux-arm.map
index 1cd2aa4..75c05e5 100644
--- a/core/prelink-linux-arm.map
+++ b/core/prelink-linux-arm.map
@@ -21,6 +21,7 @@
libevent.so 0xAF800000
libssl.so 0xAF700000
libcrypto.so 0xAF500000
+libsysutils.so 0xAF400000
# bluetooth
liba2dp.so 0xAEE00000
@@ -91,19 +92,26 @@
libqcamera.so 0xA9400000
# pv libraries
-libopencorenet_support.so 0xA7D20000
-libpvasf.so 0xA7BC0000
-libpvasfreg.so 0xA7B70000
-libopencoredownload.so 0xA7B40000
-libopencoredownloadreg.so 0xA7B00000
-libopencorenet_support.so 0xA7A00000
-libopencorertsp.so 0xA7900000
-libopencorertspreg.so 0xA7800000
-libopencoreauthor.so 0xA7600000
-libopencorecommon.so 0xA7500000
-libopencoremp4.so 0xA7400000
-libopencoremp4reg.so 0xA7300000
-libopencoreplayer.so 0xA7000000
+libpvasf.so 0xA7C26000
+libpvasfreg.so 0xA7C00000
+libomx_sharedlibrary.so 0xA7BA0000
+libopencore_download.so 0xA7B40000
+libopencore_downloadreg.so 0xA7B00000
+libopencore_net_support.so 0xA7A00000
+libopencore_rtsp.so 0xA7900000
+libopencore_rtspreg.so 0xA7890000
+libopencore_author.so 0xA7800000
+libomx_aacdec_sharedlibrary.so 0xA7700000
+libomx_amrdec_sharedlibrary.so 0xA76A0000
+libomx_amrenc_sharedlibrary.so 0xA7680000
+libomx_avcdec_sharedlibrary.so 0xA7660000
+libomx_avcenc_sharedlibrary.so 0xA7610000
+libomx_m4vdec_sharedlibrary.so 0xA75C0000
+libomx_m4venc_sharedlibrary.so 0xA7590000
+libomx_mp3dec_sharedlibrary.so 0xA7450000
+libopencore_mp4local.so 0xA7400000
+libopencore_mp4localreg.so 0xA7300000
+libopencore_player.so 0xA7000000
# opencore hardware support
libmm-adspsvc.so 0xA6FFD000
@@ -113,6 +121,10 @@
libOmxVidEnc.so 0xA6F60000
libopencorehw.so 0xA6F50000
+# pv libraries
+libopencore_common.so 0xA6000000
+libqcomm_omx.so 0xA5A00000
+
# libraries for specific apps or temporary libraries
libcam_ipl.so 0x9F000000
libwbxml.so 0x9E800000
@@ -137,3 +149,4 @@
librpc.so 0x9A400000
libtrace_test.so 0x9A300000
libsrec_jni.so 0x9A200000
+libcerttool_jni.so 0x9A100000
diff --git a/core/product_config.mk b/core/product_config.mk
index 64488d8..7cfa5f4 100644
--- a/core/product_config.mk
+++ b/core/product_config.mk
@@ -110,11 +110,11 @@
TARGET_BUILD_VARIANT := $(word 2,$(product_goals))
# The build server wants to do make PRODUCT-dream-installclean
- # which really means TARGET_PRODUCT=dream make installclean.
+ # which really means TARGET_PRODUCT=dream make installclean.
ifneq ($(filter-out $(INTERNAL_VALID_VARIANTS),$(TARGET_BUILD_VARIANT)),)
MAKECMDGOALS := $(MAKECMDGOALS) $(TARGET_BUILD_VARIANT)
TARGET_BUILD_VARIANT := eng
- default_goal_substitution :=
+ default_goal_substitution :=
else
default_goal_substitution := $(DEFAULT_GOAL)
endif
@@ -135,7 +135,7 @@
#
# Note that modifying this will not affect the goals that make will
# attempt to build, but it's important because we inspect this value
- # in certain situations (like for "make sdk").
+ # in certain situations (like for "make sdk").
#
MAKECMDGOALS := $(patsubst $(goal_name),$(default_goal_substitution),$(MAKECMDGOALS))
@@ -185,7 +185,10 @@
# in PRODUCT_LOCALES, add them to PRODUCT_LOCALES.
extra_locales := $(filter-out $(PRODUCT_LOCALES),$(CUSTOM_LOCALES))
ifneq (,$(extra_locales))
- $(info Adding CUSTOM_LOCALES [$(extra_locales)] to PRODUCT_LOCALES [$(PRODUCT_LOCALES)])
+ ifneq ($(CALLED_FROM_SETUP),true)
+ # Don't spam stdout, because envsetup.sh may be scraping values from it.
+ $(info Adding CUSTOM_LOCALES [$(extra_locales)] to PRODUCT_LOCALES [$(PRODUCT_LOCALES)])
+ endif
PRODUCT_LOCALES += $(extra_locales)
extra_locales :=
endif
@@ -202,7 +205,7 @@
PRODUCT_MODEL := $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_MODEL))
ifndef PRODUCT_MODEL
- PRODUCT_MODEL := $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_NAME))
+ PRODUCT_MODEL := $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_NAME))
endif
PRODUCT_MANUFACTURER := \
@@ -245,32 +248,19 @@
$(ADDITIONAL_BUILD_PROPERTIES) \
$(PRODUCT_PROPERTY_OVERRIDES)
-# Get the list of OTA public keys for the product.
-OTA_PUBLIC_KEYS := \
- $(sort \
- $(OTA_PUBLIC_KEYS) \
- $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_OTA_PUBLIC_KEYS) \
- )
-
-# HACK: Not all products define OTA keys yet, and the -user build
-# will fail if no keys are defined.
-# TODO: Let a product opt out of needing OTA keys, and stop defaulting to
-# the test key as soon as possible.
-ifeq (,$(strip $(OTA_PUBLIC_KEYS)))
- ifeq (,$(CALLED_FROM_SETUP))
- $(warning WARNING: adding test OTA key)
- endif
- OTA_PUBLIC_KEYS := $(SRC_TARGET_DIR)/product/security/testkey.x509.pem
-endif
+# The OTA key(s) specified by the product config, if any. The names
+# of these keys are stored in the target-files zip so that post-build
+# signing tools can substitute them for the test key embedded by
+# default.
+PRODUCT_OTA_PUBLIC_KEYS := $(sort \
+ $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_OTA_PUBLIC_KEYS))
# ---------------------------------------------------------------
-# Force the simulator to be the simulator, and make BUILD_TYPE
-# default to debug.
+# Simulator overrides
ifeq ($(TARGET_PRODUCT),sim)
+ # Tell the build system to turn on some special cases
+ # to deal with the simulator product.
TARGET_SIMULATOR := true
- ifeq (,$(strip $(TARGET_BUILD_TYPE)))
- TARGET_BUILD_TYPE := debug
- endif
# dexpreopt doesn't work when building the simulator
DISABLE_DEXPREOPT := true
endif
diff --git a/core/static_library.mk b/core/static_library.mk
index 252dfd0..2138e46 100644
--- a/core/static_library.mk
+++ b/core/static_library.mk
@@ -25,5 +25,6 @@
$(all_objects) : TARGET_GLOBAL_CPPFLAGS :=
endif
+$(LOCAL_BUILT_MODULE): $(built_whole_libraries)
$(LOCAL_BUILT_MODULE): $(all_objects)
$(transform-o-to-static-lib)
diff --git a/core/tasks/cts.mk b/core/tasks/cts.mk
index aed01b2..103a738 100644
--- a/core/tasks/cts.mk
+++ b/core/tasks/cts.mk
@@ -25,6 +25,8 @@
endif
CTS_HOST_JAR := $(HOST_OUT_JAVA_LIBRARIES)/cts.jar
+junit_host_jar := $(HOST_OUT_JAVA_LIBRARIES)/junit.jar
+
CTS_CORE_CASE_LIST := android.core.tests.annotation \
android.core.tests.archive \
android.core.tests.concurrent \
@@ -56,6 +58,7 @@
CtsGraphicsTestCases \
CtsHardwareTestCases \
CtsLocationTestCases \
+ CtsMediaTestCases \
CtsOsTestCases \
CtsPermissionTestCases \
CtsProviderTestCases \
@@ -76,7 +79,9 @@
DEFAULT_TEST_PLAN := $(PRIVATE_DIR)/resource/plans
-$(cts_dir)/all_cts_files_stamp: $(CTS_CASE_LIST) | $(ACP)
+$(cts_dir)/all_cts_files_stamp: PRIVATE_JUNIT_HOST_JAR := $(junit_host_jar)
+
+$(cts_dir)/all_cts_files_stamp: $(CTS_CASE_LIST) $(junit_host_jar) $(ACP)
# Make necessary directory for CTS
@rm -rf $(PRIVATE_CTS_DIR)
@mkdir -p $(TMP_DIR)
@@ -87,6 +92,8 @@
# Copy executable to CTS directory
$(hide) $(ACP) -fp $(CTS_HOST_JAR) $(PRIVATE_DIR)/tools
$(hide) $(ACP) -fp $(CTS_EXECUTABLE_PATH) $(PRIVATE_DIR)/tools
+# Copy junit jar
+ $(hide) $(ACP) -fp $(PRIVATE_JUNIT_HOST_JAR) $(PRIVATE_DIR)/tools
# Change mode of the executables
$(hide) chmod ug+rwX $(PRIVATE_DIR)/tools/$(notdir $(CTS_EXECUTABLE_PATH))
$(foreach apk,$(CTS_CASE_LIST), \
@@ -102,7 +109,7 @@
$(hide) java $(PRIVATE_JAVAOPTS) \
-classpath $(PRIVATE_CLASSPATH) \
$(PRIVATE_PARAMS) CollectAllTests $(1) \
- $(2) $(3) $(4)
+ $(2) $(3)
endef
CORE_INTERMEDIATES :=$(call intermediates-dir-for,JAVA_LIBRARIES,core,,COMMON)
diff --git a/core/tasks/localize.mk b/core/tasks/localize.mk
deleted file mode 100644
index 12e7b5c..0000000
--- a/core/tasks/localize.mk
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright (C) 2008 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Rules for building the xlb files for export for translation.
-#
-
-# Gather all of the resource files for the default locale -- that is,
-# all resources in directories called values or values-something, where
-# one of the - separated segments is not two characters long -- those are the
-# language directories, and we don't want those.
-all_resource_files := $(foreach pkg, \
- $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_PACKAGES), \
- $(PACKAGES.$(pkg).RESOURCE_FILES))
-values_resource_files := $(shell echo $(all_resource_files) | \
- tr -s / | \
- tr " " "\n" | \
- grep -E "\/values[^/]*/(strings.xml|arrays.xml)$$" | \
- grep -v -E -e "-[a-zA-Z]{2}[/\-]")
-
-xlb_target := $(PRODUCT_OUT)/strings.xlb
-
-$(xlb_target): $(values_resource_files) | $(LOCALIZE)
- @echo XLB: $@
- $(hide) mkdir -p $(dir $@)
- $(hide) rm -f $@
- $(hide) $(LOCALIZE) xlb $@ $^
-
-# Add a phony target so typing make xlb is convenient
-.PHONY: xlb
-xlb: $(xlb_target)
-
-# We want this on the build-server builds, but no reason to inflict it on
-# everyone
-$(call dist-for-goals, droid, $(xlb_target))
-
diff --git a/core/version_defaults.mk b/core/version_defaults.mk
index 578d779..ca8487f 100644
--- a/core/version_defaults.mk
+++ b/core/version_defaults.mk
@@ -20,6 +20,8 @@
# Guarantees that the following are defined:
# PLATFORM_VERSION
# PLATFORM_SDK_VERSION
+# PLATFORM_VERSION_CODENAME
+# DEFAULT_APP_TARGET_SDK
# BUILD_ID
# BUILD_NUMBER
#
@@ -39,17 +41,40 @@
# which is the version that we reveal to the end user.
# Update this value when the platform version changes (rather
# than overriding it somewhere else). Can be an arbitrary string.
- PLATFORM_VERSION := 1.5
+ PLATFORM_VERSION := Donut
endif
ifeq "" "$(PLATFORM_SDK_VERSION)"
# This is the canonical definition of the SDK version, which defines
- # the set of APIs and functionality available in the platform. This is
- # a single integer, that increases monotonically as updates to the SDK
- # are released.
+ # the set of APIs and functionality available in the platform. It
+ # is a single integer that increases monotonically as updates to
+ # the SDK are released. It should only be incremented when the APIs for
+ # the new release are frozen (so that developers don't write apps against
+ # intermediate builds). During development, this number remains at the
+ # SDK version the branch is based on and PLATFORM_VERSION_CODENAME holds
+ # the code-name of the new development work.
PLATFORM_SDK_VERSION := 3
endif
+ifeq "" "$(PLATFORM_VERSION_CODENAME)"
+ # If the build is not a final release build, then this is the current
+ # development code-name. If this is a final release build, it is simply "REL".
+ PLATFORM_VERSION_CODENAME := Donut
+endif
+
+ifeq "" "$(DEFAULT_APP_TARGET_SDK)"
+ # This is the default minSdkVersion and targetSdkVersion to use for
+ # all .apks created by the build system. It can be overridden by explicitly
+ # setting these in the .apk's AndroidManifest.xml. It is either the code
+ # name of the development build or, if this is a release build, the official
+ # SDK version of this release.
+ ifeq "REL" "$(PLATFORM_VERSION_CODENAME)"
+ DEFAULT_APP_TARGET_SDK := $(PLATFORM_SDK_VERSION)
+ else
+ DEFAULT_APP_TARGET_SDK := $(PLATFORM_VERSION_CODENAME)
+ endif
+endif
+
ifeq "" "$(BUILD_ID)"
# Used to signify special builds. E.g., branches and/or releases,
# like "M5-RC7". Can be an arbitrary string, but must be a single
diff --git a/envsetup.sh b/envsetup.sh
index f8f20ab..984167e 100644
--- a/envsetup.sh
+++ b/envsetup.sh
@@ -289,30 +289,6 @@
#
function chooseproduct()
{
- # Find the makefiles that must exist for a product.
- # Send stderr to /dev/null in case partner isn't present.
- local -a choices
- choices=(`/bin/ls build/target/board/*/BoardConfig.mk vendor/*/*/BoardConfig.mk 2> /dev/null`)
-
- local choice
- local -a prodlist
- for choice in ${choices[@]}
- do
- # The product name is the name of the directory containing
- # the makefile we found, above.
- prodlist=(${prodlist[@]} `dirname ${choice} | xargs basename`)
- done
-
- local index=1
- local p
- echo "Product choices are:"
- for p in ${prodlist[@]}
- do
- echo " $index. $p"
- let "index = $index + 1"
- done
-
-
if [ "x$TARGET_PRODUCT" != x ] ; then
default_value=$TARGET_PRODUCT
else
@@ -327,8 +303,7 @@
local ANSWER
while [ -z "$TARGET_PRODUCT" ]
do
- echo "You can also type the name of a product if you know it."
- echo -n "Which would you like? [$default_value] "
+ echo -n "Which product would you like? [$default_value] "
if [ -z "$1" ] ; then
read ANSWER
else
@@ -338,13 +313,6 @@
if [ -z "$ANSWER" ] ; then
export TARGET_PRODUCT=$default_value
- elif (echo -n $ANSWER | grep -q -e "^[0-9][0-9]*$") ; then
- local poo=`echo -n $ANSWER`
- if [ $poo -le ${#prodlist[@]} ] ; then
- export TARGET_PRODUCT=${prodlist[$(($ANSWER-$_arrayoffset))]}
- else
- echo "** Bad product selection: $ANSWER"
- fi
else
if check_product $ANSWER
then
@@ -976,18 +944,14 @@
echo "Couldn't locate the top of the tree. Try setting TOP." >&2
return
fi
- (cd "$T" && development/tools/runtest $@)
+ (cd "$T" && development/testrunner/runtest.py $@)
}
-# simple shortcut to the runtest.py command
+# TODO: Remove this some time after 1 June 2009
function runtest_py()
{
- T=$(gettop)
- if [ ! "$T" ]; then
- echo "Couldn't locate the top of the tree. Try setting TOP." >&2
- return
- fi
- (cd "$T" && development/testrunner/runtest.py $@)
+ echo "runtest_py is obsolete; use runtest instead" >&2
+ return 1
}
function godir () {
@@ -1045,7 +1009,7 @@
unset _xarray
# Execute the contents of any vendorsetup.sh files we can find.
-for f in `/bin/ls vendor/*/vendorsetup.sh 2> /dev/null`
+for f in `/bin/ls vendor/*/vendorsetup.sh vendor/*/build/vendorsetup.sh 2> /dev/null`
do
echo "including $f"
. $f
diff --git a/libs/host/Android.mk b/libs/host/Android.mk
index 81f2cc5..d02e4b2 100644
--- a/libs/host/Android.mk
+++ b/libs/host/Android.mk
@@ -3,7 +3,6 @@
LOCAL_SRC_FILES:= \
CopyFile.c \
- Directories.cpp \
pseudolocalize.cpp
ifeq ($(HOST_OS),cygwin)
diff --git a/libs/host/Directories.cpp b/libs/host/Directories.cpp
deleted file mode 100644
index a34f5b7..0000000
--- a/libs/host/Directories.cpp
+++ /dev/null
@@ -1,42 +0,0 @@
-#include <host/Directories.h>
-#include <utils/String8.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-
-#ifdef HAVE_MS_C_RUNTIME
-#include <direct.h>
-#endif
-
-using namespace android;
-using namespace std;
-
-string
-parent_dir(const string& path)
-{
- return string(String8(path.c_str()).getPathDir().string());
-}
-
-int
-mkdirs(const char* last)
-{
- String8 dest;
- const char* s = last-1;
- int err;
- do {
- s++;
- if (s > last && (*s == '.' || *s == 0)) {
- String8 part(last, s-last);
- dest.appendPath(part);
-#ifdef HAVE_MS_C_RUNTIME
- err = _mkdir(dest.string());
-#else
- err = mkdir(dest.string(), S_IRUSR|S_IWUSR|S_IXUSR|S_IRGRP|S_IXGRP);
-#endif
- if (err != 0) {
- return err;
- }
- last = s+1;
- }
- } while (*s);
- return 0;
-}
diff --git a/target/board/generic/BoardConfig.mk b/target/board/generic/BoardConfig.mk
index a874742..6ec2de3 100644
--- a/target/board/generic/BoardConfig.mk
+++ b/target/board/generic/BoardConfig.mk
@@ -7,5 +7,6 @@
TARGET_NO_BOOTLOADER := true
TARGET_NO_KERNEL := true
TARGET_NO_RADIOIMAGE := true
+TARGET_CPU_ABI := armeabi
HAVE_HTC_AUDIO_DRIVER := true
BOARD_USES_GENERIC_AUDIO := true
diff --git a/target/board/sim/BoardConfig.mk b/target/board/sim/BoardConfig.mk
index 92679d9..491b30f 100644
--- a/target/board/sim/BoardConfig.mk
+++ b/target/board/sim/BoardConfig.mk
@@ -17,6 +17,9 @@
# Don't bother with a kernel
TARGET_NO_KERNEL := true
+# The simulator does not support native code at all
+TARGET_CPU_ABI := none
+
#the simulator partially emulates the original HTC /dev/eac audio interface
HAVE_HTC_AUDIO_DRIVER := true
BOARD_USES_GENERIC_AUDIO := true
diff --git a/target/product/core.mk b/target/product/core.mk
index d79b1e1..204345e 100644
--- a/target/product/core.mk
+++ b/target/product/core.mk
@@ -12,13 +12,18 @@
Launcher \
HTMLViewer \
Phone \
+ ApplicationsProvider \
ContactsProvider \
DownloadProvider \
GoogleSearch \
MediaProvider \
+ PicoTts \
SettingsProvider \
TelephonyProvider \
+ TtsService \
+ VpnServices \
UserDictionaryProvider \
PackageInstaller \
+ WebSearchProvider \
Bugreport
diff --git a/target/product/min_dev.mk b/target/product/min_dev.mk
index 7d0fbe6..005af70 100644
--- a/target/product/min_dev.mk
+++ b/target/product/min_dev.mk
@@ -12,6 +12,7 @@
MediaProvider \
SettingsProvider \
PackageInstaller \
+ WebSearchProvider \
Bugreport \
Launcher \
Settings \
diff --git a/target/product/sdk.mk b/target/product/sdk.mk
index 5c276cf..e9881bf 100644
--- a/target/product/sdk.mk
+++ b/target/product/sdk.mk
@@ -22,8 +22,10 @@
OpenWnn \
libWnnEngDic \
libWnnJpnDic \
+ libWnnZHCNDic \
libwnndict \
ApiDemos \
+ GestureBuilder \
SoftKeyboard
PRODUCT_COPY_FILES := \
diff --git a/tools/apicheck/src/com/android/apicheck/ApiCheck.java b/tools/apicheck/src/com/android/apicheck/ApiCheck.java
index f78117c..20a98ce 100644
--- a/tools/apicheck/src/com/android/apicheck/ApiCheck.java
+++ b/tools/apicheck/src/com/android/apicheck/ApiCheck.java
@@ -20,7 +20,6 @@
import org.xml.sax.helpers.*;
import java.io.*;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.Stack;
public class ApiCheck {
@@ -83,62 +82,62 @@
}
}
- String xmlFileName = args.get(0);
- String xmlFileNameNew = args.get(1);
- XMLReader xmlreader = null;
- try {
- // parse the XML files into our data structures
- xmlreader = XMLReaderFactory.createXMLReader();
- ApiCheck acheck = new ApiCheck();
- MakeHandler handler = acheck.new MakeHandler();
- xmlreader.setContentHandler(handler);
- xmlreader.setErrorHandler(handler);
- FileReader filereader = new FileReader(xmlFileName);
- xmlreader.parse(new InputSource(filereader));
- FileReader filereaderNew = new FileReader(xmlFileNameNew);
- xmlreader.parse(new InputSource(filereaderNew));
+ ApiCheck acheck = new ApiCheck();
- // establish the superclass relationships
- handler.getOldApi().resolveSuperclasses();
- handler.getNewApi().resolveSuperclasses();
-
- // finally, run the consistency check
- handler.getOldApi().isConsistent(handler.getNewApi());
+ ApiInfo oldApi = acheck.parseApi(args.get(0));
+ ApiInfo newApi = acheck.parseApi(args.get(1));
- } catch (SAXParseException e) {
- Errors.error(Errors.PARSE_ERROR,
- new SourcePositionInfo(xmlFileName, e.getLineNumber(), 0),
- e.getMessage());
- } catch (Exception e) {
- e.printStackTrace();
- Errors.error(Errors.PARSE_ERROR,
- new SourcePositionInfo(xmlFileName, 0, 0),
- e.getMessage());
- }
+ // only run the consistency check if we haven't had XML parse errors
+ if (!Errors.hadError) {
+ oldApi.isConsistent(newApi);
+ }
Errors.printErrors();
System.exit(Errors.hadError ? 1 : 0);
}
- private class MakeHandler extends DefaultHandler {
+ public ApiInfo parseApi(String xmlFile) {
+ FileReader fileReader = null;
+ try {
+ XMLReader xmlreader = XMLReaderFactory.createXMLReader();
+ MakeHandler handler = new MakeHandler();
+ xmlreader.setContentHandler(handler);
+ xmlreader.setErrorHandler(handler);
+ fileReader = new FileReader(xmlFile);
+ xmlreader.parse(new InputSource(fileReader));
+ ApiInfo apiInfo = handler.getApi();
+ apiInfo.resolveSuperclasses();
+ return apiInfo;
+ } catch (SAXParseException e) {
+ Errors.error(Errors.PARSE_ERROR,
+ new SourcePositionInfo(xmlFile, e.getLineNumber(), 0),
+ e.getMessage());
+ } catch (Exception e) {
+ e.printStackTrace();
+ Errors.error(Errors.PARSE_ERROR,
+ new SourcePositionInfo(xmlFile, 0, 0), e.getMessage());
+ } finally {
+ if (fileReader != null) {
+ try {
+ fileReader.close();
+ } catch (IOException ignored) {}
+ }
+ }
+ return null;
+ }
+
+ private static class MakeHandler extends DefaultHandler {
- private Integer mWarningCount;
- private ApiInfo mOriginalApi;
- private ApiInfo mNewApi;
- private boolean mOldApi;
+ private ApiInfo mApi;
private PackageInfo mCurrentPackage;
private ClassInfo mCurrentClass;
private AbstractMethodInfo mCurrentMethod;
- private ConstructorInfo mCurrentConstructor;
private Stack<ClassInfo> mClassScope = new Stack<ClassInfo>();
-
-
+
+
public MakeHandler() {
super();
- mOriginalApi = new ApiInfo();
- mNewApi = new ApiInfo();
- mOldApi = true;
-
+ mApi = new ApiInfo();
}
public void startElement(String uri, String localName, String qName,
@@ -229,25 +228,11 @@
mCurrentPackage.addClass(mCurrentClass);
mCurrentClass = mClassScope.pop();
} else if (qName.equals("package")){
- if (mOldApi) {
- mOriginalApi.addPackage(mCurrentPackage);
- } else {
- mNewApi.addPackage(mCurrentPackage);
- }
+ mApi.addPackage(mCurrentPackage);
}
}
- public void endDocument() {
- mOldApi = !mOldApi;
+ public ApiInfo getApi() {
+ return mApi;
}
-
- public ApiInfo getOldApi() {
- return mOriginalApi;
- }
-
- public ApiInfo getNewApi() {
- return mNewApi;
- }
-
-
- }
+ }
}
diff --git a/tools/apicheck/src/com/android/apicheck/ClassInfo.java b/tools/apicheck/src/com/android/apicheck/ClassInfo.java
index 4bbf78b..5405ad2 100644
--- a/tools/apicheck/src/com/android/apicheck/ClassInfo.java
+++ b/tools/apicheck/src/com/android/apicheck/ClassInfo.java
@@ -187,8 +187,8 @@
}
for (FieldInfo mInfo : mFields.values()) {
- if (cl.mFields.containsKey(mInfo.qualifiedName())) {
- if (!mInfo.isConsistent(cl.mFields.get(mInfo.qualifiedName()))) {
+ if (cl.mFields.containsKey(mInfo.name())) {
+ if (!mInfo.isConsistent(cl.mFields.get(mInfo.name()))) {
consistent = false;
}
} else {
@@ -267,7 +267,7 @@
}
public void addField(FieldInfo fInfo) {
- mFields.put(fInfo.qualifiedName(), fInfo);
+ mFields.put(fInfo.name(), fInfo);
}
@@ -279,4 +279,26 @@
return mExistsInBoth;
}
+ public Map<String, ConstructorInfo> allConstructors() {
+ return mConstructors;
+ }
+
+ public Map<String, FieldInfo> allFields() {
+ return mFields;
+ }
+
+ public Map<String, MethodInfo> allMethods() {
+ return mMethods;
+ }
+
+ /**
+ * Returns the class hierarchy for this class, starting with this class.
+ */
+ public Iterable<ClassInfo> hierarchy() {
+ List<ClassInfo> result = new ArrayList<ClassInfo>(4);
+ for (ClassInfo c = this; c != null; c = c.mSuperClass) {
+ result.add(c);
+ }
+ return result;
+ }
}
diff --git a/tools/apicheck/src/com/android/apicheck/ConstructorInfo.java b/tools/apicheck/src/com/android/apicheck/ConstructorInfo.java
index 57d7617..f36c7cd 100644
--- a/tools/apicheck/src/com/android/apicheck/ConstructorInfo.java
+++ b/tools/apicheck/src/com/android/apicheck/ConstructorInfo.java
@@ -55,11 +55,12 @@
}
public String getHashableName() {
- String returnString = qualifiedName();
+ StringBuilder result = new StringBuilder();
+ result.append(name());
for (ParameterInfo pInfo : mParameters) {
- returnString += ":" + pInfo.getType();
+ result.append(":").append(pInfo.getType());
}
- return returnString;
+ return result.toString();
}
public boolean isInBoth() {
diff --git a/tools/apicheck/src/com/android/apicheck/MethodInfo.java b/tools/apicheck/src/com/android/apicheck/MethodInfo.java
index 86e20de..e4e4537 100644
--- a/tools/apicheck/src/com/android/apicheck/MethodInfo.java
+++ b/tools/apicheck/src/com/android/apicheck/MethodInfo.java
@@ -195,7 +195,7 @@
}
public String getHashableName() {
- return qualifiedName() + getParameterHash();
+ return name() + getParameterHash();
}
public String getSignature() {
diff --git a/tools/applypatch/Android.mk b/tools/applypatch/Android.mk
index 09f9862..9a6d2be 100644
--- a/tools/applypatch/Android.mk
+++ b/tools/applypatch/Android.mk
@@ -12,18 +12,40 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+ifneq ($(TARGET_SIMULATOR),true)
+
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
-ifneq ($(TARGET_SIMULATOR),true)
+LOCAL_SRC_FILES := applypatch.c bsdiff.c freecache.c imgpatch.c utils.c
+LOCAL_MODULE := libapplypatch
+LOCAL_MODULE_TAGS := eng
+LOCAL_C_INCLUDES += external/bzip2 external/zlib bootable/recovery
+LOCAL_STATIC_LIBRARIES += libmtdutils libmincrypt libbz libz
-LOCAL_SRC_FILES := applypatch.c bsdiff.c freecache.c
+include $(BUILD_STATIC_LIBRARY)
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := main.c
LOCAL_MODULE := applypatch
LOCAL_FORCE_STATIC_EXECUTABLE := true
LOCAL_MODULE_TAGS := eng
-LOCAL_C_INCLUDES += external/bzip2
-LOCAL_STATIC_LIBRARIES += libmincrypt libbz libc
+LOCAL_STATIC_LIBRARIES += libapplypatch
+LOCAL_STATIC_LIBRARIES += libmtdutils libmincrypt libbz libz
+LOCAL_STATIC_LIBRARIES += libcutils libstdc++ libc
include $(BUILD_EXECUTABLE)
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := imgdiff.c utils.c
+LOCAL_MODULE := imgdiff
+LOCAL_FORCE_STATIC_EXECUTABLE := true
+LOCAL_MODULE_TAGS := eng
+LOCAL_C_INCLUDES += external/zlib
+LOCAL_STATIC_LIBRARIES += libz
+
+include $(BUILD_HOST_EXECUTABLE)
+
endif # !TARGET_SIMULATOR
diff --git a/tools/applypatch/applypatch.c b/tools/applypatch/applypatch.c
index 9954869..06089ea 100644
--- a/tools/applypatch/applypatch.c
+++ b/tools/applypatch/applypatch.c
@@ -25,12 +25,25 @@
#include "mincrypt/sha.h"
#include "applypatch.h"
+#include "mtdutils/mtdutils.h"
+
+int SaveFileContents(const char* filename, FileContents file);
+int LoadMTDContents(const char* filename, FileContents* file);
+int ParseSha1(const char* str, uint8_t* digest);
+
+static int mtd_partitions_scanned = 0;
// Read a file into memory; store it and its associated metadata in
// *file. Return 0 on success.
int LoadFileContents(const char* filename, FileContents* file) {
file->data = NULL;
+ // A special 'filename' beginning with "MTD:" means to load the
+ // contents of an MTD partition.
+ if (strncmp(filename, "MTD:", 4) == 0) {
+ return LoadMTDContents(filename, file);
+ }
+
if (stat(filename, &file->st) != 0) {
fprintf(stderr, "failed to stat \"%s\": %s\n", filename, strerror(errno));
return -1;
@@ -43,6 +56,7 @@
if (f == NULL) {
fprintf(stderr, "failed to open \"%s\": %s\n", filename, strerror(errno));
free(file->data);
+ file->data = NULL;
return -1;
}
@@ -51,6 +65,7 @@
fprintf(stderr, "short read of \"%s\" (%d bytes of %d)\n",
filename, bytes_read, file->size);
free(file->data);
+ file->data = NULL;
return -1;
}
fclose(f);
@@ -59,6 +74,182 @@
return 0;
}
+static size_t* size_array;
+// comparison function for qsort()ing an int array of indexes into
+// size_array[].
+static int compare_size_indices(const void* a, const void* b) {
+ int aa = *(int*)a;
+ int bb = *(int*)b;
+ if (size_array[aa] < size_array[bb]) {
+ return -1;
+ } else if (size_array[aa] > size_array[bb]) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+// Load the contents of an MTD partition into the provided
+// FileContents. filename should be a string of the form
+// "MTD:<partition_name>:<size_1>:<sha1_1>:<size_2>:<sha1_2>:...".
+// The smallest size_n bytes for which that prefix of the mtd contents
+// has the corresponding sha1 hash will be loaded. It is acceptable
+// for a size value to be repeated with different sha1s. Will return
+// 0 on success.
+//
+// This complexity is needed because if an OTA installation is
+// interrupted, the partition might contain either the source or the
+// target data, which might be of different lengths. We need to know
+// the length in order to read from MTD (there is no "end-of-file"
+// marker), so the caller must specify the possible lengths and the
+// hash of the data, and we'll do the load expecting to find one of
+// those hashes.
+int LoadMTDContents(const char* filename, FileContents* file) {
+ char* copy = strdup(filename);
+ const char* magic = strtok(copy, ":");
+ if (strcmp(magic, "MTD") != 0) {
+ fprintf(stderr, "LoadMTDContents called with bad filename (%s)\n",
+ filename);
+ return -1;
+ }
+ const char* partition = strtok(NULL, ":");
+
+ int i;
+ int colons = 0;
+ for (i = 0; filename[i] != '\0'; ++i) {
+ if (filename[i] == ':') {
+ ++colons;
+ }
+ }
+ if (colons < 3 || colons%2 == 0) {
+ fprintf(stderr, "LoadMTDContents called with bad filename (%s)\n",
+ filename);
+ }
+
+ int pairs = (colons-1)/2; // # of (size,sha1) pairs in filename
+ int* index = malloc(pairs * sizeof(int));
+ size_t* size = malloc(pairs * sizeof(size_t));
+ char** sha1sum = malloc(pairs * sizeof(char*));
+
+ for (i = 0; i < pairs; ++i) {
+ const char* size_str = strtok(NULL, ":");
+ size[i] = strtol(size_str, NULL, 10);
+ if (size[i] == 0) {
+ fprintf(stderr, "LoadMTDContents called with bad size (%s)\n", filename);
+ return -1;
+ }
+ sha1sum[i] = strtok(NULL, ":");
+ index[i] = i;
+ }
+
+ // sort the index[] array so it indexes the pairs in order of
+ // increasing size.
+ size_array = size;
+ qsort(index, pairs, sizeof(int), compare_size_indices);
+
+ if (!mtd_partitions_scanned) {
+ mtd_scan_partitions();
+ mtd_partitions_scanned = 1;
+ }
+
+ const MtdPartition* mtd = mtd_find_partition_by_name(partition);
+ if (mtd == NULL) {
+ fprintf(stderr, "mtd partition \"%s\" not found (loading %s)\n",
+ partition, filename);
+ return -1;
+ }
+
+ MtdReadContext* ctx = mtd_read_partition(mtd);
+ if (ctx == NULL) {
+ fprintf(stderr, "failed to initialize read of mtd partition \"%s\"\n",
+ partition);
+ return -1;
+ }
+
+ SHA_CTX sha_ctx;
+ SHA_init(&sha_ctx);
+ uint8_t parsed_sha[SHA_DIGEST_SIZE];
+
+ // allocate enough memory to hold the largest size.
+ file->data = malloc(size[index[pairs-1]]);
+ char* p = (char*)file->data;
+ file->size = 0; // # bytes read so far
+
+ for (i = 0; i < pairs; ++i) {
+ // Read enough additional bytes to get us up to the next size
+ // (again, we're trying the possibilities in order of increasing
+ // size).
+ size_t next = size[index[i]] - file->size;
+ size_t read = 0;
+ if (next > 0) {
+ read = mtd_read_data(ctx, p, next);
+ if (next != read) {
+ fprintf(stderr, "short read (%d bytes of %d) for partition \"%s\"\n",
+ read, next, partition);
+ free(file->data);
+ file->data = NULL;
+ return -1;
+ }
+ SHA_update(&sha_ctx, p, read);
+ file->size += read;
+ }
+
+ // Duplicate the SHA context and finalize the duplicate so we can
+ // check it against this pair's expected hash.
+ SHA_CTX temp_ctx;
+ memcpy(&temp_ctx, &sha_ctx, sizeof(SHA_CTX));
+ const uint8_t* sha_so_far = SHA_final(&temp_ctx);
+
+ if (ParseSha1(sha1sum[index[i]], parsed_sha) != 0) {
+ fprintf(stderr, "failed to parse sha1 %s in %s\n",
+ sha1sum[index[i]], filename);
+ free(file->data);
+ file->data = NULL;
+ return -1;
+ }
+
+ if (memcmp(sha_so_far, parsed_sha, SHA_DIGEST_SIZE) == 0) {
+ // we have a match. stop reading the partition; we'll return
+ // the data we've read so far.
+ printf("mtd read matched size %d sha %s\n",
+ size[index[i]], sha1sum[index[i]]);
+ break;
+ }
+
+ p += read;
+ }
+
+ mtd_read_close(ctx);
+
+ if (i == pairs) {
+ // Ran off the end of the list of (size,sha1) pairs without
+ // finding a match.
+ fprintf(stderr, "contents of MTD partition \"%s\" didn't match %s\n",
+ partition, filename);
+ free(file->data);
+ file->data = NULL;
+ return -1;
+ }
+
+ const uint8_t* sha_final = SHA_final(&sha_ctx);
+ for (i = 0; i < SHA_DIGEST_SIZE; ++i) {
+ file->sha1[i] = sha_final[i];
+ }
+
+ // Fake some stat() info.
+ file->st.st_mode = 0644;
+ file->st.st_uid = 0;
+ file->st.st_gid = 0;
+
+ free(copy);
+ free(index);
+ free(size);
+ free(sha1sum);
+
+ return 0;
+}
+
+
// Save the contents of the given FileContents object under the given
// filename. Return 0 on success.
int SaveFileContents(const char* filename, FileContents file) {
@@ -91,6 +282,76 @@
return 0;
}
+// Copy the contents of source_file to target_mtd partition, a string
+// of the form "MTD:<partition>[:...]". Return 0 on success.
+int CopyToMTDPartition(const char* source_file, const char* target_mtd) {
+ char* partition = strchr(target_mtd, ':');
+ if (partition == NULL) {
+ fprintf(stderr, "bad MTD target name \"%s\"\n", target_mtd);
+ return -1;
+ }
+ ++partition;
+ // Trim off anything after a colon, eg "MTD:boot:blah:blah:blah...".
+ // We want just the partition name "boot".
+ partition = strdup(partition);
+ char* end = strchr(partition, ':');
+ if (end != NULL)
+ *end = '\0';
+
+ FILE* f = fopen(source_file, "rb");
+ if (f == NULL) {
+ fprintf(stderr, "failed to open %s for reading: %s\n",
+ source_file, strerror(errno));
+ return -1;
+ }
+
+ if (!mtd_partitions_scanned) {
+ mtd_scan_partitions();
+ mtd_partitions_scanned = 1;
+ }
+
+ const MtdPartition* mtd = mtd_find_partition_by_name(partition);
+ if (mtd == NULL) {
+ fprintf(stderr, "mtd partition \"%s\" not found for writing\n", partition);
+ return -1;
+ }
+
+ MtdWriteContext* ctx = mtd_write_partition(mtd);
+ if (ctx == NULL) {
+ fprintf(stderr, "failed to init mtd partition \"%s\" for writing\n",
+ partition);
+ return -1;
+ }
+
+ const int buffer_size = 4096;
+ char buffer[buffer_size];
+ size_t read;
+ while ((read = fread(buffer, 1, buffer_size, f)) > 0) {
+ size_t written = mtd_write_data(ctx, buffer, read);
+ if (written != read) {
+ fprintf(stderr, "only wrote %d of %d bytes to MTD %s\n",
+ written, read, partition);
+ mtd_write_close(ctx);
+ return -1;
+ }
+ }
+
+ fclose(f);
+ if (mtd_erase_blocks(ctx, -1) < 0) {
+ fprintf(stderr, "error finishing mtd write of %s\n", partition);
+ mtd_write_close(ctx);
+ return -1;
+ }
+
+ if (mtd_write_close(ctx)) {
+ fprintf(stderr, "error closing mtd write of %s\n", partition);
+ return -1;
+ }
+
+ free(partition);
+ return 0;
+}
+
// Take a string 'str' of 40 hex digits and parse it into the 20
// byte array 'digest'. 'str' may contain only the digest or be of
@@ -176,8 +437,13 @@
FileContents file;
file.data = NULL;
+ // It's okay to specify no sha1s; the check will pass if the
+ // LoadFileContents is successful. (Useful for reading MTD
+ // partitions, where the filename encodes the sha1s; no need to
+ // check them twice.)
if (LoadFileContents(argv[2], &file) != 0 ||
- FindMatchingPatch(file.sha1, patches, num_patches) == NULL) {
+ (num_patches > 0 &&
+ FindMatchingPatch(file.sha1, patches, num_patches) == NULL)) {
fprintf(stderr, "file \"%s\" doesn't have any of expected "
"sha1 sums; checking cache\n", argv[2]);
@@ -226,27 +492,57 @@
// replacement for it) and idempotent (it's okay to run this program
// multiple times).
//
-// - if the sha1 hash of <file> is <tgt-sha1>, does nothing and exits
+// - if the sha1 hash of <tgt-file> is <tgt-sha1>, does nothing and exits
// successfully.
//
-// - otherwise, if the sha1 hash of <file> is <src-sha1>, applies the
-// bsdiff <patch> to <file> to produce a new file (the type of patch
+// - otherwise, if the sha1 hash of <src-file> is <src-sha1>, applies the
+// bsdiff <patch> to <src-file> to produce a new file (the type of patch
// is automatically detected from the file header). If that new
-// file has sha1 hash <tgt-sha1>, moves it to replace <file>, and
-// exits successfully.
+// file has sha1 hash <tgt-sha1>, moves it to replace <tgt-file>, and
+// exits successfully. Note that if <src-file> and <tgt-file> are
+// not the same, <src-file> is NOT deleted on success. <tgt-file>
+// may be the string "-" to mean "the same as src-file".
//
// - otherwise, or if any error is encountered, exits with non-zero
// status.
+//
+// <src-file> (or <file> in check mode) may refer to an MTD partition
+// to read the source data. See the comments for the
+// LoadMTDContents() function above for the format of such a filename.
+//
+//
+// As you might guess from the arguments, this function used to be
+// main(); it was split out this way so applypatch could be built as a
+// static library and linked into other executables as well. In the
+// future only the library form will exist; we will not need to build
+// this as a standalone executable.
+//
+// The arguments to this function are just the command-line of the
+// standalone executable:
+//
+// <src-file> <tgt-file> <tgt-sha1> <tgt-size> [<src-sha1>:<patch> ...]
+// to apply a patch. Returns 0 on success, 1 on failure.
+//
+// "-c" <file> [<sha1> ...]
+// to check a file's contents against zero or more sha1s. Returns
+// 0 if it matches any of them, 1 if it doesn't.
+//
+// "-s" <bytes>
+// returns 0 if enough free space is available on /cache; 1 if it
+// does not.
+//
+// "-l"
+// shows open-source license information and returns 0.
+//
+// This function returns 2 if the arguments are not understood (in the
+// standalone executable, this causes the usage message to be
+// printed).
+//
+// TODO: make the interface more sensible for use as a library.
-int main(int argc, char** argv) {
+int applypatch(int argc, char** argv) {
if (argc < 2) {
- usage:
- fprintf(stderr, "usage: %s <file> <tgt-sha1> <tgt-size> [<src-sha1>:<patch> ...]\n"
- " or %s -c <file> [<sha1> ...]\n"
- " or %s -s <bytes>\n"
- " or %s -l\n",
- argv[0], argv[0], argv[0], argv[0]);
- return 1;
+ return 2;
}
if (strncmp(argv[1], "-l", 3) == 0) {
@@ -259,7 +555,7 @@
if (strncmp(argv[1], "-s", 3) == 0) {
if (argc != 3) {
- goto usage;
+ return 2;
}
size_t bytes = strtol(argv[2], NULL, 10);
if (MakeFreeSpaceOnCache(bytes) < 0) {
@@ -273,26 +569,22 @@
uint8_t target_sha1[SHA_DIGEST_SIZE];
const char* source_filename = argv[1];
-
- // assume that source_filename (eg "/system/app/Foo.apk") is located
- // on the same filesystem as its top-level directory ("/system").
- // We need something that exists for calling statfs().
- char* source_fs = strdup(argv[1]);
- char* slash = strchr(source_fs+1, '/');
- if (slash != NULL) {
- *slash = '\0';
+ const char* target_filename = argv[2];
+ if (target_filename[0] == '-' &&
+ target_filename[1] == '\0') {
+ target_filename = source_filename;
}
- if (ParseSha1(argv[2], target_sha1) != 0) {
- fprintf(stderr, "failed to parse tgt-sha1 \"%s\"\n", argv[2]);
+ if (ParseSha1(argv[3], target_sha1) != 0) {
+ fprintf(stderr, "failed to parse tgt-sha1 \"%s\"\n", argv[3]);
return 1;
}
- unsigned long target_size = strtoul(argv[3], NULL, 0);
+ unsigned long target_size = strtoul(argv[4], NULL, 0);
int num_patches;
Patch* patches;
- if (ParseShaArgs(argc-4, argv+4, &patches, &num_patches) < 0) { return 1; }
+ if (ParseShaArgs(argc-5, argv+5, &patches, &num_patches) < 0) { return 1; }
FileContents copy_file;
FileContents source_file;
@@ -300,15 +592,27 @@
const char* copy_patch_filename = NULL;
int made_copy = 0;
- if (LoadFileContents(source_filename, &source_file) == 0) {
+ // We try to load the target file into the source_file object.
+ if (LoadFileContents(target_filename, &source_file) == 0) {
if (memcmp(source_file.sha1, target_sha1, SHA_DIGEST_SIZE) == 0) {
// The early-exit case: the patch was already applied, this file
// has the desired hash, nothing for us to do.
fprintf(stderr, "\"%s\" is already target; no patch needed\n",
- source_filename);
+ target_filename);
return 0;
}
+ }
+ if (source_file.data == NULL ||
+ (target_filename != source_filename &&
+ strcmp(target_filename, source_filename) != 0)) {
+ // Need to load the source file: either we failed to load the
+ // target file, or we did but it's different from the source file.
+ free(source_file.data);
+ LoadFileContents(source_filename, &source_file);
+ }
+
+ if (source_file.data != NULL) {
const Patch* to_use =
FindMatchingPatch(source_file.sha1, patches, num_patches);
if (to_use != NULL) {
@@ -339,30 +643,70 @@
}
}
- // Is there enough room in the target filesystem to hold the patched file?
- size_t free_space = FreeSpaceForFile(source_fs);
- int enough_space = free_space > (target_size * 3 / 2); // 50% margin of error
- printf("target %ld bytes; free space %ld bytes; enough %d\n",
- (long)target_size, (long)free_space, enough_space);
+ // Is there enough room in the target filesystem to hold the patched
+ // file?
- if (!enough_space && source_patch_filename != NULL) {
- // Using the original source, but not enough free space. First
- // copy the source file to cache, then delete it from the original
- // location.
+ if (strncmp(target_filename, "MTD:", 4) == 0) {
+ // If the target is an MTD partition, we're actually going to
+ // write the output to /tmp and then copy it to the partition.
+ // statfs() always returns 0 blocks free for /tmp, so instead
+ // we'll just assume that /tmp has enough space to hold the file.
+
+ // We still write the original source to cache, in case the MTD
+ // write is interrupted.
if (MakeFreeSpaceOnCache(source_file.size) < 0) {
fprintf(stderr, "not enough free space on /cache\n");
return 1;
}
-
if (SaveFileContents(CACHE_TEMP_SOURCE, source_file) < 0) {
fprintf(stderr, "failed to back up source file\n");
return 1;
}
made_copy = 1;
- unlink(source_filename);
+ } else {
+ // assume that target_filename (eg "/system/app/Foo.apk") is located
+ // on the same filesystem as its top-level directory ("/system").
+ // We need something that exists for calling statfs().
+ char* target_fs = strdup(target_filename);
+ char* slash = strchr(target_fs+1, '/');
+ if (slash != NULL) {
+ *slash = '\0';
+ }
- size_t free_space = FreeSpaceForFile(source_fs);
- printf("(now %ld bytes free for source)\n", (long)free_space);
+ size_t free_space = FreeSpaceForFile(target_fs);
+ int enough_space =
+ free_space > (target_size * 3 / 2); // 50% margin of error
+ printf("target %ld bytes; free space %ld bytes; enough %d\n",
+ (long)target_size, (long)free_space, enough_space);
+
+ if (!enough_space && source_patch_filename != NULL) {
+ // Using the original source, but not enough free space. First
+ // copy the source file to cache, then delete it from the original
+ // location.
+
+ if (strncmp(source_filename, "MTD:", 4) == 0) {
+ // It's impossible to free space on the target filesystem by
+ // deleting the source if the source is an MTD partition. If
+ // we're ever in a state where we need to do this, fail.
+ fprintf(stderr, "not enough free space for target but source is MTD\n");
+ return 1;
+ }
+
+ if (MakeFreeSpaceOnCache(source_file.size) < 0) {
+ fprintf(stderr, "not enough free space on /cache\n");
+ return 1;
+ }
+
+ if (SaveFileContents(CACHE_TEMP_SOURCE, source_file) < 0) {
+ fprintf(stderr, "failed to back up source file\n");
+ return 1;
+ }
+ made_copy = 1;
+ unlink(source_filename);
+
+ size_t free_space = FreeSpaceForFile(target_fs);
+ printf("(now %ld bytes free for target)\n", (long)free_space);
+ }
}
FileContents* source_to_use;
@@ -375,14 +719,19 @@
patch_filename = copy_patch_filename;
}
- // We write the decoded output to "<file>.patch".
- char* outname = (char*)malloc(strlen(source_filename) + 10);
- strcpy(outname, source_filename);
- strcat(outname, ".patch");
+ char* outname = NULL;
+ if (strncmp(target_filename, "MTD:", 4) == 0) {
+ outname = MTD_TARGET_TEMP_FILE;
+ } else {
+ // We write the decoded output to "<tgt-file>.patch".
+ outname = (char*)malloc(strlen(target_filename) + 10);
+ strcpy(outname, target_filename);
+ strcat(outname, ".patch");
+ }
FILE* output = fopen(outname, "wb");
if (output == NULL) {
- fprintf(stderr, "failed to patch file %s: %s\n",
- source_filename, strerror(errno));
+ fprintf(stderr, "failed to open output file %s: %s\n",
+ outname, strerror(errno));
return 1;
}
@@ -410,13 +759,22 @@
} else if (header_bytes_read >= 8 &&
memcmp(header, "BSDIFF40", 8) == 0) {
int result = ApplyBSDiffPatch(source_to_use->data, source_to_use->size,
- patch_filename, output, &ctx);
+ patch_filename, 0, output, &ctx);
if (result != 0) {
fprintf(stderr, "ApplyBSDiffPatch failed\n");
return result;
}
+ } else if (header_bytes_read >= 8 &&
+ memcmp(header, "IMGDIFF", 7) == 0 &&
+ (header[7] == '1' || header[7] == '2')) {
+ int result = ApplyImagePatch(source_to_use->data, source_to_use->size,
+ patch_filename, output, &ctx);
+ if (result != 0) {
+ fprintf(stderr, "ApplyImagePatch failed\n");
+ return result;
+ }
} else {
- fprintf(stderr, "Unknown patch file format");
+ fprintf(stderr, "Unknown patch file format\n");
return 1;
}
@@ -430,22 +788,32 @@
return 1;
}
- // Give the .patch file the same owner, group, and mode of the
- // original source file.
- if (chmod(outname, source_to_use->st.st_mode) != 0) {
- fprintf(stderr, "chmod of \"%s\" failed: %s\n", outname, strerror(errno));
- return 1;
- }
- if (chown(outname, source_to_use->st.st_uid, source_to_use->st.st_gid) != 0) {
- fprintf(stderr, "chown of \"%s\" failed: %s\n", outname, strerror(errno));
- return 1;
- }
+ if (strcmp(outname, MTD_TARGET_TEMP_FILE) == 0) {
+ // Copy the temp file to the MTD partition.
+ if (CopyToMTDPartition(outname, target_filename) != 0) {
+ fprintf(stderr, "copy of %s to %s failed\n", outname, target_filename);
+ return 1;
+ }
+ unlink(outname);
+ } else {
+ // Give the .patch file the same owner, group, and mode of the
+ // original source file.
+ if (chmod(outname, source_to_use->st.st_mode) != 0) {
+ fprintf(stderr, "chmod of \"%s\" failed: %s\n", outname, strerror(errno));
+ return 1;
+ }
+ if (chown(outname, source_to_use->st.st_uid,
+ source_to_use->st.st_gid) != 0) {
+ fprintf(stderr, "chown of \"%s\" failed: %s\n", outname, strerror(errno));
+ return 1;
+ }
- // Finally, rename the .patch file to replace the original source file.
- if (rename(outname, source_filename) != 0) {
- fprintf(stderr, "rename of .patch to \"%s\" failed: %s\n",
- source_filename, strerror(errno));
- return 1;
+ // Finally, rename the .patch file to replace the target file.
+ if (rename(outname, target_filename) != 0) {
+ fprintf(stderr, "rename of .patch to \"%s\" failed: %s\n",
+ target_filename, strerror(errno));
+ return 1;
+ }
}
// If this run of applypatch created the copy, and we're here, we
diff --git a/tools/applypatch/applypatch.h b/tools/applypatch/applypatch.h
index 76fc80a..ccd8424 100644
--- a/tools/applypatch/applypatch.h
+++ b/tools/applypatch/applypatch.h
@@ -17,6 +17,7 @@
#ifndef _APPLYPATCH_H
#define _APPLYPATCH_H
+#include <sys/stat.h>
#include "mincrypt/sha.h"
typedef struct _Patch {
@@ -38,12 +39,26 @@
// and use it as the source instead.
#define CACHE_TEMP_SOURCE "/cache/saved.file"
+// When writing to an MTD partition, we first put the output in this
+// temp file, then copy it to the partition once the patching is
+// finished (and the target sha1 verified).
+#define MTD_TARGET_TEMP_FILE "/tmp/mtd-temp"
+
// applypatch.c
size_t FreeSpaceForFile(const char* filename);
+int applypatch(int argc, char** argv);
// bsdiff.c
void ShowBSDiffLicense();
int ApplyBSDiffPatch(const unsigned char* old_data, ssize_t old_size,
+ const char* patch_filename, ssize_t offset,
+ FILE* output, SHA_CTX* ctx);
+int ApplyBSDiffPatchMem(const unsigned char* old_data, ssize_t old_size,
+ const char* patch_filename, ssize_t patch_offset,
+ unsigned char** new_data, ssize_t* new_size);
+
+// imgpatch.c
+int ApplyImagePatch(const unsigned char* old_data, ssize_t old_size,
const char* patch_filename,
FILE* output, SHA_CTX* ctx);
diff --git a/tools/applypatch/applypatch.sh b/tools/applypatch/applypatch.sh
index 181cd5c..88f3025 100755
--- a/tools/applypatch/applypatch.sh
+++ b/tools/applypatch/applypatch.sh
@@ -24,16 +24,22 @@
# partition that WORK_DIR is located on, without the leading slash
WORK_FS=system
+# set to 0 to use a device instead
+USE_EMULATOR=1
+
# ------------------------
tmpdir=$(mktemp -d)
-emulator -wipe-data -noaudio -no-window -port $EMULATOR_PORT &
-pid_emulator=$!
+if [ "$USE_EMULATOR" == 1 ]; then
+ emulator -wipe-data -noaudio -no-window -port $EMULATOR_PORT &
+ pid_emulator=$!
+ ADB="adb -s emulator-$EMULATOR_PORT "
+else
+ ADB="adb -d "
+fi
-ADB="adb -s emulator-$EMULATOR_PORT "
-
-echo "emulator is $pid_emulator; waiting for startup"
+echo "waiting to connect to device"
$ADB wait-for-device
echo "device is available"
$ADB remount
@@ -56,7 +62,8 @@
echo
echo FAIL: $testname
echo
- kill $pid_emulator
+ [ "$open_pid" == "" ] || kill $open_pid
+ [ "$pid_emulator" == "" ] || kill $pid_emulator
exit 1
}
@@ -68,6 +75,23 @@
run_command df | awk "/$1/ {print gensub(/K/, \"\", \"g\", \$6)}"
}
+cleanup() {
+ # not necessary if we're about to kill the emulator, but nice for
+ # running on real devices or already-running emulators.
+ testname "removing test files"
+ run_command rm $WORK_DIR/bloat.dat
+ run_command rm $WORK_DIR/old.file
+ run_command rm $WORK_DIR/patch.bsdiff
+ run_command rm $WORK_DIR/applypatch
+ run_command rm $CACHE_TEMP_SOURCE
+ run_command rm /cache/bloat*.dat
+
+ [ "$pid_emulator" == "" ] || kill $pid_emulator
+
+ rm -rf $tmpdir
+}
+
+cleanup
$ADB push $ANDROID_PRODUCT_OUT/system/bin/applypatch $WORK_DIR/applypatch
@@ -146,16 +170,71 @@
fi
testname "apply bsdiff patch"
-run_command $WORK_DIR/applypatch $WORK_DIR/old.file $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff || fail
+run_command $WORK_DIR/applypatch $WORK_DIR/old.file - $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff || fail
$ADB pull $WORK_DIR/old.file $tmpdir/patched
diff -q $DATA_DIR/new.file $tmpdir/patched || fail
testname "reapply bsdiff patch"
-run_command $WORK_DIR/applypatch $WORK_DIR/old.file $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff || fail
+run_command $WORK_DIR/applypatch $WORK_DIR/old.file - $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff || fail
$ADB pull $WORK_DIR/old.file $tmpdir/patched
diff -q $DATA_DIR/new.file $tmpdir/patched || fail
+# --------------- apply patch in new location ----------------------
+
+$ADB push $DATA_DIR/old.file $WORK_DIR
+$ADB push $DATA_DIR/patch.bsdiff $WORK_DIR
+
+# Check that the partition has enough space to apply the patch without
+# copying. If it doesn't, we'll be testing the low-space condition
+# when we intend to test the not-low-space condition.
+testname "apply patch to new location (with enough space)"
+free_kb=$(free_space $WORK_FS)
+echo "${free_kb}kb free on /$WORK_FS."
+if (( free_kb * 1024 < NEW_SIZE * 3 / 2 )); then
+ echo "Not enough space on /$WORK_FS to patch test file."
+ echo
+ echo "This doesn't mean that applypatch is necessarily broken;"
+ echo "just that /$WORK_FS doesn't have enough free space to"
+ echo "properly run this test."
+ exit 1
+fi
+
+run_command rm $WORK_DIR/new.file
+run_command rm $CACHE_TEMP_SOURCE
+
+testname "apply bsdiff patch to new location"
+run_command $WORK_DIR/applypatch $WORK_DIR/old.file $WORK_DIR/new.file $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff || fail
+$ADB pull $WORK_DIR/new.file $tmpdir/patched
+diff -q $DATA_DIR/new.file $tmpdir/patched || fail
+
+testname "reapply bsdiff patch to new location"
+run_command $WORK_DIR/applypatch $WORK_DIR/old.file $WORK_DIR/new.file $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff || fail
+$ADB pull $WORK_DIR/new.file $tmpdir/patched
+diff -q $DATA_DIR/new.file $tmpdir/patched || fail
+
+$ADB push $DATA_DIR/old.file $CACHE_TEMP_SOURCE
+# put some junk in the old file
+run_command dd if=/dev/urandom of=$WORK_DIR/old.file count=100 bs=1024 || fail
+
+testname "apply bsdiff patch to new location with corrupted source"
+run_command $WORK_DIR/applypatch $WORK_DIR/old.file $WORK_DIR/new.file $NEW_SHA1 $NEW_SIZE $OLD_SHA1:$WORK_DIR/patch.bsdiff $BAD1_SHA1:$WORK_DIR/foo || fail
+$ADB pull $WORK_DIR/new.file $tmpdir/patched
+diff -q $DATA_DIR/new.file $tmpdir/patched || fail
+
+# put some junk in the cache copy, too
+run_command dd if=/dev/urandom of=$CACHE_TEMP_SOURCE count=100 bs=1024 || fail
+
+run_command rm $WORK_DIR/new.file
+testname "apply bsdiff patch to new location with corrupted source and copy (no new file)"
+run_command $WORK_DIR/applypatch $WORK_DIR/old.file $WORK_DIR/new.file $NEW_SHA1 $NEW_SIZE $OLD_SHA1:$WORK_DIR/patch.bsdiff $BAD1_SHA1:$WORK_DIR/foo && fail
+
+# put some junk in the new file
+run_command dd if=/dev/urandom of=$WORK_DIR/new.file count=100 bs=1024 || fail
+
+testname "apply bsdiff patch to new location with corrupted source and copy (bad new file)"
+run_command $WORK_DIR/applypatch $WORK_DIR/old.file $WORK_DIR/new.file $NEW_SHA1 $NEW_SIZE $OLD_SHA1:$WORK_DIR/patch.bsdiff $BAD1_SHA1:$WORK_DIR/foo && fail
+
# --------------- apply patch with low space on /system ----------------------
$ADB push $DATA_DIR/old.file $WORK_DIR
@@ -169,12 +248,12 @@
echo "${free_kb}kb free on /$WORK_FS now."
testname "apply bsdiff patch with low space"
-run_command $WORK_DIR/applypatch $WORK_DIR/old.file $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff || fail
+run_command $WORK_DIR/applypatch $WORK_DIR/old.file - $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff || fail
$ADB pull $WORK_DIR/old.file $tmpdir/patched
diff -q $DATA_DIR/new.file $tmpdir/patched || fail
testname "reapply bsdiff patch with low space"
-run_command $WORK_DIR/applypatch $WORK_DIR/old.file $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff || fail
+run_command $WORK_DIR/applypatch $WORK_DIR/old.file - $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff || fail
$ADB pull $WORK_DIR/old.file $tmpdir/patched
diff -q $DATA_DIR/new.file $tmpdir/patched || fail
@@ -213,7 +292,7 @@
run_command ls $CACHE_TEMP_SOURCE || fail # wasn't deleted because it's the source file copy
# should fail; not enough files can be deleted
-run_command $WORK_DIR/applypatch $WORK_DIR/old.file $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff && fail
+run_command $WORK_DIR/applypatch $WORK_DIR/old.file - $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff && fail
run_command ls /cache/bloat_large.dat || fail # wasn't deleted because it was open
run_command ls /cache/subdir/a.file || fail # wasn't deleted because it's in a subdir
run_command ls $CACHE_TEMP_SOURCE || fail # wasn't deleted because it's the source file copy
@@ -229,7 +308,7 @@
run_command ls $CACHE_TEMP_SOURCE || fail # wasn't deleted because it's the source file copy
# should succeed
-run_command $WORK_DIR/applypatch $WORK_DIR/old.file $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff || fail
+run_command $WORK_DIR/applypatch $WORK_DIR/old.file - $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff || fail
$ADB pull $WORK_DIR/old.file $tmpdir/patched
diff -q $DATA_DIR/new.file $tmpdir/patched || fail
run_command ls /cache/subdir/a.file || fail # still wasn't deleted because it's in a subdir
@@ -242,7 +321,7 @@
run_command dd if=/dev/urandom of=$WORK_DIR/old.file count=100 bs=1024 || fail
testname "apply bsdiff patch from cache (corrupted source) with low space"
-run_command $WORK_DIR/applypatch $WORK_DIR/old.file $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff || fail
+run_command $WORK_DIR/applypatch $WORK_DIR/old.file - $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff || fail
$ADB pull $WORK_DIR/old.file $tmpdir/patched
diff -q $DATA_DIR/new.file $tmpdir/patched || fail
@@ -251,20 +330,14 @@
run_command rm $WORK_DIR/old.file
testname "apply bsdiff patch from cache (missing source) with low space"
-run_command $WORK_DIR/applypatch $WORK_DIR/old.file $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff || fail
+run_command $WORK_DIR/applypatch $WORK_DIR/old.file - $NEW_SHA1 $NEW_SIZE $BAD1_SHA1:$WORK_DIR/foo $OLD_SHA1:$WORK_DIR/patch.bsdiff || fail
$ADB pull $WORK_DIR/old.file $tmpdir/patched
diff -q $DATA_DIR/new.file $tmpdir/patched || fail
# --------------- cleanup ----------------------
-# not necessary if we're about to kill the emulator, but nice for
-# running on real devices or already-running emulators.
-run_command rm /cache/bloat*.dat $WORK_DIR/bloat.dat $CACHE_TEMP_SOURCE $WORK_DIR/old.file $WORK_DIR/patch.xdelta3 $WORK_DIR/patch.bsdiff $WORK_DIR/applypatch
-
-kill $pid_emulator
-
-rm -rf $tmpdir
+cleanup
echo
echo PASS
diff --git a/tools/applypatch/bsdiff.c b/tools/applypatch/bsdiff.c
index a2851f9..9d55f3b 100644
--- a/tools/applypatch/bsdiff.c
+++ b/tools/applypatch/bsdiff.c
@@ -29,6 +29,7 @@
#include <bzlib.h>
#include "mincrypt/sha.h"
+#include "applypatch.h"
void ShowBSDiffLicense() {
puts("The bsdiff library used herein is:\n"
@@ -80,10 +81,34 @@
return y;
}
+
int ApplyBSDiffPatch(const unsigned char* old_data, ssize_t old_size,
- const char* patch_filename,
+ const char* patch_filename, ssize_t patch_offset,
FILE* output, SHA_CTX* ctx) {
+ unsigned char* new_data;
+ ssize_t new_size;
+ if (ApplyBSDiffPatchMem(old_data, old_size, patch_filename, patch_offset,
+ &new_data, &new_size) != 0) {
+ return -1;
+ }
+
+ if (fwrite(new_data, 1, new_size, output) < new_size) {
+ fprintf(stderr, "short write of output: %d (%s)\n", errno, strerror(errno));
+ return 1;
+ }
+ if (ctx) {
+ SHA_update(ctx, new_data, new_size);
+ }
+ free(new_data);
+
+ return 0;
+}
+
+int ApplyBSDiffPatchMem(const unsigned char* old_data, ssize_t old_size,
+ const char* patch_filename, ssize_t patch_offset,
+ unsigned char** new_data, ssize_t* new_size) {
+
FILE* f;
if ((f = fopen(patch_filename, "rb")) == NULL) {
fprintf(stderr, "failed to open patch file\n");
@@ -102,6 +127,8 @@
// from oldfile to x bytes from the diff block; copy y bytes from the
// extra block; seek forwards in oldfile by z bytes".
+ fseek(f, patch_offset, SEEK_SET);
+
unsigned char header[32];
if (fread(header, 1, 32, f) < 32) {
fprintf(stderr, "failed to read patch file header\n");
@@ -109,17 +136,16 @@
}
if (memcmp(header, "BSDIFF40", 8) != 0) {
- fprintf(stderr, "corrupt patch file header (magic number)\n");
+ fprintf(stderr, "corrupt bsdiff patch file header (magic number)\n");
return 1;
}
ssize_t ctrl_len, data_len;
- ssize_t new_size;
ctrl_len = offtin(header+8);
data_len = offtin(header+16);
- new_size = offtin(header+24);
+ *new_size = offtin(header+24);
- if (ctrl_len < 0 || data_len < 0 || new_size < 0) {
+ if (ctrl_len < 0 || data_len < 0 || *new_size < 0) {
fprintf(stderr, "corrupt patch file header (data lengths)\n");
return 1;
}
@@ -135,7 +161,7 @@
fprintf(stderr, "failed to open patch file\n"); \
return 1; \
} \
- if (fseeko(f, offset, SEEK_SET)) { \
+ if (fseeko(f, offset+patch_offset, SEEK_SET)) { \
fprintf(stderr, "failed to seek in patch file\n"); \
return 1; \
} \
@@ -150,9 +176,10 @@
#undef OPEN_AT
- unsigned char* new_data = malloc(new_size);
- if (new_data == NULL) {
- fprintf(stderr, "failed to allocate memory for output file\n");
+ *new_data = malloc(*new_size);
+ if (*new_data == NULL) {
+ fprintf(stderr, "failed to allocate %d bytes of memory for output file\n",
+ (int)*new_size);
return 1;
}
@@ -161,7 +188,7 @@
off_t len_read;
int i;
unsigned char buf[8];
- while (newpos < new_size) {
+ while (newpos < *new_size) {
// Read control data
for (i = 0; i < 3; ++i) {
len_read = BZ2_bzRead(&bzerr, cpfbz2, buf, 8);
@@ -173,13 +200,13 @@
}
// Sanity check
- if (newpos + ctrl[0] > new_size) {
+ if (newpos + ctrl[0] > *new_size) {
fprintf(stderr, "corrupt patch (new file overrun)\n");
return 1;
}
// Read diff string
- len_read = BZ2_bzRead(&bzerr, dpfbz2, new_data + newpos, ctrl[0]);
+ len_read = BZ2_bzRead(&bzerr, dpfbz2, *new_data + newpos, ctrl[0]);
if (len_read < ctrl[0] || !(bzerr == BZ_OK || bzerr == BZ_STREAM_END)) {
fprintf(stderr, "corrupt patch (read diff)\n");
return 1;
@@ -188,7 +215,7 @@
// Add old data to diff string
for (i = 0; i < ctrl[0]; ++i) {
if ((oldpos+i >= 0) && (oldpos+i < old_size)) {
- new_data[newpos+i] += old_data[oldpos+i];
+ (*new_data)[newpos+i] += old_data[oldpos+i];
}
}
@@ -197,13 +224,13 @@
oldpos += ctrl[0];
// Sanity check
- if (newpos + ctrl[1] > new_size) {
+ if (newpos + ctrl[1] > *new_size) {
fprintf(stderr, "corrupt patch (new file overrun)\n");
return 1;
}
// Read extra string
- len_read = BZ2_bzRead(&bzerr, epfbz2, new_data + newpos, ctrl[1]);
+ len_read = BZ2_bzRead(&bzerr, epfbz2, *new_data + newpos, ctrl[1]);
if (len_read < ctrl[1] || !(bzerr == BZ_OK || bzerr == BZ_STREAM_END)) {
fprintf(stderr, "corrupt patch (read extra)\n");
return 1;
@@ -221,12 +248,5 @@
fclose(dpf);
fclose(epf);
- if (fwrite(new_data, 1, new_size, output) < new_size) {
- fprintf(stderr, "short write of output: %d (%s)\n", errno, strerror(errno));
- return 1;
- }
- SHA_update(ctx, new_data, new_size);
- free(new_data);
-
return 0;
}
diff --git a/tools/applypatch/imgdiff.c b/tools/applypatch/imgdiff.c
new file mode 100644
index 0000000..51835b4
--- /dev/null
+++ b/tools/applypatch/imgdiff.c
@@ -0,0 +1,1008 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This program constructs binary patches for images -- such as boot.img
+ * and recovery.img -- that consist primarily of large chunks of gzipped
+ * data interspersed with uncompressed data. Doing a naive bsdiff of
+ * these files is not useful because small changes in the data lead to
+ * large changes in the compressed bitstream; bsdiff patches of gzipped
+ * data are typically as large as the data itself.
+ *
+ * To patch these usefully, we break the source and target images up into
+ * chunks of two types: "normal" and "gzip". Normal chunks are simply
+ * patched using a plain bsdiff. Gzip chunks are first expanded, then a
+ * bsdiff is applied to the uncompressed data, then the patched data is
+ * gzipped using the same encoder parameters. Patched chunks are
+ * concatenated together to create the output file; the output image
+ * should be *exactly* the same series of bytes as the target image used
+ * originally to generate the patch.
+ *
+ * To work well with this tool, the gzipped sections of the target
+ * image must have been generated using the same deflate encoder that
+ * is available in applypatch, namely, the one in the zlib library.
+ * In practice this means that images should be compressed using the
+ * "minigzip" tool included in the zlib distribution, not the GNU gzip
+ * program.
+ *
+ * An "imgdiff" patch consists of a header describing the chunk structure
+ * of the file and any encoding parameters needed for the gzipped
+ * chunks, followed by N bsdiff patches, one per chunk.
+ *
+ * For a diff to be generated, the source and target images must have the
+ * same "chunk" structure: that is, the same number of gzipped and normal
+ * chunks in the same order. Android boot and recovery images currently
+ * consist of five chunks: a small normal header, a gzipped kernel, a
+ * small normal section, a gzipped ramdisk, and finally a small normal
+ * footer.
+ *
+ * Caveats: we locate gzipped sections within the source and target
+ * images by searching for the byte sequence 1f8b0800: 1f8b is the gzip
+ * magic number; 08 specifies the "deflate" encoding [the only encoding
+ * supported by the gzip standard]; and 00 is the flags byte. We do not
+ * currently support any extra header fields (which would be indicated by
+ * a nonzero flags byte). We also don't handle the case when that byte
+ * sequence appears spuriously in the file. (Note that it would have to
+ * occur spuriously within a normal chunk to be a problem.)
+ *
+ *
+ * The imgdiff patch header looks like this:
+ *
+ * "IMGDIFF1" (8) [magic number and version]
+ * chunk count (4)
+ * for each chunk:
+ * chunk type (4) [CHUNK_{NORMAL, GZIP, DEFLATE, RAW}]
+ * if chunk type == CHUNK_NORMAL:
+ * source start (8)
+ * source len (8)
+ * bsdiff patch offset (8) [from start of patch file]
+ * if chunk type == CHUNK_GZIP: (version 1 only)
+ * source start (8)
+ * source len (8)
+ * bsdiff patch offset (8) [from start of patch file]
+ * source expanded len (8) [size of uncompressed source]
+ * target expected len (8) [size of uncompressed target]
+ * gzip level (4)
+ * method (4)
+ * windowBits (4)
+ * memLevel (4)
+ * strategy (4)
+ * gzip header len (4)
+ * gzip header (gzip header len)
+ * gzip footer (8)
+ * if chunk type == CHUNK_DEFLATE: (version 2 only)
+ * source start (8)
+ * source len (8)
+ * bsdiff patch offset (8) [from start of patch file]
+ * source expanded len (8) [size of uncompressed source]
+ * target expected len (8) [size of uncompressed target]
+ * gzip level (4)
+ * method (4)
+ * windowBits (4)
+ * memLevel (4)
+ * strategy (4)
+ * if chunk type == RAW: (version 2 only)
+ * target len (4)
+ * data (target len)
+ *
+ * All integers are little-endian. "source start" and "source len"
+ * specify the section of the input image that comprises this chunk,
+ * including the gzip header and footer for gzip chunks. "source
+ * expanded len" is the size of the uncompressed source data. "target
+ * expected len" is the size of the uncompressed data after applying
+ * the bsdiff patch. The next five parameters specify the zlib
+ * parameters to be used when compressing the patched data, and the
+ * next three specify the header and footer to be wrapped around the
+ * compressed data to create the output chunk (so that header contents
+ * like the timestamp are recreated exactly).
+ *
+ * After the header there are 'chunk count' bsdiff patches; the offset
+ * of each from the beginning of the file is specified in the header.
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "zlib.h"
+#include "imgdiff.h"
+#include "utils.h"
+
+typedef struct {
+ int type; // CHUNK_NORMAL, CHUNK_DEFLATE
+ size_t start; // offset of chunk in original image file
+
+ size_t len;
+ unsigned char* data; // data to be patched (uncompressed, for deflate chunks)
+
+ size_t source_start;
+ size_t source_len;
+
+ // --- for CHUNK_DEFLATE chunks only: ---
+
+ // original (compressed) deflate data
+ size_t deflate_len;
+ unsigned char* deflate_data;
+
+ char* filename; // used for zip entries
+
+ // deflate encoder parameters
+ int level, method, windowBits, memLevel, strategy;
+
+ size_t source_uncompressed_len;
+} ImageChunk;
+
+typedef struct {
+ int data_offset;
+ int deflate_len;
+ int uncomp_len;
+ char* filename;
+} ZipFileEntry;
+
+static int fileentry_compare(const void* a, const void* b) {
+ int ao = ((ZipFileEntry*)a)->data_offset;
+ int bo = ((ZipFileEntry*)b)->data_offset;
+ if (ao < bo) {
+ return -1;
+ } else if (ao > bo) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+unsigned char* ReadZip(const char* filename,
+ int* num_chunks, ImageChunk** chunks,
+ int include_pseudo_chunk) {
+ struct stat st;
+ if (stat(filename, &st) != 0) {
+ fprintf(stderr, "failed to stat \"%s\": %s\n", filename, strerror(errno));
+ return NULL;
+ }
+
+ unsigned char* img = malloc(st.st_size);
+ FILE* f = fopen(filename, "rb");
+ if (fread(img, 1, st.st_size, f) != st.st_size) {
+ fprintf(stderr, "failed to read \"%s\" %s\n", filename, strerror(errno));
+ fclose(f);
+ return NULL;
+ }
+ fclose(f);
+
+ // look for the end-of-central-directory record.
+
+ int i;
+ for (i = st.st_size-20; i >= 0 && i > st.st_size - 65600; --i) {
+ if (img[i] == 0x50 && img[i+1] == 0x4b &&
+ img[i+2] == 0x05 && img[i+3] == 0x06) {
+ break;
+ }
+ }
+ // double-check: this archive consists of a single "disk"
+ if (!(img[i+4] == 0 && img[i+5] == 0 && img[i+6] == 0 && img[i+7] == 0)) {
+ fprintf(stderr, "can't process multi-disk archive\n");
+ return NULL;
+ }
+
+ int cdcount = Read2(img+i+8);
+ int cdoffset = Read4(img+i+16);
+
+ ZipFileEntry* temp_entries = malloc(cdcount * sizeof(ZipFileEntry));
+ int entrycount = 0;
+
+ unsigned char* cd = img+cdoffset;
+ for (i = 0; i < cdcount; ++i) {
+ if (!(cd[0] == 0x50 && cd[1] == 0x4b && cd[2] == 0x01 && cd[3] == 0x02)) {
+ fprintf(stderr, "bad central directory entry %d\n", i);
+ return NULL;
+ }
+
+ int clen = Read4(cd+20); // compressed len
+ int ulen = Read4(cd+24); // uncompressed len
+ int nlen = Read2(cd+28); // filename len
+ int xlen = Read2(cd+30); // extra field len
+ int mlen = Read2(cd+32); // file comment len
+ int hoffset = Read4(cd+42); // local header offset
+
+ char* filename = malloc(nlen+1);
+ memcpy(filename, cd+46, nlen);
+ filename[nlen] = '\0';
+
+ int method = Read2(cd+10);
+
+ cd += 46 + nlen + xlen + mlen;
+
+ if (method != 8) { // 8 == deflate
+ free(filename);
+ continue;
+ }
+
+ unsigned char* lh = img + hoffset;
+
+ if (!(lh[0] == 0x50 && lh[1] == 0x4b && lh[2] == 0x03 && lh[3] == 0x04)) {
+ fprintf(stderr, "bad local file header entry %d\n", i);
+ return NULL;
+ }
+
+ if (Read2(lh+26) != nlen || memcmp(lh+30, filename, nlen) != 0) {
+ fprintf(stderr, "central dir filename doesn't match local header\n");
+ return NULL;
+ }
+
+ xlen = Read2(lh+28); // extra field len; might be different from CD entry?
+
+ temp_entries[entrycount].data_offset = hoffset+30+nlen+xlen;
+ temp_entries[entrycount].deflate_len = clen;
+ temp_entries[entrycount].uncomp_len = ulen;
+ temp_entries[entrycount].filename = filename;
+ ++entrycount;
+ }
+
+ qsort(temp_entries, entrycount, sizeof(ZipFileEntry), fileentry_compare);
+
+#if 0
+ printf("found %d deflated entries\n", entrycount);
+ for (i = 0; i < entrycount; ++i) {
+ printf("off %10d len %10d unlen %10d %p %s\n",
+ temp_entries[i].data_offset,
+ temp_entries[i].deflate_len,
+ temp_entries[i].uncomp_len,
+ temp_entries[i].filename,
+ temp_entries[i].filename);
+ }
+#endif
+
+ *num_chunks = 0;
+ *chunks = malloc((entrycount*2+2) * sizeof(ImageChunk));
+ ImageChunk* curr = *chunks;
+
+ if (include_pseudo_chunk) {
+ curr->type = CHUNK_NORMAL;
+ curr->start = 0;
+ curr->len = st.st_size;
+ curr->data = img;
+ curr->filename = NULL;
+ ++curr;
+ ++*num_chunks;
+ }
+
+ int pos = 0;
+ int nextentry = 0;
+
+ while (pos < st.st_size) {
+ if (nextentry < entrycount && pos == temp_entries[nextentry].data_offset) {
+ curr->type = CHUNK_DEFLATE;
+ curr->start = pos;
+ curr->deflate_len = temp_entries[nextentry].deflate_len;
+ curr->deflate_data = img + pos;
+ curr->filename = temp_entries[nextentry].filename;
+
+ curr->len = temp_entries[nextentry].uncomp_len;
+ curr->data = malloc(curr->len);
+
+ z_stream strm;
+ strm.zalloc = Z_NULL;
+ strm.zfree = Z_NULL;
+ strm.opaque = Z_NULL;
+ strm.avail_in = curr->deflate_len;
+ strm.next_in = curr->deflate_data;
+
+ // -15 means we are decoding a 'raw' deflate stream; zlib will
+ // not expect zlib headers.
+ int ret = inflateInit2(&strm, -15);
+
+ strm.avail_out = curr->len;
+ strm.next_out = curr->data;
+ ret = inflate(&strm, Z_NO_FLUSH);
+ if (ret != Z_STREAM_END) {
+ fprintf(stderr, "failed to inflate \"%s\"; %d\n", curr->filename, ret);
+ return NULL;
+ }
+
+ inflateEnd(&strm);
+
+ pos += curr->deflate_len;
+ ++nextentry;
+ ++*num_chunks;
+ ++curr;
+ continue;
+ }
+
+ // use a normal chunk to take all the data up to the start of the
+ // next deflate section.
+
+ curr->type = CHUNK_NORMAL;
+ curr->start = pos;
+ if (nextentry < entrycount) {
+ curr->len = temp_entries[nextentry].data_offset - pos;
+ } else {
+ curr->len = st.st_size - pos;
+ }
+ curr->data = img + pos;
+ curr->filename = NULL;
+ pos += curr->len;
+
+ ++*num_chunks;
+ ++curr;
+ }
+
+ free(temp_entries);
+ return img;
+}
+
+/*
+ * Read the given file and break it up into chunks, putting the number
+ * of chunks and their info in *num_chunks and **chunks,
+ * respectively. Returns a malloc'd block of memory containing the
+ * contents of the file; various pointers in the output chunk array
+ * will point into this block of memory. The caller should free the
+ * return value when done with all the chunks. Returns NULL on
+ * failure.
+ */
+unsigned char* ReadImage(const char* filename,
+ int* num_chunks, ImageChunk** chunks) {
+ struct stat st;
+ if (stat(filename, &st) != 0) {
+ fprintf(stderr, "failed to stat \"%s\": %s\n", filename, strerror(errno));
+ return NULL;
+ }
+
+ unsigned char* img = malloc(st.st_size + 4); // +4 for the zero pad appended below; NOTE(review): malloc result unchecked
+ FILE* f = fopen(filename, "rb"); // NOTE(review): f is not checked for NULL before fread
+ if (fread(img, 1, st.st_size, f) != st.st_size) {
+ fprintf(stderr, "failed to read \"%s\" %s\n", filename, strerror(errno));
+ fclose(f);
+ return NULL; // NOTE(review): img leaks on this error path
+ }
+ fclose(f);
+
+ // append 4 zero bytes to the data so we can always search for the
+ // four-byte string 1f8b0800 starting at any point in the actual
+ // file data, without special-casing the end of the data.
+ memset(img+st.st_size, 0, 4);
+
+ size_t pos = 0;
+
+ *num_chunks = 0;
+ *chunks = NULL;
+
+ while (pos < st.st_size) {
+ unsigned char* p = img+pos;
+
+ if (st.st_size - pos >= 4 &&
+ p[0] == 0x1f && p[1] == 0x8b && // gzip magic bytes (RFC 1952)
+ p[2] == 0x08 && // deflate compression
+ p[3] == 0x00) { // no header flags
+ // 'pos' is the offset of the start of a gzip chunk.
+
+ *num_chunks += 3; // each gzip stream becomes three chunks: header, deflate body, footer
+ *chunks = realloc(*chunks, *num_chunks * sizeof(ImageChunk));
+ ImageChunk* curr = *chunks + (*num_chunks-3);
+
+ // create a normal chunk for the header.
+ curr->start = pos;
+ curr->type = CHUNK_NORMAL;
+ curr->len = GZIP_HEADER_LEN; // fixed 10 bytes; optional gzip header fields are not supported (see imgdiff.h)
+ curr->data = p;
+
+ pos += curr->len;
+ p += curr->len;
+ ++curr;
+
+ curr->type = CHUNK_DEFLATE;
+ curr->filename = NULL;
+
+ // We must decompress this chunk in order to discover where it
+ // ends, and so we can put the uncompressed data and its length
+ // into curr->data and curr->len.
+
+ size_t allocated = 32768;
+ curr->len = 0;
+ curr->data = malloc(allocated);
+ curr->start = pos;
+ curr->deflate_data = p;
+
+ z_stream strm;
+ strm.zalloc = Z_NULL;
+ strm.zfree = Z_NULL;
+ strm.opaque = Z_NULL;
+ strm.avail_in = st.st_size - pos;
+ strm.next_in = p;
+
+ // -15 means we are decoding a 'raw' deflate stream; zlib will
+ // not expect zlib headers.
+ int ret = inflateInit2(&strm, -15); // NOTE(review): return value is ignored
+
+ do {
+ strm.avail_out = allocated - curr->len;
+ strm.next_out = curr->data + curr->len;
+ ret = inflate(&strm, Z_NO_FLUSH);
+ curr->len = allocated - strm.avail_out;
+ if (strm.avail_out == 0) { // output buffer full: double it and keep inflating
+ allocated *= 2;
+ curr->data = realloc(curr->data, allocated);
+ }
+ } while (ret != Z_STREAM_END); // NOTE(review): a corrupt stream (ret < 0) never reaches Z_STREAM_END -- infinite loop
+
+ curr->deflate_len = st.st_size - strm.avail_in - pos; // compressed bytes actually consumed by inflate
+ inflateEnd(&strm);
+ pos += curr->deflate_len;
+ p += curr->deflate_len;
+ ++curr;
+
+ // create a normal chunk for the footer
+
+ curr->type = CHUNK_NORMAL;
+ curr->start = pos;
+ curr->len = GZIP_FOOTER_LEN; // 8 bytes: CRC32 then ISIZE (RFC 1952)
+ curr->data = img+pos;
+
+ pos += curr->len;
+ p += curr->len;
+ ++curr;
+
+ // The footer (that we just skipped over) contains the size of
+ // the uncompressed data. Double-check to make sure that it
+ // matches the size of the data we got when we actually did
+ // the decompression.
+ size_t footer_size = Read4(p-4); // ISIZE: uncompressed length mod 2^32
+ if (footer_size != curr[-2].len) {
+ fprintf(stderr, "Error: footer size %d != decompressed size %d\n",
+ footer_size, curr[-2].len);
+ free(img);
+ return NULL;
+ }
+ } else {
+ // Reallocate the list for every chunk; we expect the number of
+ // chunks to be small (5 for typical boot and recovery images).
+ ++*num_chunks;
+ *chunks = realloc(*chunks, *num_chunks * sizeof(ImageChunk));
+ ImageChunk* curr = *chunks + (*num_chunks-1);
+ curr->start = pos;
+
+ // 'pos' is not the offset of the start of a gzip chunk, so scan
+ // forward until we find a gzip header.
+ curr->type = CHUNK_NORMAL;
+ curr->data = p;
+
+ for (curr->len = 0; curr->len < (st.st_size - pos); ++curr->len) {
+ if (p[curr->len] == 0x1f &&
+ p[curr->len+1] == 0x8b &&
+ p[curr->len+2] == 0x08 &&
+ p[curr->len+3] == 0x00) { // in-bounds even at EOF thanks to the 4-byte zero pad
+ break;
+ }
+ }
+ pos += curr->len;
+ }
+ }
+
+ return img;
+}
+
+#define BUFFER_SIZE 32768
+
+/*
+ * Takes the uncompressed data stored in the chunk, compresses it
+ * using the zlib parameters stored in the chunk, and checks that it
+ * matches exactly the compressed data we started with (also stored in
+ * the chunk). Return 0 on success.
+ */
+int TryReconstruction(ImageChunk* chunk, unsigned char* out) {
+ size_t p = 0; // bytes of recompressed output verified so far
+
+#if 0
+ fprintf(stderr, "trying %d %d %d %d %d\n",
+ chunk->level, chunk->method, chunk->windowBits,
+ chunk->memLevel, chunk->strategy);
+#endif
+
+ z_stream strm;
+ strm.zalloc = Z_NULL;
+ strm.zfree = Z_NULL;
+ strm.opaque = Z_NULL;
+ strm.avail_in = chunk->len;
+ strm.next_in = chunk->data;
+ int ret;
+ ret = deflateInit2(&strm, chunk->level, chunk->method, chunk->windowBits,
+ chunk->memLevel, chunk->strategy); // NOTE(review): init result unchecked
+ do {
+ strm.avail_out = BUFFER_SIZE; // caller's 'out' buffer must be at least BUFFER_SIZE bytes
+ strm.next_out = out;
+ ret = deflate(&strm, Z_FINISH);
+ size_t have = BUFFER_SIZE - strm.avail_out;
+
+ if (memcmp(out, chunk->deflate_data+p, have) != 0) { // compare this window against the original compressed bytes
+ // mismatch; data isn't the same.
+ deflateEnd(&strm);
+ return -1;
+ }
+ p += have;
+ } while (ret != Z_STREAM_END);
+ deflateEnd(&strm);
+ if (p != chunk->deflate_len) {
+ // mismatch; ran out of data before we should have.
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * Verify that we can reproduce exactly the same compressed data that
+ * we started with. Sets the level, method, windowBits, memLevel, and
+ * strategy fields in the chunk to the encoding parameters needed to
+ * produce the right output. Returns 0 on success.
+ */
+int ReconstructDeflateChunk(ImageChunk* chunk) {
+ if (chunk->type != CHUNK_DEFLATE) {
+ fprintf(stderr, "attempt to reconstruct non-deflate chunk\n");
+ return -1;
+ }
+
+ size_t p = 0; // NOTE(review): unused local
+ unsigned char* out = malloc(BUFFER_SIZE); // scratch buffer handed to TryReconstruction; NOTE(review): unchecked
+
+ // We only check two combinations of encoder parameters: level 6
+ // (the default) and level 9 (the maximum).
+ for (chunk->level = 6; chunk->level <= 9; chunk->level += 3) { // += 3 visits exactly 6 then 9
+ chunk->windowBits = -15; // 32kb window; negative to indicate a raw stream.
+ chunk->memLevel = 8; // the default value.
+ chunk->method = Z_DEFLATED;
+ chunk->strategy = Z_DEFAULT_STRATEGY;
+
+ if (TryReconstruction(chunk, out) == 0) { // success: the winning parameters are left set in the chunk
+ free(out);
+ return 0;
+ }
+ }
+
+ free(out);
+ return -1;
+}
+
+/*
+ * Given source and target chunks, compute a bsdiff patch between them
+ * by running bsdiff in a subprocess. Return the patch data, placing
+ * its length in *size. Return NULL on failure. We expect the bsdiff
+ * program to be in the path.
+ */
+unsigned char* MakePatch(ImageChunk* src, ImageChunk* tgt, size_t* size) {
+ if (tgt->type == CHUNK_NORMAL) {
+ if (tgt->len <= 160) { // tiny target: storing the raw bytes beats any patch overhead
+ tgt->type = CHUNK_RAW;
+ *size = tgt->len;
+ return tgt->data; // points into the target image, not a fresh allocation
+ }
+ }
+
+ char stemp[] = "/tmp/imgdiff-src-XXXXXX";
+ char ttemp[] = "/tmp/imgdiff-tgt-XXXXXX";
+ char ptemp[] = "/tmp/imgdiff-patch-XXXXXX";
+ mkstemp(stemp); // NOTE(review): mkstemp results unchecked, and the returned fds leak (files reopened by name below)
+ mkstemp(ttemp);
+ mkstemp(ptemp);
+
+ FILE* f = fopen(stemp, "wb");
+ if (f == NULL) {
+ fprintf(stderr, "failed to open src chunk %s: %s\n",
+ stemp, strerror(errno));
+ return NULL; // NOTE(review): temp files are not unlinked on the error paths below
+ }
+ if (fwrite(src->data, 1, src->len, f) != src->len) {
+ fprintf(stderr, "failed to write src chunk to %s: %s\n",
+ stemp, strerror(errno));
+ return NULL; // NOTE(review): f leaks here
+ }
+ fclose(f);
+
+ f = fopen(ttemp, "wb");
+ if (f == NULL) {
+ fprintf(stderr, "failed to open tgt chunk %s: %s\n",
+ ttemp, strerror(errno));
+ return NULL;
+ }
+ if (fwrite(tgt->data, 1, tgt->len, f) != tgt->len) {
+ fprintf(stderr, "failed to write tgt chunk to %s: %s\n",
+ ttemp, strerror(errno));
+ return NULL;
+ }
+ fclose(f);
+
+ char cmd[200];
+ sprintf(cmd, "bsdiff %s %s %s", stemp, ttemp, ptemp); // bsdiff must be on PATH; template names are fixed-size so 200 bytes suffices
+ if (system(cmd) != 0) {
+ fprintf(stderr, "failed to run bsdiff: %s\n", strerror(errno));
+ return NULL;
+ }
+
+ struct stat st;
+ if (stat(ptemp, &st) != 0) {
+ fprintf(stderr, "failed to stat patch file %s: %s\n",
+ ptemp, strerror(errno));
+ return NULL;
+ }
+
+ unsigned char* data = malloc(st.st_size);
+
+ if (tgt->type == CHUNK_NORMAL && tgt->len <= st.st_size) { // patch is no smaller than the target itself: ship raw bytes instead
+ unlink(stemp);
+ unlink(ttemp);
+ unlink(ptemp);
+
+ tgt->type = CHUNK_RAW;
+ *size = tgt->len;
+ return tgt->data; // NOTE(review): 'data' allocated above leaks on this path
+ }
+
+ *size = st.st_size;
+
+ f = fopen(ptemp, "rb");
+ if (f == NULL) {
+ fprintf(stderr, "failed to open patch %s: %s\n", ptemp, strerror(errno));
+ return NULL;
+ }
+ if (fread(data, 1, st.st_size, f) != st.st_size) {
+ fprintf(stderr, "failed to read patch %s: %s\n", ptemp, strerror(errno));
+ return NULL;
+ }
+ fclose(f);
+
+ unlink(stemp);
+ unlink(ttemp);
+ unlink(ptemp);
+
+ tgt->source_start = src->start;
+ switch (tgt->type) {
+ case CHUNK_NORMAL:
+ tgt->source_len = src->len;
+ break;
+ case CHUNK_DEFLATE:
+ tgt->source_len = src->deflate_len; // applypatch needs the compressed extent of the source chunk
+ tgt->source_uncompressed_len = src->len;
+ break;
+ }
+
+ return data;
+}
+
+/*
+ * Cause a gzip chunk to be treated as a normal chunk (ie, as a blob
+ * of uninterpreted data). The resulting patch will likely be about
+ * as big as the target file, but it lets us handle the case of images
+ * where some gzip chunks are reconstructible but others aren't (by
+ * treating the ones that aren't as normal chunks).
+ */
+void ChangeDeflateChunkToNormal(ImageChunk* ch) {
+ if (ch->type != CHUNK_DEFLATE) return; // no-op for chunks that are already normal/raw
+ ch->type = CHUNK_NORMAL;
+ free(ch->data); // drop the malloc'd uncompressed copy
+ ch->data = ch->deflate_data; // alias the compressed bytes inside the file image instead
+ ch->len = ch->deflate_len;
+}
+
+/*
+ * Return true if the data in the chunk is identical (including the
+ * compressed representation, for gzip chunks).
+ */
+int AreChunksEqual(ImageChunk* a, ImageChunk* b) {
+ if (a->type != b->type) return 0; // different kinds can never match
+
+ switch (a->type) {
+ case CHUNK_NORMAL:
+ return a->len == b->len && memcmp(a->data, b->data, a->len) == 0;
+
+ case CHUNK_DEFLATE:
+ return a->deflate_len == b->deflate_len &&
+ memcmp(a->deflate_data, b->deflate_data, a->deflate_len) == 0; // compare compressed form; equal bits imply equal content
+
+ default:
+ fprintf(stderr, "unknown chunk type %d\n", a->type);
+ return 0;
+ }
+}
+
+/*
+ * Look for runs of adjacent normal chunks and compress them down into
+ * a single chunk. (Such runs can be produced when deflate chunks are
+ * changed to normal chunks.)
+ */
+void MergeAdjacentNormalChunks(ImageChunk* chunks, int* num_chunks) {
+ int out = 0; // next write index; compaction is done in place
+ int in_start = 0, in_end;
+ while (in_start < *num_chunks) {
+ if (chunks[in_start].type != CHUNK_NORMAL) {
+ in_end = in_start+1; // non-normal chunks pass through unmerged
+ } else {
+ // in_start is a normal chunk. Look for a run of normal chunks
+ // that constitute a solid block of data (ie, each chunk begins
+ // where the previous one ended).
+ for (in_end = in_start+1;
+ in_end < *num_chunks && chunks[in_end].type == CHUNK_NORMAL &&
+ (chunks[in_end].start ==
+ chunks[in_end-1].start + chunks[in_end-1].len &&
+ chunks[in_end].data ==
+ chunks[in_end-1].data + chunks[in_end-1].len);
+ ++in_end); // empty body: the loop only advances in_end past the contiguous run
+ }
+
+ if (in_end == in_start+1) { // run of length 1: copy through (if compaction has shifted positions)
+#if 0
+ printf("chunk %d is now %d\n", in_start, out);
+#endif
+ if (out != in_start) {
+ memcpy(chunks+out, chunks+in_start, sizeof(ImageChunk));
+ }
+ } else {
+#if 0
+ printf("collapse normal chunks %d-%d into %d\n", in_start, in_end-1, out);
+#endif
+
+ // Merge chunks [in_start, in_end-1] into one chunk. Since the
+ // data member of each chunk is just a pointer into an in-memory
+ // copy of the file, this can be done without recopying (the
+ // output chunk has the first chunk's start location and data
+ // pointer, and length equal to the sum of the input chunk
+ // lengths).
+ chunks[out].type = CHUNK_NORMAL;
+ chunks[out].start = chunks[in_start].start;
+ chunks[out].data = chunks[in_start].data;
+ chunks[out].len = chunks[in_end-1].len +
+ (chunks[in_end-1].start - chunks[in_start].start); // total span: last chunk's end minus first chunk's start
+ }
+
+ ++out;
+ in_start = in_end;
+ }
+ *num_chunks = out; // shrink the logical list; entries past 'out' are stale
+}
+
+ImageChunk* FindChunkByName(const char* name,
+ ImageChunk* chunks, int num_chunks) { // linear scan; returns first matching deflate chunk, or NULL
+ int i;
+ for (i = 0; i < num_chunks; ++i) {
+ if (chunks[i].type == CHUNK_DEFLATE && chunks[i].filename &&
+ strcmp(name, chunks[i].filename) == 0) { // only deflate chunks carry zip entry names
+ return chunks+i;
+ }
+ }
+ return NULL;
+}
+
+int main(int argc, char** argv) {
+ if (argc != 4 && argc != 5) {
+ usage: // NOTE(review): this label is never the target of a goto in this function
+ fprintf(stderr, "usage: %s [-z] <src-img> <tgt-img> <patch-file>\n",
+ argv[0]);
+ return 2;
+ }
+
+ int zip_mode = 0;
+
+ if (strcmp(argv[1], "-z") == 0) { // -z: inputs are zip archives rather than raw flash images
+ zip_mode = 1;
+ --argc;
+ ++argv;
+ }
+
+
+ int num_src_chunks;
+ ImageChunk* src_chunks;
+ int num_tgt_chunks;
+ ImageChunk* tgt_chunks;
+ int i;
+
+ if (zip_mode) {
+ if (ReadZip(argv[1], &num_src_chunks, &src_chunks, 1) == NULL) { // 1: prepend a pseudo chunk covering the whole source file
+ fprintf(stderr, "failed to break apart source zip file\n");
+ return 1;
+ }
+ if (ReadZip(argv[2], &num_tgt_chunks, &tgt_chunks, 0) == NULL) {
+ fprintf(stderr, "failed to break apart target zip file\n");
+ return 1;
+ }
+ } else {
+ if (ReadImage(argv[1], &num_src_chunks, &src_chunks) == NULL) {
+ fprintf(stderr, "failed to break apart source image\n");
+ return 1;
+ }
+ if (ReadImage(argv[2], &num_tgt_chunks, &tgt_chunks) == NULL) {
+ fprintf(stderr, "failed to break apart target image\n");
+ return 1;
+ }
+
+ // Verify that the source and target images have the same chunk
+ // structure (ie, the same sequence of deflate and normal chunks).
+
+ if (num_src_chunks != num_tgt_chunks) {
+ fprintf(stderr, "source and target don't have same number of chunks!\n");
+ return 1;
+ }
+ for (i = 0; i < num_src_chunks; ++i) {
+ if (src_chunks[i].type != tgt_chunks[i].type) {
+ fprintf(stderr, "source and target don't have same chunk "
+ "structure! (chunk %d)\n", i);
+ return 1;
+ }
+ }
+ }
+
+ for (i = 0; i < num_tgt_chunks; ++i) {
+ if (tgt_chunks[i].type == CHUNK_DEFLATE) {
+ // Confirm that given the uncompressed chunk data in the target, we
+ // can recompress it and get exactly the same bits as are in the
+ // input target image. If this fails, treat the chunk as a normal
+ // non-deflated chunk.
+ if (ReconstructDeflateChunk(tgt_chunks+i) < 0) {
+ printf("failed to reconstruct target deflate chunk %d [%s]; "
+ "treating as normal\n", i, tgt_chunks[i].filename);
+ ChangeDeflateChunkToNormal(tgt_chunks+i);
+ if (zip_mode) {
+ ImageChunk* src = FindChunkByName(tgt_chunks[i].filename, src_chunks, num_src_chunks);
+ if (src) {
+ ChangeDeflateChunkToNormal(src);
+ }
+ } else {
+ ChangeDeflateChunkToNormal(src_chunks+i); // image mode: keep the source list parallel to the target list
+ }
+ continue;
+ }
+
+ // If two deflate chunks are identical (eg, the kernel has not
+ // changed between two builds), treat them as normal chunks.
+ // This makes applypatch much faster -- it can apply a trivial
+ // patch to the compressed data, rather than uncompressing and
+ // recompressing to apply the trivial patch to the uncompressed
+ // data.
+ ImageChunk* src;
+ if (zip_mode) {
+ src = FindChunkByName(tgt_chunks[i].filename, src_chunks, num_src_chunks);
+ } else {
+ src = src_chunks+i;
+ }
+
+ if (src == NULL || AreChunksEqual(tgt_chunks+i, src)) { // no source counterpart, or unchanged content
+ ChangeDeflateChunkToNormal(tgt_chunks+i);
+ if (src) {
+ ChangeDeflateChunkToNormal(src);
+ }
+ }
+ }
+ }
+
+ // Merging neighboring normal chunks.
+ if (zip_mode) {
+ // For zips, we only need to do this to the target: deflated
+ // chunks are matched via filename, and normal chunks are patched
+ // using the entire source file as the source.
+ MergeAdjacentNormalChunks(tgt_chunks, &num_tgt_chunks);
+ } else {
+ // For images, we need to maintain the parallel structure of the
+ // chunk lists, so do the merging in both the source and target
+ // lists.
+ MergeAdjacentNormalChunks(tgt_chunks, &num_tgt_chunks);
+ MergeAdjacentNormalChunks(src_chunks, &num_src_chunks);
+ if (num_src_chunks != num_tgt_chunks) {
+ // This shouldn't happen.
+ fprintf(stderr, "merging normal chunks went awry\n");
+ return 1;
+ }
+ }
+
+ // Compute bsdiff patches for each chunk's data (the uncompressed
+ // data, in the case of deflate chunks).
+
+ printf("Construct patches for %d chunks...\n", num_tgt_chunks);
+ unsigned char** patch_data = malloc(num_tgt_chunks * sizeof(unsigned char*));
+ size_t* patch_size = malloc(num_tgt_chunks * sizeof(size_t));
+ for (i = 0; i < num_tgt_chunks; ++i) {
+ if (zip_mode) {
+ ImageChunk* src;
+ if (tgt_chunks[i].type == CHUNK_DEFLATE &&
+ (src = FindChunkByName(tgt_chunks[i].filename, src_chunks,
+ num_src_chunks))) {
+ patch_data[i] = MakePatch(src, tgt_chunks+i, patch_size+i);
+ } else {
+ patch_data[i] = MakePatch(src_chunks, tgt_chunks+i, patch_size+i); // src_chunks[0] is the whole-file pseudo chunk in zip mode
+ }
+ } else {
+ patch_data[i] = MakePatch(src_chunks+i, tgt_chunks+i, patch_size+i);
+ }
+ printf("patch %3d is %d bytes (of %d)\n",
+ i, patch_size[i], tgt_chunks[i].source_len); // NOTE(review): source_len is never set for chunks MakePatch turned RAW
+ }
+
+ // Figure out how big the imgdiff file header is going to be, so
+ // that we can correctly compute the offset of each bsdiff patch
+ // within the file.
+
+ size_t total_header_size = 12; // 8-byte "IMGDIFF2" magic + 4-byte chunk count
+ for (i = 0; i < num_tgt_chunks; ++i) {
+ total_header_size += 4; // 4-byte chunk type tag
+ switch (tgt_chunks[i].type) {
+ case CHUNK_NORMAL:
+ total_header_size += 8*3; // source start, source len, patch offset
+ break;
+ case CHUNK_DEFLATE:
+ total_header_size += 8*5 + 4*5; // five 8-byte fields + five 4-byte zlib parameters
+ break;
+ case CHUNK_RAW:
+ total_header_size += 4 + patch_size[i]; // raw data is stored inline in the header
+ break;
+ }
+ }
+
+ size_t offset = total_header_size; // first bsdiff patch begins right after the header
+
+ FILE* f = fopen(argv[3], "wb"); // NOTE(review): not checked for NULL
+
+ // Write out the headers.
+
+ fwrite("IMGDIFF2", 1, 8, f);
+ Write4(num_tgt_chunks, f);
+ for (i = 0; i < num_tgt_chunks; ++i) {
+ Write4(tgt_chunks[i].type, f);
+
+ switch (tgt_chunks[i].type) {
+ case CHUNK_NORMAL:
+ printf("chunk %3d: normal (%10d, %10d) %10d\n", i,
+ tgt_chunks[i].start, tgt_chunks[i].len, patch_size[i]);
+ Write8(tgt_chunks[i].source_start, f);
+ Write8(tgt_chunks[i].source_len, f);
+ Write8(offset, f);
+ offset += patch_size[i];
+ break;
+
+ case CHUNK_DEFLATE:
+ printf("chunk %3d: deflate (%10d, %10d) %10d %s\n", i,
+ tgt_chunks[i].start, tgt_chunks[i].deflate_len, patch_size[i],
+ tgt_chunks[i].filename);
+ Write8(tgt_chunks[i].source_start, f);
+ Write8(tgt_chunks[i].source_len, f);
+ Write8(offset, f);
+ Write8(tgt_chunks[i].source_uncompressed_len, f);
+ Write8(tgt_chunks[i].len, f);
+ Write4(tgt_chunks[i].level, f);
+ Write4(tgt_chunks[i].method, f);
+ Write4(tgt_chunks[i].windowBits, f);
+ Write4(tgt_chunks[i].memLevel, f);
+ Write4(tgt_chunks[i].strategy, f);
+ offset += patch_size[i];
+ break;
+
+ case CHUNK_RAW:
+ printf("chunk %3d: raw (%10d, %10d)\n", i,
+ tgt_chunks[i].start, tgt_chunks[i].len);
+ Write4(patch_size[i], f);
+ fwrite(patch_data[i], 1, patch_size[i], f); // raw chunk bytes go inline; no entry in the patch section
+ break;
+ }
+ }
+
+ // Append each chunk's bsdiff patch, in order.
+
+ for (i = 0; i < num_tgt_chunks; ++i) {
+ if (tgt_chunks[i].type != CHUNK_RAW) {
+ fwrite(patch_data[i], 1, patch_size[i], f);
+ }
+ }
+
+ fclose(f);
+
+ return 0;
+}
diff --git a/tools/applypatch/imgdiff.h b/tools/applypatch/imgdiff.h
new file mode 100644
index 0000000..f2069b4
--- /dev/null
+++ b/tools/applypatch/imgdiff.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Image patch chunk types
+#define CHUNK_NORMAL 0
+#define CHUNK_GZIP 1 // version 1 only
+#define CHUNK_DEFLATE 2 // version 2 only
+#define CHUNK_RAW 3 // version 2 only
+
+// The gzip header size is actually variable, but we currently don't
+// support gzipped data with any of the optional fields, so for now it
+// will always be ten bytes. See RFC 1952 for the definition of the
+// gzip format.
+#define GZIP_HEADER_LEN 10
+
+// The gzip footer size really is fixed.
+#define GZIP_FOOTER_LEN 8
diff --git a/tools/applypatch/imgdiff_test.sh b/tools/applypatch/imgdiff_test.sh
new file mode 100755
index 0000000..dcdb922
--- /dev/null
+++ b/tools/applypatch/imgdiff_test.sh
@@ -0,0 +1,118 @@
+#!/bin/bash
+#
+# A script for testing imgdiff/applypatch. It takes two full OTA
+# packages as arguments. It generates (on the host) patches for all
+# the zip/jar/apk files they have in common, as well as boot and
+# recovery images. It then applies the patches on the device (or
+# emulator) and checks that the resulting file is correct.
+
+EMULATOR_PORT=5580
+
+# set to 0 to use a device instead
+USE_EMULATOR=0
+
+# where on the device to do all the patching.
+WORK_DIR=/data/local/tmp
+
+START_OTA_PACKAGE=$1
+END_OTA_PACKAGE=$2
+
+# ------------------------
+
+tmpdir=$(mktemp -d) # host-side scratch dir; removed in cleanup
+
+if [ "$USE_EMULATOR" == 1 ]; then
+ emulator -wipe-data -noaudio -no-window -port $EMULATOR_PORT &
+ pid_emulator=$!
+ ADB="adb -s emulator-$EMULATOR_PORT "
+else
+ ADB="adb -d " # -d: use the single attached USB device
+fi
+
+echo "waiting to connect to device"
+$ADB wait-for-device
+
+# run a command on the device; exit with the exit status of the device
+# command.
+run_command() {
+ $ADB shell "$@" \; echo \$? | awk '{if (b) {print a}; a=$0; b=1} END {exit a}' # device output ends with the echoed exit status; awk drops that last line and exits with it
+}
+
+testname() {
+ echo
+ echo "$1"... # announce and remember the current test name for fail()
+ testname="$1"
+}
+
+fail() {
+ echo
+ echo FAIL: $testname
+ echo
+ [ "$open_pid" == "" ] || kill $open_pid # NOTE(review): open_pid is never set anywhere in this script
+ [ "$pid_emulator" == "" ] || kill $pid_emulator
+ exit 1
+}
+
+sha1() {
+ sha1sum $1 | awk '{print $1}' # first field is the hex digest
+}
+
+size() {
+ stat -c %s $1 | tr -d '\n' # GNU stat: file size in bytes; not portable to BSD/macOS stat
+}
+
+cleanup() {
+ # not necessary if we're about to kill the emulator, but nice for
+ # running on real devices or already-running emulators.
+ testname "removing test files"
+ run_command rm $WORK_DIR/applypatch
+ run_command rm $WORK_DIR/source
+ run_command rm $WORK_DIR/target
+ run_command rm $WORK_DIR/patch
+
+ [ "$pid_emulator" == "" ] || kill $pid_emulator
+
+ rm -rf $tmpdir
+}
+
+$ADB push $ANDROID_PRODUCT_OUT/system/bin/applypatch $WORK_DIR/applypatch # requires a prior device build (lunch/make)
+
+patch_and_apply() {
+ local fn=$1
+ shift # remaining args (e.g. -z) are forwarded to imgdiff
+
+ unzip -p $START_OTA_PACKAGE $fn > $tmpdir/source # -p: extract one member to stdout
+ unzip -p $END_OTA_PACKAGE $fn > $tmpdir/target
+ imgdiff "$@" $tmpdir/source $tmpdir/target $tmpdir/patch
+ bsdiff $tmpdir/source $tmpdir/target $tmpdir/patch.bs # plain bsdiff, for size comparison only
+ echo "patch for $fn is $(size $tmpdir/patch) [of $(size $tmpdir/target)] ($(size $tmpdir/patch.bs) with bsdiff)"
+ echo "$fn $(size $tmpdir/patch) of $(size $tmpdir/target) bsdiff $(size $tmpdir/patch.bs)" >> /tmp/stats.txt
+ $ADB push $tmpdir/source $WORK_DIR/source || fail "source push failed"
+ run_command rm /data/local/tmp/target # make sure a stale target can't mask a failed patch
+ $ADB push $tmpdir/patch $WORK_DIR/patch || fail "patch push failed"
+ run_command /data/local/tmp/applypatch /data/local/tmp/source \
+ /data/local/tmp/target $(sha1 $tmpdir/target) $(size $tmpdir/target) \
+ $(sha1 $tmpdir/source):/data/local/tmp/patch \
+ || fail "applypatch of $fn failed"
+ $ADB pull /data/local/tmp/target $tmpdir/result
+ diff -q $tmpdir/target $tmpdir/result || fail "patch output not correct!"
+}
+
+# --------------- basic execution ----------------------
+
+for i in $((zipinfo -1 $START_OTA_PACKAGE; zipinfo -1 $END_OTA_PACKAGE) | \
+ sort | uniq -d | egrep -e '[.](apk|jar|zip)$'); do
+ patch_and_apply $i -z # -z: zip mode; only names present in BOTH packages (uniq -d) are tested
+done
+patch_and_apply boot.img
+patch_and_apply system/recovery.img
+
+
+# --------------- cleanup ----------------------
+
+cleanup
+
+echo
+echo PASS
+echo
+
diff --git a/tools/applypatch/imgpatch.c b/tools/applypatch/imgpatch.c
new file mode 100644
index 0000000..697cc68
--- /dev/null
+++ b/tools/applypatch/imgpatch.c
@@ -0,0 +1,364 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// See imgdiff.c in this directory for a description of the patch file
+// format.
+
+#include <stdio.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <unistd.h>
+#include <string.h>
+
+#include "zlib.h"
+#include "mincrypt/sha.h"
+#include "applypatch.h"
+#include "imgdiff.h"
+#include "utils.h"
+
+/*
+ * Apply the patch given in 'patch_filename' to the source data given
+ * by (old_data, old_size). Write the patched output to the 'output'
+ * file, and update the SHA context with the output data as well.
+ * Return 0 on success.
+ */
+int ApplyImagePatch(const unsigned char* old_data, ssize_t old_size,
+ const char* patch_filename,
+ FILE* output, SHA_CTX* ctx) {
+ FILE* f;
+ if ((f = fopen(patch_filename, "rb")) == NULL) {
+ fprintf(stderr, "failed to open patch file\n");
+ return -1;
+ }
+
+ unsigned char header[12];
+ if (fread(header, 1, 12, f) != 12) {
+ fprintf(stderr, "failed to read patch file header\n");
+ return -1;
+ }
+
+ // IMGDIFF1 uses CHUNK_NORMAL and CHUNK_GZIP.
+ // IMGDIFF2 uses CHUNK_NORMAL, CHUNK_DEFLATE, and CHUNK_RAW.
+ if (memcmp(header, "IMGDIFF", 7) != 0 ||
+ (header[7] != '1' && header[7] != '2')) {
+ fprintf(stderr, "corrupt patch file header (magic number)\n");
+ return -1;
+ }
+
+ int num_chunks = Read4(header+8);
+
+ int i;
+ for (i = 0; i < num_chunks; ++i) {
+ // each chunk's header record starts with 4 bytes.
+ unsigned char chunk[4];
+ if (fread(chunk, 1, 4, f) != 4) {
+ fprintf(stderr, "failed to read chunk %d record\n", i);
+ return -1;
+ }
+
+ int type = Read4(chunk);
+
+ if (type == CHUNK_NORMAL) {
+ unsigned char normal_header[24];
+ if (fread(normal_header, 1, 24, f) != 24) {
+ fprintf(stderr, "failed to read chunk %d normal header data\n", i);
+ return -1;
+ }
+
+ size_t src_start = Read8(normal_header);
+ size_t src_len = Read8(normal_header+8);
+ size_t patch_offset = Read8(normal_header+16);
+
+ fprintf(stderr, "CHUNK %d: normal patch offset %d\n", i, patch_offset);
+
+ ApplyBSDiffPatch(old_data + src_start, src_len,
+ patch_filename, patch_offset,
+ output, ctx);
+ } else if (type == CHUNK_GZIP) {
+ // This branch is basically a duplicate of the CHUNK_DEFLATE
+ // branch, with a bit of extra processing for the gzip header
+ // and footer. I've avoided factoring the common code out since
+ // this branch will just be deleted when we drop support for
+ // IMGDIFF1.
+
+ // gzip chunks have an additional 64 + gzip_header_len + 8 bytes
+ // in their chunk header.
+ unsigned char* gzip = malloc(64);
+ if (fread(gzip, 1, 64, f) != 64) {
+ fprintf(stderr, "failed to read chunk %d initial gzip header data\n",
+ i);
+ return -1;
+ }
+ size_t gzip_header_len = Read4(gzip+60);
+ gzip = realloc(gzip, 64 + gzip_header_len + 8);
+ if (fread(gzip+64, 1, gzip_header_len+8, f) != gzip_header_len+8) {
+ fprintf(stderr, "failed to read chunk %d remaining gzip header data\n",
+ i);
+ return -1;
+ }
+
+ size_t src_start = Read8(gzip);
+ size_t src_len = Read8(gzip+8);
+ size_t patch_offset = Read8(gzip+16);
+
+ size_t expanded_len = Read8(gzip+24);
+ size_t target_len = Read8(gzip+32);
+ int gz_level = Read4(gzip+40);
+ int gz_method = Read4(gzip+44);
+ int gz_windowBits = Read4(gzip+48);
+ int gz_memLevel = Read4(gzip+52);
+ int gz_strategy = Read4(gzip+56);
+
+ fprintf(stderr, "CHUNK %d: gzip patch offset %d\n", i, patch_offset);
+
+ // Decompress the source data; the chunk header tells us exactly
+ // how big we expect it to be when decompressed.
+
+ unsigned char* expanded_source = malloc(expanded_len);
+ if (expanded_source == NULL) {
+ fprintf(stderr, "failed to allocate %d bytes for expanded_source\n",
+ expanded_len);
+ return -1;
+ }
+
+ z_stream strm;
+ strm.zalloc = Z_NULL;
+ strm.zfree = Z_NULL;
+ strm.opaque = Z_NULL;
+ strm.avail_in = src_len - (gzip_header_len + 8);
+ strm.next_in = (unsigned char*)(old_data + src_start + gzip_header_len);
+ strm.avail_out = expanded_len;
+ strm.next_out = expanded_source;
+
+ int ret;
+ ret = inflateInit2(&strm, -15);
+ if (ret != Z_OK) {
+ fprintf(stderr, "failed to init source inflation: %d\n", ret);
+ return -1;
+ }
+
+ // Because we've provided enough room to accommodate the output
+ // data, we expect one call to inflate() to suffice.
+ ret = inflate(&strm, Z_SYNC_FLUSH);
+ if (ret != Z_STREAM_END) {
+ fprintf(stderr, "source inflation returned %d\n", ret);
+ return -1;
+ }
+ // We should have filled the output buffer exactly.
+ if (strm.avail_out != 0) {
+ fprintf(stderr, "source inflation short by %d bytes\n", strm.avail_out);
+ return -1;
+ }
+ inflateEnd(&strm);
+
+ // Next, apply the bsdiff patch (in memory) to the uncompressed
+ // data.
+ unsigned char* uncompressed_target_data;
+ ssize_t uncompressed_target_size;
+ if (ApplyBSDiffPatchMem(expanded_source, expanded_len,
+ patch_filename, patch_offset,
+ &uncompressed_target_data,
+ &uncompressed_target_size) != 0) {
+ return -1;
+ }
+
+ // Now compress the target data and append it to the output.
+
+ // start with the gzip header.
+ fwrite(gzip+64, 1, gzip_header_len, output);
+ SHA_update(ctx, gzip+64, gzip_header_len);
+
+ // we're done with the expanded_source data buffer, so we'll
+ // reuse that memory to receive the output of deflate.
+ unsigned char* temp_data = expanded_source;
+ ssize_t temp_size = expanded_len;
+ if (temp_size < 32768) {
+ // ... unless the buffer is too small, in which case we'll
+ // allocate a fresh one.
+ free(temp_data);
+ temp_data = malloc(32768);
+ temp_size = 32768;
+ }
+
+ // now the deflate stream
+ strm.zalloc = Z_NULL;
+ strm.zfree = Z_NULL;
+ strm.opaque = Z_NULL;
+ strm.avail_in = uncompressed_target_size;
+ strm.next_in = uncompressed_target_data;
+ ret = deflateInit2(&strm, gz_level, gz_method, gz_windowBits,
+ gz_memLevel, gz_strategy);
+ do {
+ strm.avail_out = temp_size;
+ strm.next_out = temp_data;
+ ret = deflate(&strm, Z_FINISH);
+ size_t have = temp_size - strm.avail_out;
+
+ if (fwrite(temp_data, 1, have, output) != have) {
+ fprintf(stderr, "failed to write %d compressed bytes to output\n",
+ have);
+ return -1;
+ }
+ SHA_update(ctx, temp_data, have);
+ } while (ret != Z_STREAM_END);
+ deflateEnd(&strm);
+
+ // lastly, the gzip footer.
+ fwrite(gzip+64+gzip_header_len, 1, 8, output);
+ SHA_update(ctx, gzip+64+gzip_header_len, 8);
+
+ free(temp_data);
+ free(uncompressed_target_data);
+ free(gzip);
+ } else if (type == CHUNK_RAW) {
+ unsigned char raw_header[4];
+ if (fread(raw_header, 1, 4, f) != 4) {
+ fprintf(stderr, "failed to read chunk %d raw header data\n", i);
+ return -1;
+ }
+
+ size_t data_len = Read4(raw_header);
+
+ fprintf(stderr, "CHUNK %d: raw data %d\n", i, data_len);
+
+ unsigned char* temp = malloc(data_len);
+ if (fread(temp, 1, data_len, f) != data_len) {
+ fprintf(stderr, "failed to read chunk %d raw data\n", i);
+ return -1;
+ }
+ SHA_update(ctx, temp, data_len);
+ if (fwrite(temp, 1, data_len, output) != data_len) {
+ fprintf(stderr, "failed to write chunk %d raw data\n", i);
+ return -1;
+ }
+ } else if (type == CHUNK_DEFLATE) {
+ // deflate chunks have an additional 60 bytes in their chunk header.
+ unsigned char deflate_header[60];
+ if (fread(deflate_header, 1, 60, f) != 60) {
+ fprintf(stderr, "failed to read chunk %d deflate header data\n", i);
+ return -1;
+ }
+
+ size_t src_start = Read8(deflate_header);
+ size_t src_len = Read8(deflate_header+8);
+ size_t patch_offset = Read8(deflate_header+16);
+ size_t expanded_len = Read8(deflate_header+24);
+ size_t target_len = Read8(deflate_header+32);
+ int level = Read4(deflate_header+40);
+ int method = Read4(deflate_header+44);
+ int windowBits = Read4(deflate_header+48);
+ int memLevel = Read4(deflate_header+52);
+ int strategy = Read4(deflate_header+56);
+
+ fprintf(stderr, "CHUNK %d: deflate patch offset %d\n", i, patch_offset);
+
+ // Decompress the source data; the chunk header tells us exactly
+ // how big we expect it to be when decompressed.
+
+ unsigned char* expanded_source = malloc(expanded_len);
+ if (expanded_source == NULL) {
+ fprintf(stderr, "failed to allocate %d bytes for expanded_source\n",
+ expanded_len);
+ return -1;
+ }
+
+ z_stream strm;
+ strm.zalloc = Z_NULL;
+ strm.zfree = Z_NULL;
+ strm.opaque = Z_NULL;
+ strm.avail_in = src_len;
+ strm.next_in = (unsigned char*)(old_data + src_start);
+ strm.avail_out = expanded_len;
+ strm.next_out = expanded_source;
+
+ int ret;
+ ret = inflateInit2(&strm, -15);
+ if (ret != Z_OK) {
+ fprintf(stderr, "failed to init source inflation: %d\n", ret);
+ return -1;
+ }
+
+ // Because we've provided enough room to accommodate the output
+ // data, we expect one call to inflate() to suffice.
+ ret = inflate(&strm, Z_SYNC_FLUSH);
+ if (ret != Z_STREAM_END) {
+ fprintf(stderr, "source inflation returned %d\n", ret);
+ return -1;
+ }
+ // We should have filled the output buffer exactly.
+ if (strm.avail_out != 0) {
+ fprintf(stderr, "source inflation short by %d bytes\n", strm.avail_out);
+ return -1;
+ }
+ inflateEnd(&strm);
+
+ // Next, apply the bsdiff patch (in memory) to the uncompressed
+ // data.
+ unsigned char* uncompressed_target_data;
+ ssize_t uncompressed_target_size;
+ if (ApplyBSDiffPatchMem(expanded_source, expanded_len,
+ patch_filename, patch_offset,
+ &uncompressed_target_data,
+ &uncompressed_target_size) != 0) {
+ return -1;
+ }
+
+ // Now compress the target data and append it to the output.
+
+ // we're done with the expanded_source data buffer, so we'll
+ // reuse that memory to receive the output of deflate.
+ unsigned char* temp_data = expanded_source;
+ ssize_t temp_size = expanded_len;
+ if (temp_size < 32768) {
+ // ... unless the buffer is too small, in which case we'll
+ // allocate a fresh one.
+ free(temp_data);
+ temp_data = malloc(32768);
+ temp_size = 32768;
+ }
+
+ // now the deflate stream
+ strm.zalloc = Z_NULL;
+ strm.zfree = Z_NULL;
+ strm.opaque = Z_NULL;
+ strm.avail_in = uncompressed_target_size;
+ strm.next_in = uncompressed_target_data;
+ ret = deflateInit2(&strm, level, method, windowBits, memLevel, strategy);
+ do {
+ strm.avail_out = temp_size;
+ strm.next_out = temp_data;
+ ret = deflate(&strm, Z_FINISH);
+ size_t have = temp_size - strm.avail_out;
+
+ if (fwrite(temp_data, 1, have, output) != have) {
+ fprintf(stderr, "failed to write %d compressed bytes to output\n",
+ have);
+ return -1;
+ }
+ SHA_update(ctx, temp_data, have);
+ } while (ret != Z_STREAM_END);
+ deflateEnd(&strm);
+
+ free(temp_data);
+ free(uncompressed_target_data);
+ } else {
+ fprintf(stderr, "patch chunk %d is unknown type %d\n", i, type);
+ return -1;
+ }
+ }
+
+ return 0;
+}
diff --git a/tools/applypatch/main.c b/tools/applypatch/main.c
new file mode 100644
index 0000000..e25c730
--- /dev/null
+++ b/tools/applypatch/main.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+
+extern int applypatch(int argc, char** argv);
+
+// This program applies binary patches to files in a way that is safe
+// (the original file is not touched until we have the desired
+// replacement for it) and idempotent (it's okay to run this program
+// multiple times).
+//
+// - if the sha1 hash of <tgt-file> is <tgt-sha1>, does nothing and exits
+// successfully.
+//
+// - otherwise, if the sha1 hash of <src-file> is <src-sha1>, applies the
+// bsdiff <patch> to <src-file> to produce a new file (the type of patch
+// is automatically detected from the file header). If that new
+// file has sha1 hash <tgt-sha1>, moves it to replace <tgt-file>, and
+// exits successfully. Note that if <src-file> and <tgt-file> are
+// not the same, <src-file> is NOT deleted on success. <tgt-file>
+// may be the string "-" to mean "the same as src-file".
+//
+// - otherwise, or if any error is encountered, exits with non-zero
+// status.
+//
+// <src-file> (or <file> in check mode) may refer to an MTD partition
+// to read the source data. See the comments for the
+// LoadMTDContents() function in applypatch.c for the format of such a filename.
+
+int main(int argc, char** argv) {
+ int result = applypatch(argc, argv);
+ if (result == 2) {
+ fprintf(stderr,
+ "usage: %s <src-file> <tgt-file> <tgt-sha1> <tgt-size> "
+ "[<src-sha1>:<patch> ...]\n"
+ " or %s -c <file> [<sha1> ...]\n"
+ " or %s -s <bytes>\n"
+ " or %s -l\n"
+ "\n"
+ "Filenames may be of the form\n"
+ " MTD:<partition>:<len_1>:<sha1_1>:<len_2>:<sha1_2>:...\n"
+ "to specify reading from or writing to an MTD partition.\n\n",
+ argv[0], argv[0], argv[0], argv[0]);
+ }
+ return result;
+}
diff --git a/tools/applypatch/utils.c b/tools/applypatch/utils.c
new file mode 100644
index 0000000..912229b
--- /dev/null
+++ b/tools/applypatch/utils.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+
+#include "utils.h"
+
+/** Write a 4-byte value to f in little-endian order. */
+void Write4(int value, FILE* f) {
+ fputc(value & 0xff, f);
+ fputc((value >> 8) & 0xff, f);
+ fputc((value >> 16) & 0xff, f);
+ fputc((value >> 24) & 0xff, f);
+}
+
+/** Write an 8-byte value to f in little-endian order. */
+void Write8(long long value, FILE* f) {
+ fputc(value & 0xff, f);
+ fputc((value >> 8) & 0xff, f);
+ fputc((value >> 16) & 0xff, f);
+ fputc((value >> 24) & 0xff, f);
+ fputc((value >> 32) & 0xff, f);
+ fputc((value >> 40) & 0xff, f);
+ fputc((value >> 48) & 0xff, f);
+ fputc((value >> 56) & 0xff, f);
+}
+
+int Read2(unsigned char* p) {
+ return (int)(((unsigned int)p[1] << 8) |
+ (unsigned int)p[0]);
+}
+
+int Read4(unsigned char* p) {
+ return (int)(((unsigned int)p[3] << 24) |
+ ((unsigned int)p[2] << 16) |
+ ((unsigned int)p[1] << 8) |
+ (unsigned int)p[0]);
+}
+
+long long Read8(unsigned char* p) {
+ return (long long)(((unsigned long long)p[7] << 56) |
+ ((unsigned long long)p[6] << 48) |
+ ((unsigned long long)p[5] << 40) |
+ ((unsigned long long)p[4] << 32) |
+ ((unsigned long long)p[3] << 24) |
+ ((unsigned long long)p[2] << 16) |
+ ((unsigned long long)p[1] << 8) |
+ (unsigned long long)p[0]);
+}
diff --git a/tools/applypatch/utils.h b/tools/applypatch/utils.h
new file mode 100644
index 0000000..d6d6f1d
--- /dev/null
+++ b/tools/applypatch/utils.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _BUILD_TOOLS_APPLYPATCH_UTILS_H
+#define _BUILD_TOOLS_APPLYPATCH_UTILS_H
+
+#include <stdio.h>
+
+// Read and write little-endian values of various sizes.
+
+void Write4(int value, FILE* f);
+void Write8(long long value, FILE* f);
+int Read2(unsigned char* p);
+int Read4(unsigned char* p);
+long long Read8(unsigned char* p);
+
+#endif // _BUILD_TOOLS_APPLYPATCH_UTILS_H
diff --git a/tools/buildinfo.sh b/tools/buildinfo.sh
index 4e99bf5..5c738a2 100755
--- a/tools/buildinfo.sh
+++ b/tools/buildinfo.sh
@@ -7,6 +7,7 @@
echo "ro.build.display.id=$BUILD_DISPLAY_ID"
echo "ro.build.version.incremental=$BUILD_NUMBER"
echo "ro.build.version.sdk=$PLATFORM_SDK_VERSION"
+echo "ro.build.version.codename=$PLATFORM_VERSION_CODENAME"
echo "ro.build.version.release=$PLATFORM_VERSION"
echo "ro.build.date=`date`"
echo "ro.build.date.utc=`date +%s`"
@@ -19,6 +20,7 @@
echo "ro.product.name=$PRODUCT_NAME"
echo "ro.product.device=$TARGET_DEVICE"
echo "ro.product.board=$TARGET_BOOTLOADER_BOARD_NAME"
+echo "ro.product.cpu.abi=$TARGET_CPU_ABI"
echo "ro.product.manufacturer=$PRODUCT_MANUFACTURER"
echo "ro.product.locale.language=$PRODUCT_DEFAULT_LANGUAGE"
echo "ro.product.locale.region=$PRODUCT_DEFAULT_REGION"
diff --git a/tools/dexpreopt/afar/Android.mk b/tools/dexpreopt/afar/Android.mk
index d224675..9f1b987 100644
--- a/tools/dexpreopt/afar/Android.mk
+++ b/tools/dexpreopt/afar/Android.mk
@@ -24,6 +24,6 @@
LOCAL_SHARED_LIBRARIES := libz
LOCAL_MODULE := afar
-LOCAL_MODULE_TAGS := tests
+LOCAL_MODULE_TAGS := optional
include $(BUILD_EXECUTABLE)
diff --git a/tools/dexpreopt/dexopt-wrapper/Android.mk b/tools/dexpreopt/dexopt-wrapper/Android.mk
index e6ca389..ae2b6a3 100644
--- a/tools/dexpreopt/dexopt-wrapper/Android.mk
+++ b/tools/dexpreopt/dexopt-wrapper/Android.mk
@@ -31,6 +31,6 @@
LOCAL_MODULE := dexopt-wrapper
-LOCAL_MODULE_TAGS := tests
+LOCAL_MODULE_TAGS := optional
include $(BUILD_EXECUTABLE)
diff --git a/tools/droiddoc/src/Android.mk b/tools/droiddoc/src/Android.mk
index bf404b7..30270b5 100644
--- a/tools/droiddoc/src/Android.mk
+++ b/tools/droiddoc/src/Android.mk
@@ -47,6 +47,7 @@
SampleTagInfo.java \
Scoped.java \
SeeTagInfo.java \
+ SinceTagger.java \
Sorter.java \
SourcePositionInfo.java \
Stubs.java \
@@ -57,6 +58,7 @@
TypeInfo.java
LOCAL_JAVA_LIBRARIES := \
+ apicheck \
clearsilver
LOCAL_CLASSPATH := \
diff --git a/tools/droiddoc/src/ClassInfo.java b/tools/droiddoc/src/ClassInfo.java
index 2fd65e0..0941595 100644
--- a/tools/droiddoc/src/ClassInfo.java
+++ b/tools/droiddoc/src/ClassInfo.java
@@ -907,6 +907,7 @@
if (kind != null) {
data.setValue("class.kind", kind);
}
+ data.setValue("class.since", getSince());
// the containing package -- note that this can be passed to type_link,
// but it also contains the list of all of the packages
diff --git a/tools/droiddoc/src/DocInfo.java b/tools/droiddoc/src/DocInfo.java
index 2530dc2..3abb367 100644
--- a/tools/droiddoc/src/DocInfo.java
+++ b/tools/droiddoc/src/DocInfo.java
@@ -51,8 +51,17 @@
public abstract ContainerInfo parent();
+ public void setSince(String since) {
+ mSince = since;
+ }
+
+ public String getSince() {
+ return mSince;
+ }
+
private String mRawCommentText;
Comment mComment;
SourcePositionInfo mPosition;
+ private String mSince;
}
diff --git a/tools/droiddoc/src/DroidDoc.java b/tools/droiddoc/src/DroidDoc.java
index f664c41..b487629 100644
--- a/tools/droiddoc/src/DroidDoc.java
+++ b/tools/droiddoc/src/DroidDoc.java
@@ -97,6 +97,7 @@
String apiFile = null;
String debugStubsFile = "";
HashSet<String> stubPackages = null;
+ SinceTagger sinceTagger = new SinceTagger();
root = r;
@@ -186,6 +187,9 @@
apiXML = true;
apiFile = a[1];
}
+ else if (a[0].equals("-since")) {
+ sinceTagger.addVersion(a[1], a[2]);
+ }
}
// read some prefs from the template
@@ -204,6 +208,9 @@
TodoFile.writeTodoFile(todoFile);
}
+ // Apply @since tags from the XML file
+ sinceTagger.tagAll(Converter.rootClasses());
+
// HTML Pages
if (ClearPage.htmlDir != null) {
writeHTMLPages();
@@ -244,7 +251,7 @@
if (stubsDir != null) {
Stubs.writeStubs(stubsDir, apiXML, apiFile, stubPackages);
}
-
+
if (sdkValuePath != null) {
writeSdkValues(sdkValuePath);
}
@@ -394,9 +401,12 @@
if (option.equals("-apixml")) {
return 2;
}
+ if (option.equals("-since")) {
+ return 3;
+ }
return 0;
}
-
+
public static boolean validOptions(String[][] options, DocErrorReporter r)
{
for (String[] a: options) {
@@ -762,6 +772,7 @@
String name = pkg.name();
data.setValue("package.name", name);
+ data.setValue("package.since", pkg.getSince());
data.setValue("package.descr", "...description...");
makeClassListHDF(data, "package.interfaces",
diff --git a/tools/droiddoc/src/Errors.java b/tools/droiddoc/src/Errors.java
index dfeac88..95439f1 100644
--- a/tools/droiddoc/src/Errors.java
+++ b/tools/droiddoc/src/Errors.java
@@ -114,6 +114,7 @@
public static Error DEPRECATION_MISMATCH = new Error(13, WARNING);
public static Error MISSING_COMMENT = new Error(14, WARNING);
public static Error IO_ERROR = new Error(15, HIDDEN);
+ public static Error NO_SINCE_DATA = new Error(16, WARNING);
public static Error[] ERRORS = {
UNRESOLVED_LINK,
@@ -129,6 +130,7 @@
HIDDEN_SUPERCLASS,
DEPRECATED,
IO_ERROR,
+ NO_SINCE_DATA,
};
public static boolean setErrorLevel(int code, int level) {
diff --git a/tools/droiddoc/src/FieldInfo.java b/tools/droiddoc/src/FieldInfo.java
index 536d798..1c975e4 100644
--- a/tools/droiddoc/src/FieldInfo.java
+++ b/tools/droiddoc/src/FieldInfo.java
@@ -223,6 +223,7 @@
TagInfo.makeHDF(data, base + ".descr", inlineTags());
TagInfo.makeHDF(data, base + ".deprecated", comment().deprecatedTags());
TagInfo.makeHDF(data, base + ".seeAlso", comment().seeTags());
+ data.setValue(base + ".since", getSince());
data.setValue(base + ".final", isFinal() ? "final" : "");
data.setValue(base + ".static", isStatic() ? "static" : "");
if (isPublic()) {
diff --git a/tools/droiddoc/src/MethodInfo.java b/tools/droiddoc/src/MethodInfo.java
index ca30665..bded88b 100644
--- a/tools/droiddoc/src/MethodInfo.java
+++ b/tools/droiddoc/src/MethodInfo.java
@@ -15,9 +15,8 @@
*/
import org.clearsilver.HDF;
-import org.clearsilver.CS;
+
import java.util.*;
-import java.io.*;
public class MethodInfo extends MemberInfo
{
@@ -357,6 +356,19 @@
return s;
}
+ /**
+ * Returns a name consistent with the {@link
+ * com.android.apicheck.MethodInfo#getHashableName()}.
+ */
+ public String getHashableName() {
+ StringBuilder result = new StringBuilder();
+ result.append(name());
+ for (ParameterInfo pInfo : mParameters) {
+ result.append(":").append(pInfo.type().fullName());
+ }
+ return result.toString();
+ }
+
private boolean inList(ClassInfo item, ThrowsTagInfo[] list)
{
int len = list.length;
@@ -545,6 +557,7 @@
TagInfo.makeHDF(data, base + ".descr", inlineTags());
TagInfo.makeHDF(data, base + ".deprecated", deprecatedTags());
TagInfo.makeHDF(data, base + ".seeAlso", seeTags());
+ data.setValue(base + ".since", getSince());
ParamTagInfo.makeHDF(data, base + ".paramTags", paramTags());
AttrTagInfo.makeReferenceHDF(data, base + ".attrRefs", comment().attrTags());
ThrowsTagInfo.makeHDF(data, base + ".throws", throwsTags());
diff --git a/tools/droiddoc/src/SinceTagger.java b/tools/droiddoc/src/SinceTagger.java
new file mode 100644
index 0000000..a34814c
--- /dev/null
+++ b/tools/droiddoc/src/SinceTagger.java
@@ -0,0 +1,182 @@
+// Copyright 2009 Google Inc. All Rights Reserved.
+
+import com.android.apicheck.*;
+
+import java.util.*;
+
+/**
+ * Applies version information to the DroidDoc class model from apicheck XML
+ * files. Sample usage:
+ * <pre>
+ * ClassInfo[] classInfos = ...
+ *
+ * SinceTagger sinceTagger = new SinceTagger();
+ * sinceTagger.addVersion("frameworks/base/api/1.xml", "Android 1.0");
+ * sinceTagger.addVersion("frameworks/base/api/2.xml", "Android 1.5");
+ * sinceTagger.tagAll(...);
+ * </pre>
+ */
+public class SinceTagger {
+
+ private final Map<String, String> xmlToName
+ = new LinkedHashMap<String, String>();
+
+ /**
+ * Specifies the apicheck XML file and the API version it holds. Calls to
+ * this method should be made in order from oldest version to newest.
+ */
+ public void addVersion(String file, String name) {
+ xmlToName.put(file, name);
+ }
+
+ public void tagAll(ClassInfo[] classDocs) {
+ // read through the XML files in order, applying their since information
+ // to the Javadoc models
+ for (Map.Entry<String, String> versionSpec : xmlToName.entrySet()) {
+ String xmlFile = versionSpec.getKey();
+ String versionName = versionSpec.getValue();
+ ApiInfo specApi = new ApiCheck().parseApi(xmlFile);
+
+ applyVersionsFromSpec(versionName, specApi, classDocs);
+ }
+
+ if (!xmlToName.isEmpty()) {
+ warnForMissingVersions(classDocs);
+ }
+ }
+
+ /**
+ * Applies the version information to {@code classDocs} where not already
+ * present.
+ *
+ * @param versionName the version name
+ * @param specApi the spec for this version. If a symbol is in this spec, it
+ * was present in the named version
+ * @param classDocs the doc model to update
+ */
+ private void applyVersionsFromSpec(String versionName,
+ ApiInfo specApi, ClassInfo[] classDocs) {
+ for (ClassInfo classDoc : classDocs) {
+ com.android.apicheck.PackageInfo packageSpec
+ = specApi.getPackages().get(classDoc.containingPackage().name());
+
+ if (packageSpec == null) {
+ continue;
+ }
+
+ com.android.apicheck.ClassInfo classSpec
+ = packageSpec.allClasses().get(classDoc.name());
+
+ if (classSpec == null) {
+ continue;
+ }
+
+ versionPackage(versionName, classDoc.containingPackage());
+ versionClass(versionName, classDoc);
+ versionConstructors(versionName, classSpec, classDoc);
+ versionFields(versionName, classSpec, classDoc);
+ versionMethods(versionName, classSpec, classDoc);
+ }
+ }
+
+ /**
+ * Applies version information to {@code doc} where not already present.
+ */
+ private void versionPackage(String versionName, PackageInfo doc) {
+ if (doc.getSince() == null) {
+ doc.setSince(versionName);
+ }
+ }
+
+ /**
+ * Applies version information to {@code doc} where not already present.
+ */
+ private void versionClass(String versionName, ClassInfo doc) {
+ if (doc.getSince() == null) {
+ doc.setSince(versionName);
+ }
+ }
+
+ /**
+ * Applies version information from {@code spec} to {@code doc} where not
+ * already present.
+ */
+ private void versionConstructors(String versionName,
+ com.android.apicheck.ClassInfo spec, ClassInfo doc) {
+ for (MethodInfo constructor : doc.constructors()) {
+ if (constructor.getSince() == null
+ && spec.allConstructors().containsKey(constructor.getHashableName())) {
+ constructor.setSince(versionName);
+ }
+ }
+ }
+
+ /**
+ * Applies version information from {@code spec} to {@code doc} where not
+ * already present.
+ */
+ private void versionFields(String versionName,
+ com.android.apicheck.ClassInfo spec, ClassInfo doc) {
+ for (FieldInfo field : doc.fields()) {
+ if (field.getSince() == null
+ && spec.allFields().containsKey(field.name())) {
+ field.setSince(versionName);
+ }
+ }
+ }
+
+ /**
+ * Applies version information from {@code spec} to {@code doc} where not
+ * already present.
+ */
+ private void versionMethods(String versionName,
+ com.android.apicheck.ClassInfo spec, ClassInfo doc) {
+ for (MethodInfo method : doc.methods()) {
+ if (method.getSince() != null) {
+ continue;
+ }
+
+ for (com.android.apicheck.ClassInfo superclass : spec.hierarchy()) {
+ if (superclass.allMethods().containsKey(method.getHashableName())) {
+ method.setSince(versionName);
+ break;
+ }
+ }
+ }
+ }
+
+ /**
+ * Warns if any symbols are missing version information. When configured
+ * properly, this will yield zero warnings because {@code apicheck}
+ * guarantees that all symbols are present in the most recent API.
+ */
+ private void warnForMissingVersions(ClassInfo[] classDocs) {
+ for (ClassInfo claz : classDocs) {
+ if (claz.getSince() == null) {
+ Errors.error(Errors.NO_SINCE_DATA, claz.position(),
+ "XML missing class " + claz.qualifiedName());
+ }
+ for (FieldInfo field : claz.fields()) {
+ if (field.getSince() == null) {
+ Errors.error(Errors.NO_SINCE_DATA, field.position(),
+ "XML missing field "
+ + claz.qualifiedName() + "#" + field .name());
+ }
+ }
+ for (MethodInfo constructor : claz.constructors()) {
+ if (constructor.getSince() == null) {
+ Errors.error(Errors.NO_SINCE_DATA, constructor.position(),
+ "XML missing constructor "
+ + claz.qualifiedName() + "#" + constructor.getHashableName());
+ }
+ }
+ for (MethodInfo method : claz.methods()) {
+ if (method.getSince() == null) {
+ Errors.error(Errors.NO_SINCE_DATA, method.position(),
+ "XML missing method "
+ + claz.qualifiedName() + "#" + method .getHashableName());
+ }
+ }
+ }
+ }
+}
diff --git a/tools/droiddoc/templates-pdk/customization.cs b/tools/droiddoc/templates-pdk/customization.cs
index 01b6e96..e2d6682 100644
--- a/tools/droiddoc/templates-pdk/customization.cs
+++ b/tools/droiddoc/templates-pdk/customization.cs
@@ -5,7 +5,7 @@
def:custom_masthead() ?>
<div id="header">
<div id="headerLeft">
- <a href="<?cs var:toroot ?>index.html" tabindex="-1"><img
+ <a href="<?cs var:toroot ?>guide/index.html" tabindex="-1"><img
src="<?cs var:toroot ?>assets/images/open_source.png" alt="Open Source Project: Platform Development Kit" /></a>
<ul class="<?cs
if:reference ?> <?cs
@@ -15,10 +15,10 @@
elif:community ?> <?cs
elif:publish ?> <?cs
elif:about ?> <?cs /if ?>">
- <li id="guide-link"><a href="<?cs var:toroot ?>index.html"
- onClick="return loadLast('guide)'"><span>Porting Guide</span></a></li>
+ <!--<li id="guide-link"><a href="<?cs var:toroot ?>guide/index.html"
+ onClick="return loadLast('guide)'"><span>Dev Guide</span></a></li>
<li id="opensource-link"><a href="http://source.android.com/"
- onClick="return loadLast('open')"><span>Open Source</span></a></li>
+ onClick="return loadLast('open')"><span>Open Source</span></a></li>-->
</ul>
</div>
<div id="headerRight">
diff --git a/tools/droiddoc/templates-pdk/head_tag.cs b/tools/droiddoc/templates-pdk/head_tag.cs
index 1a7f1a8..47b332a 100644
--- a/tools/droiddoc/templates-pdk/head_tag.cs
+++ b/tools/droiddoc/templates-pdk/head_tag.cs
@@ -7,7 +7,7 @@
if:sdk.version ?> (<?cs
var:sdk.version ?>)<?cs
/if ?> | <?cs
- /if ?>Android Developers</title>
+ /if ?>Android Open Source</title>
<link href="<?cs var:toroot ?>assets/android-developer-docs-devguide.css" rel="stylesheet" type="text/css" />
<link href="<?cs var:toroot ?>assets-pdk/pdk-local.css" rel="stylesheet" type="text/css" />
<script src="<?cs var:toroot ?>assets/search_autocomplete.js" type="text/javascript"></script>
diff --git a/tools/droiddoc/templates-sdk/customization.cs b/tools/droiddoc/templates-sdk/customization.cs
index 384e43f..0cb85e8 100644
--- a/tools/droiddoc/templates-sdk/customization.cs
+++ b/tools/droiddoc/templates-sdk/customization.cs
@@ -1,45 +1,74 @@
<?cs # This default template file is meant to be replaced. ?>
<?cs # Use the -tempatedir arg to javadoc to set your own directory with a replacement for this file in it. ?>
+
+<?cs # The default search box that goes in the header ?><?cs
+def:default_search_box() ?>
+ <div id="search" >
+ <div id="searchForm">
+ <form accept-charset="utf-8" class="gsc-search-box"
+ onsubmit="return submit_search()">
+ <table class="gsc-search-box" cellpadding="0" cellspacing="0"><tbody>
+ <tr>
+ <td class="gsc-input">
+ <input id="search_autocomplete" class="gsc-input" type="text" size="33" autocomplete="off"
+ title="search developer docs" name="q"
+ value="search developer docs"
+ onFocus="search_focus_changed(this, true)"
+ onBlur="search_focus_changed(this, false)"
+ onkeydown="return search_changed(event, true, '<?cs var:toroot?>')"
+ onkeyup="return search_changed(event, false, '<?cs var:toroot?>')" />
+ <div id="search_filtered_div" class="no-display">
+ <table id="search_filtered" cellspacing=0>
+ </table>
+ </div>
+ </td>
+ <td class="gsc-search-button">
+ <input type="submit" value="Search" title="search" id="search-button" class="gsc-search-button" />
+ </td>
+ <td class="gsc-clear-button">
+ <div title="clear results" class="gsc-clear-button"> </div>
+ </td>
+ </tr></tbody>
+ </table>
+ </form>
+ </div><!-- searchForm -->
+ </div><!-- search --><?cs
+/def ?>
+
<?cs
def:custom_masthead() ?>
<div id="header">
<div id="headerLeft">
<a href="<?cs var:toroot ?>index.html" tabindex="-1"><img
src="<?cs var:toroot ?>assets/images/bg_logo.png" alt="Android Developers" /></a>
- <ul class="<?cs
- if:reference ?>reference<?cs
- elif:guide ?>guide<?cs
- elif:sdk ?>sdk<?cs
- elif:home ?>home<?cs
- elif:community ?>community<?cs
- elif:publish ?>publish<?cs
- elif:about ?>about<?cs /if ?>">
- <li id="home-link"><a href="<?cs var:toroot ?><?cs
- if:android.whichdoc != "online" ?>offline.html<?cs
- else ?>index.html<?cs /if ?>">
- <span>Home</span></a></li>
- <li id="sdk-link"><a href="<?cs var:toroot ?>sdk/<?cs var:sdk.current ?>/index.html"><span>SDK</span></a></li>
- <li id="guide-link"><a href="<?cs var:toroot ?>guide/index.html"
- onClick="return loadLast('guide')"><span>Dev Guide</span></a></li>
- <li id="reference-link"><a href="<?cs var:toroot ?>reference/packages.html"
- onClick="return loadLast('reference')"><span>Reference</span></a></li>
- <li><a href="http://android-developers.blogspot.com"><span>Blog</span></a></li>
- <li id="community-link"><a href="<?cs var:toroot ?>community/index.html"><span>Community</span></a></li>
- </ul>
+ <?cs include:"header_tabs.cs" ?> <?cs # The links are extracted so we can better manage localization ?>
</div>
<div id="headerRight">
<div id="headerLinks">
- <!-- <img src="<?cs var:toroot ?>assets/images/icon_world.jpg" alt="" /> -->
- <span class="text">
- <!-- <a href="#">English</a> | -->
- <a href="http://www.android.com">Android.com</a>
- </span>
+ <!-- <img src="<?cs var:toroot ?>assets/images/icon_world.jpg" alt="" /> -->
+ <span id="language">
+ <select name="language" onChange="changeLangPref(this.value)">
+ <option value="en">English</option>
+ <!-- <option value="ja"></option> -->
+ </select>
+ <script type="text/javascript">
+ <!--
+ loadLangPref();
+ //-->
+ </script>
+ </span>
+ <a href="http://www.android.com">Android.com</a>
</div><?cs
call:default_search_box() ?>
</div><!-- headerRight -->
+ <script type="text/javascript">
+ <!--
+ changeTabLang(getLangPref());
+ //-->
+ </script>
</div><!-- header --><?cs
-/def ?><?cs # custom_masthead ?>
+/def ?>
<?cs
def:sdk_nav() ?>
@@ -66,22 +95,80 @@
</script>
<?cs /def ?>
-<?cs
-def:publish_nav() ?>
- <div class="g-section g-tpl-180" id="body-content">
- <div class="g-unit g-first" id="side-nav">
- <div id="devdoc-nav"><?cs
- include:"../../../../frameworks/base/docs/html/publish/publish_toc.cs" ?>
- </div>
+<?cs # The default side navigation for the reference docs ?><?cs
+def:default_left_nav() ?>
+ <div class="g-section g-tpl-240" id="body-content">
+ <div class="g-unit g-first side-nav-resizable" id="side-nav">
+ <div id="swapper">
+ <div id="nav-panels">
+ <div id="resize-packages-nav">
+ <div id="packages-nav">
+ <div id="index-links"><nobr>
+ <a href="<?cs var:toroot ?>reference/packages.html" <?cs if:(page.title == "Package Index") ?>class="selected"<?cs /if ?> >Package Index</a> |
+ <a href="<?cs var:toroot ?>reference/classes.html" <?cs if:(page.title == "Class Index") ?>class="selected"<?cs /if ?>>Class Index</a></nobr>
+ </div>
+ <ul><?cs
+ each:pkg=docs.packages ?>
+ <li <?cs if:(class.package.name == pkg.name) || (package.name == pkg.name)?>class="selected"<?cs /if ?>><?cs call:package_link(pkg) ?></li><?cs
+ /each ?>
+ </ul><br/>
+ </div> <!-- end packages -->
+ </div> <!-- end resize-packages -->
+ <div id="classes-nav"><?cs
+ if:subcount(class.package) ?>
+ <ul>
+ <?cs call:list("Interfaces", class.package.interfaces) ?>
+ <?cs call:list("Classes", class.package.classes) ?>
+ <?cs call:list("Enums", class.package.enums) ?>
+ <?cs call:list("Exceptions", class.package.exceptions) ?>
+ <?cs call:list("Errors", class.package.errors) ?>
+ </ul><?cs
+ elif:subcount(package) ?>
+ <ul>
+ <?cs call:class_link_list("Interfaces", package.interfaces) ?>
+ <?cs call:class_link_list("Classes", package.classes) ?>
+ <?cs call:class_link_list("Enums", package.enums) ?>
+ <?cs call:class_link_list("Exceptions", package.exceptions) ?>
+ <?cs call:class_link_list("Errors", package.errors) ?>
+ </ul><?cs
+ else ?>
+ <script>
+ /*addLoadEvent(maxPackageHeight);*/
+ </script>
+ <p style="padding:10px">Select a package to view its members</p><?cs
+ /if ?><br/>
+ </div><!-- end classes -->
+ </div><!-- end nav-panels -->
+ <div id="nav-tree" style="display:none">
+ <div id="index-links"><nobr>
+ <a href="<?cs var:toroot ?>reference/packages.html" <?cs if:(page.title == "Package Index") ?>class="selected"<?cs /if ?> >Package Index</a> |
+ <a href="<?cs var:toroot ?>reference/classes.html" <?cs if:(page.title == "Class Index") ?>class="selected"<?cs /if ?>>Class Index</a></nobr>
+ </div>
+ </div><!-- end nav-tree -->
+ </div><!-- end swapper -->
</div> <!-- end side-nav -->
-<?cs /def ?>
+ <script>
+ if (!isMobile) {
+ $("<a href='#' id='nav-swap' onclick='swapNav();return false;' style='font-size:10px;line-height:9px;margin-left:1em;text-decoration:none;'><span id='tree-link'>Use Tree Navigation</span><span id='panel-link' style='display:none'>Use Panel Navigation</span></a>").appendTo("#side-nav");
+ chooseDefaultNav();
+ if ($("#nav-tree").is(':visible')) init_navtree("nav-tree", "<?cs var:toroot ?>", NAVTREE_DATA);
+ else {
+ addLoadEvent(function() {
+ scrollIntoView("packages-nav");
+ scrollIntoView("classes-nav");
+ });
+ }
+ $("#swapper").css({borderBottom:"2px solid #aaa"});
+ } else {
+ swapNav(); // tree view should be used on mobile
+ }
+ </script><?cs
+/def ?>
<?cs
def:custom_left_nav() ?><?cs
if:guide ?><?cs
call:guide_nav() ?><?cs
- elif:publish ?><?cs
- call:publish_nav() ?><?cs
elif:sdk ?><?cs
call:sdk_nav() ?><?cs
else ?><?cs
diff --git a/tools/droiddoc/templates-sdk/header_tabs.cs b/tools/droiddoc/templates-sdk/header_tabs.cs
new file mode 100644
index 0000000..12b747e
--- /dev/null
+++ b/tools/droiddoc/templates-sdk/header_tabs.cs
@@ -0,0 +1,35 @@
+<ul id="header-tabs" class="<?cs
+ if:reference ?>reference<?cs
+ elif:guide ?>guide<?cs
+ elif:sdk ?>sdk<?cs
+ elif:home ?>home<?cs
+ elif:community ?>community<?cs
+ elif:publish ?>publish<?cs
+ elif:about ?>about<?cs /if ?>">
+
+ <li id="home-link"><a href="<?cs var:toroot ?><?cs if:android.whichdoc != "online" ?>offline.html<?cs else ?>index.html<?cs /if ?>">
+ <span class="en">Home</span>
+ <span class="ja"></span>
+ </a></li>
+ <li id="sdk-link"><a href="<?cs var:toroot ?>sdk/<?cs var:sdk.current ?>/index.html">
+ <span class="en">SDK</span>
+ <span class="ja"></span>
+ </a></li>
+ <li id="guide-link"><a href="<?cs var:toroot ?>guide/index.html" onClick="return loadLast('guide')">
+ <span class="en">Dev Guide</span>
+ <span class="ja"></span>
+ </a></li>
+ <li id="reference-link"><a href="<?cs var:toroot ?>reference/packages.html" onClick="return loadLast('reference')">
+ <span class="en">Reference</span>
+ <span class="ja"></span>
+ </a></li>
+ <li><a href="http://android-developers.blogspot.com">
+ <span class="en">Blog</span>
+ <span class="ja"></span>
+ </a></li>
+ <li id="community-link"><a href="<?cs var:toroot ?>community/index.html">
+ <span class="en">Community</span>
+ <span class="ja"></span>
+ </a></li>
+
+</ul>
\ No newline at end of file
diff --git a/tools/droiddoc/templates/assets/android-developer-core.css b/tools/droiddoc/templates/assets/android-developer-core.css
index 79e40b2..8a1b9cd 100644
--- a/tools/droiddoc/templates/assets/android-developer-core.css
+++ b/tools/droiddoc/templates/assets/android-developer-core.css
@@ -268,20 +268,16 @@
#headerLinks {
margin:10px 10px 0 0;
height:13px;
-}
-
-#headerLinks .text {
- text-decoration: none;
- color: #7FA9B5;
font-size: 11px;
vertical-align: top;
}
#headerLinks a {
- text-decoration: underline;
color: #7FA9B5;
- font-size: 11px;
- vertical-align: top;
+}
+
+#language {
+ margin:0 10px;
}
#search {
diff --git a/tools/droiddoc/templates/assets/android-developer-docs.js b/tools/droiddoc/templates/assets/android-developer-docs.js
index 2a8c3bf..016fa4e 100644
--- a/tools/droiddoc/templates/assets/android-developer-docs.js
+++ b/tools/droiddoc/templates/assets/android-developer-docs.js
@@ -4,7 +4,7 @@
var sidenav;
var content;
var HEADER_HEIGHT = 117;
-var cookie_style = 'android_developer';
+var cookie_namespace = 'android_developer';
var NAV_PREF_TREE = "tree";
var NAV_PREF_PANELS = "panels";
var nav_pref;
@@ -70,8 +70,8 @@
$("#nav-tree").css({height:swapperHeight + "px"});
}
-function getCookie(cookie) {
- var myCookie = cookie_style+"_"+cookie+"=";
+function readCookie(cookie) {
+ var myCookie = cookie_namespace+"_"+cookie+"=";
if (document.cookie) {
var index = document.cookie.indexOf(myCookie);
if (index != -1) {
@@ -87,16 +87,15 @@
return 0;
}
-function writeCookie(cookie, val, path, expiration) {
+function writeCookie(cookie, val, section, expiration) {
if (!val) return;
- var date = new Date();
- date.setTime(date.getTime()+(10*365*24*60*60*1000)); // default expiration is one week
- expiration = expiration ? expiration : date.toGMTString();
- if (location.href.indexOf("/reference/") != -1) {
- document.cookie = cookie_style+'_reference_'+cookie+'='+val+'; expires='+expiration+'; path='+'/'+path;
- } else if (location.href.indexOf("/guide/") != -1) {
- document.cookie = cookie_style+'_guide_'+cookie+'='+val+'; expires='+expiration+'; path='+'/'+path;
+ section = section == null ? "_" : "_"+section+"_";
+ if (expiration == null) {
+ var date = new Date();
+ date.setTime(date.getTime()+(10*365*24*60*60*1000)); // default expiration is one week
+ expiration = date.toGMTString();
}
+ document.cookie = cookie_namespace+section+cookie+"="+val+"; expires="+expiration+"; path=/";
}
function init() {
@@ -116,8 +115,8 @@
if (!isMobile) {
$("#resize-packages-nav").resizable({handles: "s", resize: function(e, ui) { resizeHeight(); } });
$(".side-nav-resizable").resizable({handles: "e", resize: function(e, ui) { resizeWidth(); } });
- var cookieWidth = getCookie(cookiePath+'width');
- var cookieHeight = getCookie(cookiePath+'height');
+ var cookieWidth = readCookie(cookiePath+'width');
+ var cookieHeight = readCookie(cookiePath+'height');
if (cookieWidth) {
restoreWidth(cookieWidth);
} else if ($(".side-nav-resizable").length) {
@@ -175,7 +174,9 @@
$("#packages-nav").css({height:parseInt(resizePackagesNav.css("height")) - 6 + "px"}); //move 6px for handle
devdocNav.css({height:sidenav.css("height")});
$("#nav-tree").css({height:swapperHeight + "px"});
- writeCookie("height", resizePackagesNav.css("height"), "", null);
+
+ var section = location.pathname.substring(1,location.pathname.indexOf("/",1));
+ writeCookie("height", resizePackagesNav.css("height"), section, null);
}
function resizeWidth() {
@@ -190,7 +191,9 @@
resizePackagesNav.css({width:sidenavWidth});
classesNav.css({width:sidenavWidth});
$("#packages-nav").css({width:sidenavWidth});
- writeCookie("width", sidenavWidth, "", null);
+
+ var section = location.pathname.substring(1,location.pathname.indexOf("/",1));
+ writeCookie("width", sidenavWidth, section, null);
}
function resizeAll() {
@@ -207,7 +210,7 @@
if (location.indexOf("/"+cookiePath+"/") != -1) {
return true;
}
- var lastPage = getCookie(cookiePath + "_lastpage");
+ var lastPage = readCookie(cookiePath + "_lastpage");
if (lastPage) {
window.location = lastPage;
return false;
@@ -216,11 +219,11 @@
}
$(window).unload(function(){
- var href = location.href;
- if (href.indexOf("/reference/") != -1) {
- writeCookie("lastpage", href, "", null);
- } else if (href.indexOf("/guide/") != -1) {
- writeCookie("lastpage", href, "", null);
+ var path = location.pathname;
+ if (path.indexOf("/reference/") != -1) {
+ writeCookie("lastpage", path, "reference", null);
+ } else if (path.indexOf("/guide/") != -1) {
+ writeCookie("lastpage", path, "guide", null);
}
});
@@ -257,7 +260,7 @@
}
function getNavPref() {
- var v = getCookie('reference_nav');
+ var v = readCookie('reference_nav');
if (v != NAV_PREF_TREE) {
v = NAV_PREF_PANELS;
}
@@ -283,7 +286,7 @@
}
var date = new Date();
date.setTime(date.getTime()+(10*365*24*60*60*1000)); // keep this for 10 years
- writeCookie("nav", nav_pref, "", date.toGMTString());
+ writeCookie("nav", nav_pref, null, date.toGMTString());
$("#nav-panels").toggle();
$("#panel-link").toggle();
@@ -349,3 +352,57 @@
}
return false;
}
+
+
+function changeTabLang(lang) {
+ var nodes = $("#header-tabs").find("."+lang);
+ for (i=0; i < nodes.length; i++) { // for each node in this language
+ var node = $(nodes[i]);
+ node.siblings().css("display","none"); // hide all siblings
+ if (node.not(":empty").length != 0) { //if this languages node has a translation, show it
+ node.css("display","inline");
+ } else { //otherwise, show English instead
+ node.css("display","none");
+ node.siblings().filter(".en").css("display","inline");
+ }
+ }
+}
+
+function changeNavLang(lang) {
+ var nodes = $("#side-nav").find("."+lang);
+ for (i=0; i < nodes.length; i++) { // for each node in this language
+ var node = $(nodes[i]);
+ node.siblings().css("display","none"); // hide all siblings
+ if (node.not(":empty").length != 0) { // if this languages node has a translation, show it
+ node.css("display","inline");
+ } else { // otherwise, show English instead
+ node.css("display","none");
+ node.siblings().filter(".en").css("display","inline");
+ }
+ }
+}
+
+function changeDocLang(lang) {
+ changeTabLang(lang);
+ changeNavLang(lang);
+}
+
+function changeLangPref(lang) {
+ var date = new Date();
+ date.setTime(date.getTime()+(50*365*24*60*60*1000)); // keep this for 50 years
+ writeCookie("pref_lang", lang, null, date);
+
+ changeDocLang(lang);
+}
+
+function loadLangPref() {
+ var lang = readCookie("pref_lang");
+ if (lang != 0) {
+ $("#language").find("option[value='"+lang+"']").attr("selected",true);
+ }
+}
+
+function getLangPref() {
+ return $("#language").find(":selected").attr("value");
+}
+
diff --git a/tools/droiddoc/templates/class.cs b/tools/droiddoc/templates/class.cs
index 1077886..41d34dd 100644
--- a/tools/droiddoc/templates/class.cs
+++ b/tools/droiddoc/templates/class.cs
@@ -180,6 +180,7 @@
<?cs /if ?>
<?cs call:see_also_tags(class.seeAlso) ?>
+<?cs call:since_tags(class) ?>
</div><!-- jd-descr -->
diff --git a/tools/droiddoc/templates/customization.cs b/tools/droiddoc/templates/customization.cs
index d437c2c..1988a89 100644
--- a/tools/droiddoc/templates/customization.cs
+++ b/tools/droiddoc/templates/customization.cs
@@ -2,6 +2,10 @@
<?cs # Use the -templatedir arg to javadoc to set your own directory with a ?>
<?cs # replacement for this file in it. ?>
+
+<?cs def:default_search_box() ?><?cs /def ?>
+<?cs def:default_left_nav() ?><?cs /def ?>
+
<?cs # appears at the top of every page ?><?cs
def:custom_masthead() ?>
<div id="header">
@@ -23,4 +27,4 @@
<?cs def:custom_buildinfo() ?>Build <?cs var:page.build ?> - <?cs var:page.now ?><?cs /def ?>
<?cs # appears on the side of the page ?>
-<?cs def:custom_left_nav() ?><?cs call:default_left_nav() ?><?cs /def ?>
+<?cs def:custom_left_nav() ?><?cs call:default_left_nav() ?><?cs /def ?>
\ No newline at end of file
diff --git a/tools/droiddoc/templates/macros.cs b/tools/droiddoc/templates/macros.cs
index 3ba743b..1ca2f8b 100644
--- a/tools/droiddoc/templates/macros.cs
+++ b/tools/droiddoc/templates/macros.cs
@@ -115,9 +115,15 @@
/if ?>
<?cs /def ?>
+<?cs # print the Since: section ?><?cs
+def:since_tags(obj) ?>
+ <div class="jd-tagdata">
+ <h5 class="jd-tagtitle">Since <?cs var:obj.since ?></h5>
+ </div>
+<?cs /def ?>
<?cs # Print the long-form description for something.
- Uses the following fields: deprecated descr seeAlso ?><?cs
+ Uses the following fields: deprecated descr seeAlso since ?><?cs
def:description(obj) ?><?cs
call:deprecated_warning(obj) ?>
<div class="jd-tagdata jd-tagdescr"><p><?cs call:tag_list(obj.descr) ?></p></div><?cs
@@ -165,6 +171,7 @@
</div><?cs
/if ?><?cs
call:see_also_tags(obj.seeAlso) ?><?cs
+ call:since_tags(obj) ?><?cs
/def ?>
<?cs # A table of links to classes with descriptions, as in a package file or the nested classes ?><?cs
@@ -233,108 +240,5 @@
</div><?cs
/def ?>
-<?cs # The default side navigation for the reference docs ?><?cs
-def:default_left_nav() ?>
- <div class="g-section g-tpl-240" id="body-content">
- <div class="g-unit g-first side-nav-resizable" id="side-nav">
- <div id="swapper">
- <div id="nav-panels">
- <div id="resize-packages-nav">
- <div id="packages-nav">
- <div id="index-links"><nobr>
- <a href="<?cs var:toroot ?>reference/packages.html" <?cs if:(page.title == "Package Index") ?>class="selected"<?cs /if ?> >Package Index</a> |
- <a href="<?cs var:toroot ?>reference/classes.html" <?cs if:(page.title == "Class Index") ?>class="selected"<?cs /if ?>>Class Index</a></nobr>
- </div>
- <ul><?cs
- each:pkg=docs.packages ?>
- <li <?cs if:(class.package.name == pkg.name) || (package.name == pkg.name)?>class="selected"<?cs /if ?>><?cs call:package_link(pkg) ?></li><?cs
- /each ?>
- </ul><br/>
- </div> <!-- end packages -->
- </div> <!-- end resize-packages -->
- <div id="classes-nav"><?cs
- if:subcount(class.package) ?>
- <ul>
- <?cs call:list("Interfaces", class.package.interfaces) ?>
- <?cs call:list("Classes", class.package.classes) ?>
- <?cs call:list("Enums", class.package.enums) ?>
- <?cs call:list("Exceptions", class.package.exceptions) ?>
- <?cs call:list("Errors", class.package.errors) ?>
- </ul><?cs
- elif:subcount(package) ?>
- <ul>
- <?cs call:class_link_list("Interfaces", package.interfaces) ?>
- <?cs call:class_link_list("Classes", package.classes) ?>
- <?cs call:class_link_list("Enums", package.enums) ?>
- <?cs call:class_link_list("Exceptions", package.exceptions) ?>
- <?cs call:class_link_list("Errors", package.errors) ?>
- </ul><?cs
- else ?>
- <script>
- /*addLoadEvent(maxPackageHeight);*/
- </script>
- <p style="padding:10px">Select a package to view its members</p><?cs
- /if ?><br/>
- </div><!-- end classes -->
- </div><!-- end nav-panels -->
- <div id="nav-tree" style="display:none">
- <div id="index-links"><nobr>
- <a href="<?cs var:toroot ?>reference/packages.html" <?cs if:(page.title == "Package Index") ?>class="selected"<?cs /if ?> >Package Index</a> |
- <a href="<?cs var:toroot ?>reference/classes.html" <?cs if:(page.title == "Class Index") ?>class="selected"<?cs /if ?>>Class Index</a></nobr>
- </div>
- </div><!-- end nav-tree -->
- </div><!-- end swapper -->
- </div> <!-- end side-nav -->
- <script>
- if (!isMobile) {
- $("<a href='#' id='nav-swap' onclick='swapNav();return false;' style='font-size:10px;line-height:9px;margin-left:1em;text-decoration:none;'><span id='tree-link'>Use Tree Navigation</span><span id='panel-link' style='display:none'>Use Panel Navigation</span></a>").appendTo("#side-nav");
- chooseDefaultNav();
- if ($("#nav-tree").is(':visible')) init_navtree("nav-tree", "<?cs var:toroot ?>", NAVTREE_DATA);
- else {
- addLoadEvent(function() {
- scrollIntoView("packages-nav");
- scrollIntoView("classes-nav");
- });
- }
- $("#swapper").css({borderBottom:"2px solid #aaa"});
- } else {
- swapNav(); // tree view should be used on mobile
- }
- </script><?cs
-/def ?>
-
-<?cs # The default search box that goes in the header ?><?cs
-def:default_search_box() ?>
- <div id="search" >
- <div id="searchForm">
- <form accept-charset="utf-8" class="gsc-search-box"
- onsubmit="return submit_search()">
- <table class="gsc-search-box" cellpadding="0" cellspacing="0"><tbody>
- <tr>
- <td class="gsc-input">
- <input id="search_autocomplete" class="gsc-input" type="text" size="33" autocomplete="off"
- title="search developer docs" name="q"
- value="search developer docs"
- onFocus="search_focus_changed(this, true)"
- onBlur="search_focus_changed(this, false)"
- onkeydown="return search_changed(event, true, '<?cs var:toroot?>')"
- onkeyup="return search_changed(event, false, '<?cs var:toroot?>')" />
- <div id="search_filtered_div" class="no-display">
- <table id="search_filtered" cellspacing=0>
- </table>
- </div>
- </td>
- <td class="gsc-search-button">
- <input type="submit" value="Search" title="search" id="search-button" class="gsc-search-button" />
- </td>
- <td class="gsc-clear-button">
- <div title="clear results" class="gsc-clear-button"> </div>
- </td>
- </tr></tbody>
- </table>
- </form>
- </div><!-- searchForm -->
- </div><!-- search --><?cs
-/def ?>
<?cs include:"customization.cs" ?>
diff --git a/tools/droiddoc/templates/package-descr.cs b/tools/droiddoc/templates/package-descr.cs
index 385ce23..112db4b 100644
--- a/tools/droiddoc/templates/package-descr.cs
+++ b/tools/droiddoc/templates/package-descr.cs
@@ -21,6 +21,7 @@
<div class="jd-descr">
<p><?cs call:tag_list(package.descr) ?></p>
</div>
+<?cs call:since_tags(package) ?>
<?cs include:"footer.cs" ?>
</div><!-- end jd-content -->
diff --git a/tools/droiddoc/test/stubs/func.sh b/tools/droiddoc/test/stubs/func.sh
index 1ad4bd5..ea4fe75 100644
--- a/tools/droiddoc/test/stubs/func.sh
+++ b/tools/droiddoc/test/stubs/func.sh
@@ -26,21 +26,22 @@
STUBS_DIR=$3
OBJ_DIR=out/stubs/$ID
+ PLATFORM=${HOST_OS}-${HOST_ARCH}
rm -rf $OBJ_DIR &> /dev/null
mkdir -p $OBJ_DIR
find $SRC_DIR -name '*.java' > $OBJ_DIR/javadoc-src-list
( \
- LD_LIBRARY_PATH=out/host/darwin-x86/lib \
+ LD_LIBRARY_PATH=out/host/$PLATFORM/lib \
javadoc \
\@$OBJ_DIR/javadoc-src-list \
-J-Xmx512m \
- -J-Djava.library.path=out/host/darwin-x86/lib \
+ -J-Djava.library.path=out/host/$PLATFORM/lib \
\
-quiet \
-doclet DroidDoc \
- -docletpath out/host/darwin-x86/framework/clearsilver.jar:out/host/darwin-x86/framework/droiddoc.jar \
+ -docletpath out/host/$PLATFORM/framework/clearsilver.jar:out/host/$PLATFORM/framework/droiddoc.jar:out/host/$PLATFORM/framework/apicheck.jar \
-templatedir tools/droiddoc/templates \
-classpath out/target/common/obj/JAVA_LIBRARIES/core_intermediates/classes.jar:out/target/common/obj/JAVA_LIBRARIES/ext_intermediates/classes.jar:out/target/common/obj/JAVA_LIBRARIES/framework_intermediates/classes.jar \
-sourcepath $SRC_DIR:out/target/common/obj/JAVA_LIBRARIES/core_intermediates/classes.jar:out/target/common/obj/JAVA_LIBRARIES/ext_intermediates/classes.jar:out/target/common/obj/JAVA_LIBRARIES/framework_intermediates/classes.jar \
diff --git a/tools/droiddoc/test/stubs/run.sh b/tools/droiddoc/test/stubs/run.sh
index f237a7d..2ea15a6 100755
--- a/tools/droiddoc/test/stubs/run.sh
+++ b/tools/droiddoc/test/stubs/run.sh
@@ -14,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-DIR=tools/droiddoc/test/stubs
+DIR=build/tools/droiddoc/test/stubs
pushd $TOP
diff --git a/tools/releasetools/amend_generator.py b/tools/releasetools/amend_generator.py
new file mode 100644
index 0000000..8341599
--- /dev/null
+++ b/tools/releasetools/amend_generator.py
@@ -0,0 +1,205 @@
+# Copyright (C) 2009 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+import common
+
+class AmendGenerator(object):
+ """Class to generate scripts in the 'amend' recovery script language
+ used up through cupcake."""
+
+ def __init__(self):
+ self.script = ['assert compatible_with("0.2") == "true"']
+ self.included_files = set()
+
+ def MakeTemporary(self):
+ """Make a temporary script object whose commands can latter be
+ appended to the parent script with AppendScript(). Used when the
+ caller wants to generate script commands out-of-order."""
+ x = AmendGenerator()
+ x.script = []
+ x.included_files = self.included_files
+ return x
+
+ @staticmethod
+ def _FileRoot(fn):
+ """Convert a file path to the 'root' notation used by amend."""
+ if fn.startswith("/system/"):
+ return "SYSTEM:" + fn[8:]
+ elif fn == "/system":
+ return "SYSTEM:"
+ elif fn.startswith("/tmp/"):
+ return "CACHE:.." + fn
+ else:
+ raise ValueError("don't know root for \"%s\"" % (fn,))
+
+ @staticmethod
+ def _PartitionRoot(partition):
+ """Convert a partition name to the 'root' notation used by amend."""
+ if partition == "userdata":
+ return "DATA:"
+ else:
+ return partition.upper() + ":"
+
+ def AppendScript(self, other):
+ """Append the contents of another script (which should be created
+ with temporary=True) to this one."""
+ self.script.extend(other.script)
+ self.included_files.update(other.included_files)
+
+ def AssertSomeFingerprint(self, *fp):
+ """Assert that the current fingerprint is one of *fp."""
+ x = [('file_contains("SYSTEM:build.prop", '
+ '"ro.build.fingerprint=%s") == "true"') % i for i in fp]
+ self.script.append("assert %s" % (" || ".join(x),))
+
+ def AssertOlderBuild(self, timestamp):
+ """Assert that the build on the device is older (or the same as)
+ the given timestamp."""
+ self.script.append("run_program PACKAGE:check_prereq %s" % (timestamp,))
+ self.included_files.add("check_prereq")
+
+ def AssertDevice(self, device):
+ """Assert that the device identifier is the given string."""
+ self.script.append('assert getprop("ro.product.device") == "%s" || '
+ 'getprop("ro.build.product") == "%s"' % (device, device))
+
+ def AssertSomeBootloader(self, *bootloaders):
+ """Asert that the bootloader version is one of *bootloaders."""
+ self.script.append("assert " +
+ " || ".join(['getprop("ro.bootloader") == "%s"' % (b,)
+ for b in bootloaders]))
+
+ def ShowProgress(self, frac, dur):
+ """Update the progress bar, advancing it over 'frac' over the next
+ 'dur' seconds."""
+ self.script.append("show_progress %f %d" % (frac, int(dur)))
+
+ def PatchCheck(self, filename, *sha1):
+ """Check that the given file (or MTD reference) has one of the
+ given *sha1 hashes."""
+ out = ["run_program PACKAGE:applypatch -c %s" % (filename,)]
+ for i in sha1:
+ out.append(" " + i)
+ self.script.append("".join(out))
+ self.included_files.add("applypatch")
+
+ def CacheFreeSpaceCheck(self, amount):
+ """Check that there's at least 'amount' space that can be made
+ available on /cache."""
+ self.script.append("run_program PACKAGE:applypatch -s %d" % (amount,))
+ self.included_files.add("applypatch")
+
+ def Mount(self, kind, what, path):
+ # no-op; amend uses it's 'roots' system to automatically mount
+ # things when they're referred to
+ pass
+
+ def UnpackPackageDir(self, src, dst):
+ """Unpack a given directory from the OTA package into the given
+ destination directory."""
+ dst = self._FileRoot(dst)
+ self.script.append("copy_dir PACKAGE:%s %s" % (src, dst))
+
+ def Comment(self, comment):
+ """Write a comment into the update script."""
+ self.script.append("")
+ for i in comment.split("\n"):
+ self.script.append("# " + i)
+ self.script.append("")
+
+ def Print(self, message):
+ """Log a message to the screen (if the logs are visible)."""
+ # no way to do this from amend; substitute a script comment instead
+ self.Comment(message)
+
+ def FormatPartition(self, partition):
+ """Format the given MTD partition."""
+ self.script.append("format %s" % (self._PartitionRoot(partition),))
+
+ def DeleteFiles(self, file_list):
+ """Delete all files in file_list."""
+ line = []
+ t = 0
+ for i in file_list:
+ i = self._FileRoot(i)
+ line.append(i)
+ t += len(i) + 1
+ if t > 80:
+ self.script.append("delete " + " ".join(line))
+ line = []
+ t = 0
+ if line:
+ self.script.append("delete " + " ".join(line))
+
+ def ApplyPatch(self, srcfile, tgtfile, tgtsize, tgtsha1, *patchpairs):
+ """Apply binary patches (in *patchpairs) to the given srcfile to
+ produce tgtfile (which may be "-" to indicate overwriting the
+ source file."""
+ if len(patchpairs) % 2 != 0:
+ raise ValueError("bad patches given to ApplyPatch")
+ self.script.append(
+ ("run_program PACKAGE:applypatch %s %s %s %d " %
+ (srcfile, tgtfile, tgtsha1, tgtsize)) +
+ " ".join(["%s:%s" % patchpairs[i:i+2]
+ for i in range(0, len(patchpairs), 2)]))
+ self.included_files.add("applypatch")
+
+ def WriteFirmwareImage(self, kind, fn):
+ """Arrange to update the given firmware image (kind must be
+ "hboot" or "radio") when recovery finishes."""
+ self.script.append("write_%s_image PACKAGE:%s" % (kind, fn))
+
+ def WriteRawImage(self, partition, fn):
+ """Write the given file into the given MTD partition."""
+ self.script.append("write_raw_image PACKAGE:%s %s" %
+ (fn, self._PartitionRoot(partition)))
+
+ def SetPermissions(self, fn, uid, gid, mode):
+ """Set file ownership and permissions."""
+ fn = self._FileRoot(fn)
+ self.script.append("set_perm %d %d 0%o %s" % (uid, gid, mode, fn))
+
+ def SetPermissionsRecursive(self, fn, uid, gid, dmode, fmode):
+ """Recursively set path ownership and permissions."""
+ fn = self._FileRoot(fn)
+ self.script.append("set_perm_recursive %d %d 0%o 0%o %s" %
+ (uid, gid, dmode, fmode, fn))
+
+ def MakeSymlinks(self, symlink_list):
+ """Create symlinks, given a list of (dest, link) pairs."""
+ self.script.extend(["symlink %s %s" % (i[0], self._FileRoot(i[1]))
+ for i in sorted(symlink_list)])
+
+ def AppendExtra(self, extra):
+ """Append text verbatim to the output script."""
+ self.script.append(extra)
+
+ def AddToZip(self, input_zip, output_zip, input_path=None):
+ """Write the accumulated script to the output_zip file. input_zip
+ is used as the source for any ancillary binaries needed by the
+ script. If input_path is not None, it will be used as a local
+ path for binaries instead of input_zip."""
+ common.ZipWriteStr(output_zip, "META-INF/com/google/android/update-script",
+ "\n".join(self.script) + "\n")
+ for i in self.included_files:
+ try:
+ if input_path is None:
+ data = input_zip.read(os.path.join("OTA/bin", i))
+ else:
+ data = open(os.path.join(input_path, i)).read()
+ common.ZipWriteStr(output_zip, i, data, perms=0755)
+ except (IOError, KeyError), e:
+ raise ExternalError("unable to include binary %s: %s" % (i, e))
diff --git a/tools/releasetools/both_generator.py b/tools/releasetools/both_generator.py
new file mode 100644
index 0000000..df2a659
--- /dev/null
+++ b/tools/releasetools/both_generator.py
@@ -0,0 +1,60 @@
+# Copyright (C) 2009 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import edify_generator
+import amend_generator
+
+class BothGenerator(object):
+ def __init__(self, version):
+ self.version = version
+ self.edify = edify_generator.EdifyGenerator(version)
+ self.amend = amend_generator.AmendGenerator()
+
+ def MakeTemporary(self):
+ x = BothGenerator(self.version)
+ x.edify = self.edify.MakeTemporary()
+ x.amend = self.amend.MakeTemporary()
+ return x
+
+ def AppendScript(self, other):
+ self.edify.AppendScript(other.edify)
+ self.amend.AppendScript(other.amend)
+
+ def _DoBoth(self, name, *args):
+ getattr(self.edify, name)(*args)
+ getattr(self.amend, name)(*args)
+
+ def AssertSomeFingerprint(self, *a): self._DoBoth("AssertSomeFingerprint", *a)
+ def AssertOlderBuild(self, *a): self._DoBoth("AssertOlderBuild", *a)
+ def AssertDevice(self, *a): self._DoBoth("AssertDevice", *a)
+ def AssertSomeBootloader(self, *a): self._DoBoth("AssertSomeBootloader", *a)
+ def ShowProgress(self, *a): self._DoBoth("ShowProgress", *a)
+ def PatchCheck(self, *a): self._DoBoth("PatchCheck", *a)
+ def CacheFreeSpaceCheck(self, *a): self._DoBoth("CacheFreeSpaceCheck", *a)
+ def Mount(self, *a): self._DoBoth("Mount", *a)
+ def UnpackPackageDir(self, *a): self._DoBoth("UnpackPackageDir", *a)
+ def Comment(self, *a): self._DoBoth("Comment", *a)
+ def Print(self, *a): self._DoBoth("Print", *a)
+ def FormatPartition(self, *a): self._DoBoth("FormatPartition", *a)
+ def DeleteFiles(self, *a): self._DoBoth("DeleteFiles", *a)
+ def ApplyPatch(self, *a): self._DoBoth("ApplyPatch", *a)
+ def WriteFirmwareImage(self, *a): self._DoBoth("WriteFirmwareImage", *a)
+ def WriteRawImage(self, *a): self._DoBoth("WriteRawImage", *a)
+ def SetPermissions(self, *a): self._DoBoth("SetPermissions", *a)
+ def SetPermissionsRecursive(self, *a): self._DoBoth("SetPermissionsRecursive", *a)
+ def MakeSymlinks(self, *a): self._DoBoth("MakeSymlinks", *a)
+ def AppendExtra(self, *a): self._DoBoth("AppendExtra", *a)
+
+ def AddToZip(self, input_zip, output_zip, input_path=None):
+ self._DoBoth("AddToZip", input_zip, output_zip, input_path)
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 705ed84..a07ff7c 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import errno
import getopt
import getpass
import os
@@ -20,6 +21,7 @@
import subprocess
import sys
import tempfile
+import zipfile
# missing in Python 2.4 and before
if not hasattr(os, "SEEK_SET"):
@@ -27,7 +29,7 @@
class Options(object): pass
OPTIONS = Options()
-OPTIONS.signapk_jar = "out/host/linux-x86/framework/signapk.jar"
+OPTIONS.search_path = "out/host/linux-x86"
OPTIONS.max_image_size = {}
OPTIONS.verbose = False
OPTIONS.tempfiles = []
@@ -61,40 +63,62 @@
def BuildAndAddBootableImage(sourcedir, targetname, output_zip):
"""Take a kernel, cmdline, and ramdisk directory from the input (in
'sourcedir'), and turn them into a boot image. Put the boot image
- into the output zip file under the name 'targetname'."""
+ into the output zip file under the name 'targetname'. Returns
+ targetname on success or None on failure (if sourcedir does not
+ appear to contain files for the requested image)."""
print "creating %s..." % (targetname,)
img = BuildBootableImage(sourcedir)
+ if img is None:
+ return None
CheckSize(img, targetname)
- output_zip.writestr(targetname, img)
+ ZipWriteStr(output_zip, targetname, img)
+ return targetname
def BuildBootableImage(sourcedir):
"""Take a kernel, cmdline, and ramdisk directory from the input (in
- 'sourcedir'), and turn them into a boot image. Return the image data."""
+ 'sourcedir'), and turn them into a boot image. Return the image
+ data, or None if sourcedir does not appear to contains files for
+ building the requested image."""
+
+ if (not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK) or
+ not os.access(os.path.join(sourcedir, "kernel"), os.F_OK)):
+ return None
ramdisk_img = tempfile.NamedTemporaryFile()
img = tempfile.NamedTemporaryFile()
p1 = Run(["mkbootfs", os.path.join(sourcedir, "RAMDISK")],
stdout=subprocess.PIPE)
- p2 = Run(["gzip", "-n"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())
+ p2 = Run(["minigzip"],
+ stdin=p1.stdout, stdout=ramdisk_img.file.fileno())
p2.wait()
p1.wait()
assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (targetname,)
- assert p2.returncode == 0, "gzip of %s ramdisk failed" % (targetname,)
+ assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (targetname,)
- cmdline = open(os.path.join(sourcedir, "cmdline")).read().rstrip("\n")
- p = Run(["mkbootimg",
- "--kernel", os.path.join(sourcedir, "kernel"),
- "--cmdline", cmdline,
- "--ramdisk", ramdisk_img.name,
- "--output", img.name],
- stdout=subprocess.PIPE)
+ cmd = ["mkbootimg", "--kernel", os.path.join(sourcedir, "kernel")]
+
+ fn = os.path.join(sourcedir, "cmdline")
+ if os.access(fn, os.F_OK):
+ cmd.append("--cmdline")
+ cmd.append(open(fn).read().rstrip("\n"))
+
+ fn = os.path.join(sourcedir, "base")
+ if os.access(fn, os.F_OK):
+ cmd.append("--base")
+ cmd.append(open(fn).read().rstrip("\n"))
+
+ cmd.extend(["--ramdisk", ramdisk_img.name,
+ "--output", img.name])
+
+ p = Run(cmd, stdout=subprocess.PIPE)
p.communicate()
- assert p.returncode == 0, "mkbootimg of %s image failed" % (targetname,)
+ assert p.returncode == 0, "mkbootimg of %s image failed" % (
+ os.path.basename(sourcedir),)
img.seek(os.SEEK_SET, 0)
data = img.read()
@@ -131,22 +155,30 @@
those which require them. Return a {key: password} dict. password
will be None if the key has no password."""
- key_passwords = {}
+ no_passwords = []
+ need_passwords = []
devnull = open("/dev/null", "w+b")
for k in sorted(keylist):
- p = subprocess.Popen(["openssl", "pkcs8", "-in", k+".pk8",
- "-inform", "DER", "-nocrypt"],
- stdin=devnull.fileno(),
- stdout=devnull.fileno(),
- stderr=subprocess.STDOUT)
+ # An empty-string key is used to mean don't re-sign this package.
+ # Obviously we don't need a password for this non-key.
+ if not k:
+ no_passwords.append(k)
+ continue
+
+ p = Run(["openssl", "pkcs8", "-in", k+".pk8",
+ "-inform", "DER", "-nocrypt"],
+ stdin=devnull.fileno(),
+ stdout=devnull.fileno(),
+ stderr=subprocess.STDOUT)
p.communicate()
if p.returncode == 0:
- print "%s.pk8 does not require a password" % (k,)
- key_passwords[k] = None
+ no_passwords.append(k)
else:
- key_passwords[k] = getpass.getpass("Enter password for %s.pk8> " % (k,))
+ need_passwords.append(k)
devnull.close()
- print
+
+ key_passwords = PasswordManager().GetPasswords(need_passwords)
+ key_passwords.update(dict.fromkeys(no_passwords, None))
return key_passwords
@@ -167,12 +199,13 @@
else:
sign_name = output_name
- p = subprocess.Popen(["java", "-jar", OPTIONS.signapk_jar,
- key + ".x509.pem",
- key + ".pk8",
- input_name, sign_name],
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE)
+ p = Run(["java", "-jar",
+ os.path.join(OPTIONS.search_path, "framework", "signapk.jar"),
+ key + ".x509.pem",
+ key + ".pk8",
+ input_name, sign_name],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE)
if password is not None:
password += "\n"
p.communicate(password)
@@ -180,7 +213,7 @@
raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,))
if align:
- p = subprocess.Popen(["zipalign", "-f", str(align), sign_name, output_name])
+ p = Run(["zipalign", "-f", str(align), sign_name, output_name])
p.communicate()
if p.returncode != 0:
raise ExternalError("zipalign failed: return code %s" % (p.returncode,))
@@ -209,8 +242,8 @@
COMMON_DOCSTRING = """
-p (--path) <dir>
- Prepend <dir> to the list of places to search for binaries run
- by this script.
+ Prepend <dir>/bin to the list of places to search for binaries
+ run by this script, and expect to find jars in <dir>/framework.
-v (--verbose)
Show command lines being executed.
@@ -252,15 +285,13 @@
elif o in ("-v", "--verbose"):
OPTIONS.verbose = True
elif o in ("-p", "--path"):
- os.environ["PATH"] = a + os.pathsep + os.environ["PATH"]
- path_specified = True
+ OPTIONS.search_path = a
else:
if extra_option_handler is None or not extra_option_handler(o, a):
assert False, "unknown option \"%s\"" % (o,)
- if not path_specified:
- os.environ["PATH"] = ("out/host/linux-x86/bin" + os.pathsep +
- os.environ["PATH"])
+ os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
+ os.pathsep + os.environ["PATH"])
return args
@@ -271,3 +302,111 @@
shutil.rmtree(i)
else:
os.remove(i)
+
+
+class PasswordManager(object):
+ def __init__(self):
+ self.editor = os.getenv("EDITOR", None)
+ self.pwfile = os.getenv("ANDROID_PW_FILE", None)
+
+ def GetPasswords(self, items):
+ """Get passwords corresponding to each string in 'items',
+ returning a dict. (The dict may have keys in addition to the
+ values in 'items'.)
+
+ Uses the passwords in $ANDROID_PW_FILE if available, letting the
+ user edit that file to add more needed passwords. If no editor is
+ available, or $ANDROID_PW_FILE isn't define, prompts the user
+ interactively in the ordinary way.
+ """
+
+ current = self.ReadFile()
+
+ first = True
+ while True:
+ missing = []
+ for i in items:
+ if i not in current or not current[i]:
+ missing.append(i)
+ # Are all the passwords already in the file?
+ if not missing: return current
+
+ for i in missing:
+ current[i] = ""
+
+ if not first:
+ print "key file %s still missing some passwords." % (self.pwfile,)
+ answer = raw_input("try to edit again? [y]> ").strip()
+ if answer and answer[0] not in 'yY':
+ raise RuntimeError("key passwords unavailable")
+ first = False
+
+ current = self.UpdateAndReadFile(current)
+
+ def PromptResult(self, current):
+ """Prompt the user to enter a value (password) for each key in
+ 'current' whose value is fales. Returns a new dict with all the
+ values.
+ """
+ result = {}
+ for k, v in sorted(current.iteritems()):
+ if v:
+ result[k] = v
+ else:
+ while True:
+ result[k] = getpass.getpass("Enter password for %s key> "
+ % (k,)).strip()
+ if result[k]: break
+ return result
+
+ def UpdateAndReadFile(self, current):
+ if not self.editor or not self.pwfile:
+ return self.PromptResult(current)
+
+ f = open(self.pwfile, "w")
+ os.chmod(self.pwfile, 0600)
+ f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
+ f.write("# (Additional spaces are harmless.)\n\n")
+
+ first_line = None
+ sorted = [(not v, k, v) for (k, v) in current.iteritems()]
+ sorted.sort()
+ for i, (_, k, v) in enumerate(sorted):
+ f.write("[[[ %s ]]] %s\n" % (v, k))
+ if not v and first_line is None:
+ # position cursor on first line with no password.
+ first_line = i + 4
+ f.close()
+
+ p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
+ _, _ = p.communicate()
+
+ return self.ReadFile()
+
+ def ReadFile(self):
+ result = {}
+ if self.pwfile is None: return result
+ try:
+ f = open(self.pwfile, "r")
+ for line in f:
+ line = line.strip()
+ if not line or line[0] == '#': continue
+ m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
+ if not m:
+ print "failed to parse password file: ", line
+ else:
+ result[m.group(2)] = m.group(1)
+ f.close()
+ except IOError, e:
+ if e.errno != errno.ENOENT:
+ print "error reading password file: ", str(e)
+ return result
+
+
+def ZipWriteStr(zip, filename, data, perms=0644):
+ # use a fixed timestamp so the output is repeatable.
+ zinfo = zipfile.ZipInfo(filename=filename,
+ date_time=(2009, 1, 1, 0, 0, 0))
+ zinfo.compress_type = zip.compression
+ zinfo.external_attr = perms << 16
+ zip.writestr(zinfo, data)
diff --git a/tools/releasetools/edify_generator.py b/tools/releasetools/edify_generator.py
new file mode 100644
index 0000000..e7a15cd
--- /dev/null
+++ b/tools/releasetools/edify_generator.py
@@ -0,0 +1,226 @@
+# Copyright (C) 2009 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import re
+
+import common
+
+class EdifyGenerator(object):
+ """Class to generate scripts in the 'edify' recovery script language
+ used from donut onwards."""
+
+ def __init__(self, version):
+ self.script = []
+ self.mounts = set()
+ self.version = version
+
+ def MakeTemporary(self):
+ """Make a temporary script object whose commands can latter be
+ appended to the parent script with AppendScript(). Used when the
+ caller wants to generate script commands out-of-order."""
+ x = EdifyGenerator(self.version)
+ x.mounts = self.mounts
+ return x
+
+ @staticmethod
+ def _WordWrap(cmd, linelen=80):
+ """'cmd' should be a function call with null characters after each
+ parameter (eg, "somefun(foo,\0bar,\0baz)"). This function wraps cmd
+ to a given line length, replacing nulls with spaces and/or newlines
+ to format it nicely."""
+ indent = cmd.index("(")+1
+ out = []
+ first = True
+ x = re.compile("^(.{,%d})\0" % (linelen-indent,))
+ while True:
+ if not first:
+ out.append(" " * indent)
+ first = False
+ m = x.search(cmd)
+ if not m:
+ parts = cmd.split("\0", 1)
+ out.append(parts[0]+"\n")
+ if len(parts) == 1:
+ break
+ else:
+ cmd = parts[1]
+ continue
+ out.append(m.group(1)+"\n")
+ cmd = cmd[m.end():]
+
+ return "".join(out).replace("\0", " ").rstrip("\n")
+
+ def AppendScript(self, other):
+ """Append the contents of another script (which should be created
+ with temporary=True) to this one."""
+ self.script.extend(other.script)
+
+ def AssertSomeFingerprint(self, *fp):
+ """Assert that the current system build fingerprint is one of *fp."""
+ if not fp:
+ raise ValueError("must specify some fingerprints")
+ cmd = ('assert(' +
+ ' ||\0'.join([('file_getprop("/system/build.prop", '
+ '"ro.build.fingerprint") == "%s"')
+ % i for i in fp]) +
+ ');')
+ self.script.append(self._WordWrap(cmd))
+
+ def AssertOlderBuild(self, timestamp):
+ """Assert that the build on the device is older (or the same as)
+ the given timestamp."""
+ self.script.append(('assert(!less_than_int(%s, '
+ 'getprop("ro.build.date.utc")));') % (timestamp,))
+
+ def AssertDevice(self, device):
+ """Assert that the device identifier is the given string."""
+ cmd = ('assert(getprop("ro.product.device") == "%s" ||\0'
+ 'getprop("ro.build.product") == "%s");' % (device, device))
+ self.script.append(self._WordWrap(cmd))
+
+ def AssertSomeBootloader(self, *bootloaders):
+ """Asert that the bootloader version is one of *bootloaders."""
+ cmd = ("assert(" +
+ " ||\0".join(['getprop("ro.bootloader") == "%s"' % (b,)
+ for b in bootloaders]) +
+ ");")
+ self.script.append(self._WordWrap(cmd))
+
+ def ShowProgress(self, frac, dur):
+ """Update the progress bar, advancing it over 'frac' over the next
+ 'dur' seconds."""
+ self.script.append("show_progress(%f, %d);" % (frac, int(dur)))
+
+ def PatchCheck(self, filename, *sha1):
+ """Check that the given file (or MTD reference) has one of the
+ given *sha1 hashes."""
+ self.script.append('assert(apply_patch_check("%s"' % (filename,) +
+ "".join([', "%s"' % (i,) for i in sha1]) +
+ '));')
+
+ def CacheFreeSpaceCheck(self, amount):
+ """Check that there's at least 'amount' space that can be made
+ available on /cache."""
+ self.script.append("assert(apply_patch_space(%d));" % (amount,))
+
+ def Mount(self, kind, what, path):
+ """Mount the given 'what' at the given path. 'what' should be a
+ partition name if kind is "MTD", or a block device if kind is
+ "vfat". No other values of 'kind' are supported."""
+ self.script.append('mount("%s", "%s", "%s");' % (kind, what, path))
+ self.mounts.add(path)
+
+ def UnpackPackageDir(self, src, dst):
+ """Unpack a given directory from the OTA package into the given
+ destination directory."""
+ self.script.append('package_extract_dir("%s", "%s");' % (src, dst))
+
+ def Comment(self, comment):
+ """Write a comment into the update script."""
+ self.script.append("")
+ for i in comment.split("\n"):
+ self.script.append("# " + i)
+ self.script.append("")
+
+ def Print(self, message):
+ """Log a message to the screen (if the logs are visible)."""
+ self.script.append('ui_print("%s");' % (message,))
+
+ def FormatPartition(self, partition):
+ """Format the given MTD partition."""
+ self.script.append('format("MTD", "%s");' % (partition,))
+
+ def DeleteFiles(self, file_list):
+ """Delete all files in file_list."""
+ if not file_list: return
+ cmd = "delete(" + ",\0".join(['"%s"' % (i,) for i in file_list]) + ");"
+ self.script.append(self._WordWrap(cmd))
+
+ def ApplyPatch(self, srcfile, tgtfile, tgtsize, tgtsha1, *patchpairs):
+ """Apply binary patches (in *patchpairs) to the given srcfile to
+ produce tgtfile (which may be "-" to indicate overwriting the
+ source file."""
+ if len(patchpairs) % 2 != 0 or len(patchpairs) == 0:
+ raise ValueError("bad patches given to ApplyPatch")
+ cmd = ['apply_patch("%s",\0"%s",\0%s,\0%d'
+ % (srcfile, tgtfile, tgtsha1, tgtsize)]
+ for i in range(0, len(patchpairs), 2):
+ cmd.append(',\0"%s:%s"' % patchpairs[i:i+2])
+ cmd.append(');')
+ cmd = "".join(cmd)
+ self.script.append(self._WordWrap(cmd))
+
+ def WriteFirmwareImage(self, kind, fn):
+ """Arrange to update the given firmware image (kind must be
+ "hboot" or "radio") when recovery finishes."""
+ if self.version == 1:
+ self.script.append(
+ ('assert(package_extract_file("%(fn)s", "/tmp/%(kind)s.img"),\n'
+ ' write_firmware_image("/tmp/%(kind)s.img", "%(kind)s"));')
+ % {'kind': kind, 'fn': fn})
+ else:
+ self.script.append(
+ 'write_firmware_image("PACKAGE:%s", "%s");' % (fn, kind))
+
+ def WriteRawImage(self, partition, fn):
+ """Write the given package file into the given MTD partition."""
+ self.script.append(
+ ('assert(package_extract_file("%(fn)s", "/tmp/%(partition)s.img"),\n'
+ ' write_raw_image("/tmp/%(partition)s.img", "%(partition)s"),\n'
+ ' delete("/tmp/%(partition)s.img"));')
+ % {'partition': partition, 'fn': fn})
+
+ def SetPermissions(self, fn, uid, gid, mode):
+ """Set file ownership and permissions."""
+ self.script.append('set_perm(%d, %d, 0%o, "%s");' % (uid, gid, mode, fn))
+
+ def SetPermissionsRecursive(self, fn, uid, gid, dmode, fmode):
+ """Recursively set path ownership and permissions."""
+ self.script.append('set_perm_recursive(%d, %d, 0%o, 0%o, "%s");'
+ % (uid, gid, dmode, fmode, fn))
+
+ def MakeSymlinks(self, symlink_list):
+ """Create symlinks, given a list of (dest, link) pairs."""
+ by_dest = {}
+ for d, l in symlink_list:
+ by_dest.setdefault(d, []).append(l)
+
+ for dest, links in sorted(by_dest.iteritems()):
+ cmd = ('symlink("%s", ' % (dest,) +
+ ",\0".join(['"' + i + '"' for i in sorted(links)]) + ");")
+ self.script.append(self._WordWrap(cmd))
+
+ def AppendExtra(self, extra):
+ """Append text verbatim to the output script."""
+ self.script.append(extra)
+
+ def AddToZip(self, input_zip, output_zip, input_path=None):
+ """Write the accumulated script to the output_zip file. input_zip
+ is used as the source for the 'updater' binary needed to run
+ script. If input_path is not None, it will be used as a local
+ path for the binary instead of input_zip."""
+
+ for p in sorted(self.mounts):
+ self.script.append('unmount("%s");' % (p,))
+
+ common.ZipWriteStr(output_zip, "META-INF/com/google/android/updater-script",
+ "\n".join(self.script) + "\n")
+
+ if input_path is None:
+ data = input_zip.read("OTA/bin/updater")
+ else:
+ data = open(os.path.join(input_path, "updater")).read()
+ common.ZipWriteStr(output_zip, "META-INF/com/google/android/update-binary",
+ data, perms=0755)
diff --git a/tools/releasetools/img_from_target_files b/tools/releasetools/img_from_target_files
index 3451352..1d154b9 100755
--- a/tools/releasetools/img_from_target_files
+++ b/tools/releasetools/img_from_target_files
@@ -96,7 +96,7 @@
img.close()
common.CheckSize(data, "system.img")
- output_zip.writestr("system.img", data)
+ common.ZipWriteStr(output_zip, "system.img", data)
def CopyInfo(output_zip):
diff --git a/tools/releasetools/ota_from_target_files b/tools/releasetools/ota_from_target_files
index dbac03d..4b7ee03 100755
--- a/tools/releasetools/ota_from_target_files
+++ b/tools/releasetools/ota_from_target_files
@@ -33,6 +33,22 @@
Generate an incremental OTA using the given target-files zip as
the starting build.
+ -w (--wipe_user_data)
+ Generate an OTA package that will wipe the user data partition
+ when installed.
+
+ -n (--no_prereq)
+ Omit the timestamp prereq check normally included at the top of
+ the build scripts (used for developer OTA packages which
+ legitimately need to go back and forth).
+
+ -e (--extra_script) <file>
+ Insert the contents of file at the end of the update script.
+
+ -m (--script_mode) <mode>
+ Specify 'amend' or 'edify' scripts, or 'auto' to pick
+ automatically (this is the default).
+
"""
import sys
@@ -51,6 +67,9 @@
import zipfile
import common
+import amend_generator
+import edify_generator
+import both_generator
OPTIONS = common.OPTIONS
OPTIONS.package_key = "build/target/product/security/testkey"
@@ -58,6 +77,10 @@
OPTIONS.require_verbatim = set()
OPTIONS.prohibit_verbatim = set(("system/build.prop",))
OPTIONS.patch_threshold = 0.95
+OPTIONS.wipe_user_data = False
+OPTIONS.omit_prereq = False
+OPTIONS.extra_script = None
+OPTIONS.script_mode = 'auto'
def MostPopularKey(d, default):
"""Given a dict, return the key corresponding to the largest
@@ -178,11 +201,10 @@
return d
- def SetPermissions(self, script, renamer=lambda x: x):
+ def SetPermissions(self, script):
"""Append set_perm/set_perm_recursive commands to 'script' to
set all permissions, users, and groups for the tree of files
- rooted at 'self'. 'renamer' turns the filenames stored in the
- tree of Items into the strings used in the script."""
+ rooted at 'self'."""
self.CountChildMetadata()
@@ -193,22 +215,19 @@
# supposed to be something different.
if item.dir:
if current != item.best_subtree:
- script.append("set_perm_recursive %d %d 0%o 0%o %s" %
- (item.best_subtree + (renamer(item.name),)))
+ script.SetPermissionsRecursive("/"+item.name, *item.best_subtree)
current = item.best_subtree
if item.uid != current[0] or item.gid != current[1] or \
item.mode != current[2]:
- script.append("set_perm %d %d 0%o %s" %
- (item.uid, item.gid, item.mode, renamer(item.name)))
+ script.SetPermissions("/"+item.name, item.uid, item.gid, item.mode)
for i in item.children:
recurse(i, current)
else:
if item.uid != current[0] or item.gid != current[1] or \
item.mode != current[3]:
- script.append("set_perm %d %d 0%o %s" %
- (item.uid, item.gid, item.mode, renamer(item.name)))
+ script.SetPermissions("/"+item.name, item.uid, item.gid, item.mode)
recurse(self, (-1, -1, -1, -1))
@@ -230,7 +249,7 @@
basefilename = info.filename[7:]
if IsSymlink(info):
symlinks.append((input_zip.read(info.filename),
- "SYSTEM:" + basefilename))
+ "/system/" + basefilename))
else:
info2 = copy.copy(info)
fn = info2.filename = "system/" + basefilename
@@ -251,14 +270,6 @@
return symlinks
-def AddScript(script, output_zip):
- now = time.localtime()
- i = zipfile.ZipInfo("META-INF/com/google/android/update-script",
- (now.tm_year, now.tm_mon, now.tm_mday,
- now.tm_hour, now.tm_min, now.tm_sec))
- output_zip.writestr(i, "\n".join(script) + "\n")
-
-
def SignOutput(temp_zip_name, output_zip_name):
key_passwords = common.GetKeyPasswords([OPTIONS.package_key])
pw = key_passwords[OPTIONS.package_key]
@@ -266,89 +277,77 @@
common.SignFile(temp_zip_name, output_zip_name, OPTIONS.package_key, pw)
-def SubstituteRoot(s):
- if s == "system": return "SYSTEM:"
- assert s.startswith("system/")
- return "SYSTEM:" + s[7:]
-
def FixPermissions(script):
Item.GetMetadata()
root = Item.Get("system")
- root.SetPermissions(script, renamer=SubstituteRoot)
+ root.SetPermissions(script)
-def DeleteFiles(script, to_delete):
- line = []
- t = 0
- for i in to_delete:
- line.append(i)
- t += len(i) + 1
- if t > 80:
- script.append("delete " + " ".join(line))
- line = []
- t = 0
- if line:
- script.append("delete " + " ".join(line))
def AppendAssertions(script, input_zip):
- script.append('assert compatible_with("0.2") == "true"')
-
device = GetBuildProp("ro.product.device", input_zip)
- script.append('assert getprop("ro.product.device") == "%s" || '
- 'getprop("ro.build.product") == "%s"' % (device, device))
+ script.AssertDevice(device)
info = input_zip.read("OTA/android-info.txt")
m = re.search(r"require\s+version-bootloader\s*=\s*(\S+)", info)
- if not m:
- raise ExternalError("failed to find required bootloaders in "
- "android-info.txt")
- bootloaders = m.group(1).split("|")
- script.append("assert " +
- " || ".join(['getprop("ro.bootloader") == "%s"' % (b,)
- for b in bootloaders]))
-
-
-def IncludeBinary(name, input_zip, output_zip):
- try:
- data = input_zip.read(os.path.join("OTA/bin", name))
- output_zip.writestr(name, data)
- except IOError:
- raise ExternalError('unable to include device binary "%s"' % (name,))
+ if m:
+ bootloaders = m.group(1).split("|")
+ script.AssertSomeBootloader(*bootloaders)
def WriteFullOTAPackage(input_zip, output_zip):
- script = []
+ if OPTIONS.script_mode == "auto":
+ script = both_generator.BothGenerator(2)
+ elif OPTIONS.script_mode == "amend":
+ script = amend_generator.AmendGenerator()
+ else:
+ # TODO: how to determine this? We don't know what version it will
+ # be installed on top of. For now, we expect the API just won't
+ # change very often.
+ script = edify_generator.EdifyGenerator(2)
- ts = GetBuildProp("ro.build.date.utc", input_zip)
- script.append("run_program PACKAGE:check_prereq %s" % (ts,))
- IncludeBinary("check_prereq", input_zip, output_zip)
+ if not OPTIONS.omit_prereq:
+ ts = GetBuildProp("ro.build.date.utc", input_zip)
+ script.AssertOlderBuild(ts)
AppendAssertions(script, input_zip)
- script.append("format BOOT:")
- script.append("show_progress 0.1 0")
+ script.ShowProgress(0.1, 0)
- output_zip.writestr("radio.img", input_zip.read("RADIO/image"))
- script.append("write_radio_image PACKAGE:radio.img")
- script.append("show_progress 0.5 0")
+ try:
+ common.ZipWriteStr(output_zip, "radio.img", input_zip.read("RADIO/image"))
+ script.WriteFirmwareImage("radio", "radio.img")
+ except KeyError:
+ print "warning: no radio image in input target_files; not flashing radio"
- script.append("format SYSTEM:")
- script.append("copy_dir PACKAGE:system SYSTEM:")
+ script.ShowProgress(0.5, 0)
+
+ if OPTIONS.wipe_user_data:
+ script.FormatPartition("userdata")
+
+ script.FormatPartition("system")
+ script.Mount("MTD", "system", "/system")
+ script.UnpackPackageDir("system", "/system")
symlinks = CopySystemFiles(input_zip, output_zip)
- script.extend(["symlink %s %s" % s for s in symlinks])
+ script.MakeSymlinks(symlinks)
- common.BuildAndAddBootableImage(os.path.join(OPTIONS.input_tmp, "RECOVERY"),
- "system/recovery.img", output_zip)
- Item.Get("system/recovery.img", dir=False)
+ if common.BuildAndAddBootableImage(
+ os.path.join(OPTIONS.input_tmp, "RECOVERY"),
+ "system/recovery.img", output_zip):
+ Item.Get("system/recovery.img", dir=False)
FixPermissions(script)
common.AddBoot(output_zip)
- script.append("show_progress 0.2 0")
- script.append("write_raw_image PACKAGE:boot.img BOOT:")
- script.append("show_progress 0.2 10")
+ script.ShowProgress(0.2, 0)
- AddScript(script, output_zip)
+ script.WriteRawImage("boot", "boot.img")
+ script.ShowProgress(0.2, 10)
+
+ if OPTIONS.extra_script is not None:
+ script.AppendExtra(OPTIONS.extra_script)
+
+ script.AddToZip(input_zip, output_zip)
class File(object):
@@ -365,7 +364,7 @@
return t
def AddToZip(self, z):
- z.writestr(self.name, self.data)
+ common.ZipWriteStr(z, self.name, self.data)
def LoadSystemFiles(z):
@@ -380,8 +379,11 @@
return out
-def Difference(tf, sf):
- """Return the patch (as a string of data) needed to turn sf into tf."""
+def Difference(tf, sf, diff_program):
+ """Return the patch (as a string of data) needed to turn sf into tf.
+ diff_program is the name of an external program (or list, if
+ additional arguments are desired) to run to generate the diff.
+ """
ttemp = tf.WriteToTemp()
stemp = sf.WriteToTemp()
@@ -390,13 +392,21 @@
try:
ptemp = tempfile.NamedTemporaryFile()
- p = common.Run(["bsdiff", stemp.name, ttemp.name, ptemp.name])
+ if isinstance(diff_program, list):
+ cmd = copy.copy(diff_program)
+ else:
+ cmd = [diff_program]
+ cmd.append(stemp.name)
+ cmd.append(ttemp.name)
+ cmd.append(ptemp.name)
+ p = common.Run(cmd)
_, err = p.communicate()
- if err:
- raise ExternalError("failure running bsdiff:\n%s\n" % (err,))
+ if err or p.returncode != 0:
+ print "WARNING: failure running %s:\n%s\n" % (diff_program, err)
+ return None
diff = ptemp.read()
- ptemp.close()
finally:
+ ptemp.close()
stemp.close()
ttemp.close()
@@ -411,12 +421,42 @@
return bp
m = re.search(re.escape(property) + r"=(.*)\n", bp)
if not m:
- raise ExternalException("couldn't find %s in build.prop" % (property,))
+ raise common.ExternalError("couldn't find %s in build.prop" % (property,))
return m.group(1).strip()
+def GetRecoveryAPIVersion(zip):
+ """Returns the version of the recovery API. Version 0 is the older
+ amend code (no separate binary)."""
+ try:
+ version = zip.read("META/recovery-api-version.txt")
+ return int(version)
+ except KeyError:
+ try:
+ # version one didn't have the recovery-api-version.txt file, but
+ # it did include an updater binary.
+ zip.getinfo("OTA/bin/updater")
+ return 1
+ except KeyError:
+ return 0
+
def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip):
- script = []
+ source_version = GetRecoveryAPIVersion(source_zip)
+
+ if OPTIONS.script_mode == 'amend':
+ script = amend_generator.AmendGenerator()
+ elif OPTIONS.script_mode == 'edify':
+ if source_version == 0:
+ print ("WARNING: generating edify script for a source that "
+ "can't install it.")
+ script = edify_generator.EdifyGenerator(source_version)
+ elif OPTIONS.script_mode == 'auto':
+ if source_version > 0:
+ script = edify_generator.EdifyGenerator(source_version)
+ else:
+ script = amend_generator.AmendGenerator()
+ else:
+ raise ValueError('unknown script mode "%s"' % (OPTIONS.script_mode,))
print "Loading target..."
target_data = LoadSystemFiles(target_zip)
@@ -433,20 +473,24 @@
if sf is None or fn in OPTIONS.require_verbatim:
# This file should be included verbatim
if fn in OPTIONS.prohibit_verbatim:
- raise ExternalError("\"%s\" must be sent verbatim" % (fn,))
+ raise common.ExternalError("\"%s\" must be sent verbatim" % (fn,))
print "send", fn, "verbatim"
tf.AddToZip(output_zip)
verbatim_targets.append((fn, tf.size))
elif tf.sha1 != sf.sha1:
# File is different; consider sending as a patch
- d = Difference(tf, sf)
- print fn, tf.size, len(d), (float(len(d)) / tf.size)
- if len(d) > tf.size * OPTIONS.patch_threshold:
+ diff_method = "bsdiff"
+ if tf.name.endswith(".gz"):
+ diff_method = "imgdiff"
+ d = Difference(tf, sf, diff_method)
+ if d is not None:
+ print fn, tf.size, len(d), (float(len(d)) / tf.size)
+ if d is None or len(d) > tf.size * OPTIONS.patch_threshold:
# patch is almost as big as the file; don't bother patching
tf.AddToZip(output_zip)
verbatim_targets.append((fn, tf.size))
else:
- output_zip.writestr("patch/" + fn + ".p", d)
+ common.ZipWriteStr(output_zip, "patch/" + fn + ".p", d)
patch_list.append((fn, tf, sf, tf.size))
largest_source_size = max(largest_source_size, sf.size)
else:
@@ -459,23 +503,24 @@
source_fp = GetBuildProp("ro.build.fingerprint", source_zip)
target_fp = GetBuildProp("ro.build.fingerprint", target_zip)
- script.append(('assert file_contains("SYSTEM:build.prop", '
- '"ro.build.fingerprint=%s") == "true" || '
- 'file_contains("SYSTEM:build.prop", '
- '"ro.build.fingerprint=%s") == "true"') %
- (source_fp, target_fp))
+ script.Mount("MTD", "system", "/system")
+ script.AssertSomeFingerprint(source_fp, target_fp)
- source_boot = common.BuildBootableImage(
- os.path.join(OPTIONS.source_tmp, "BOOT"))
- target_boot = common.BuildBootableImage(
- os.path.join(OPTIONS.target_tmp, "BOOT"))
- updating_boot = (source_boot != target_boot)
+ source_boot = File("/tmp/boot.img",
+ common.BuildBootableImage(
+ os.path.join(OPTIONS.source_tmp, "BOOT")))
+ target_boot = File("/tmp/boot.img",
+ common.BuildBootableImage(
+ os.path.join(OPTIONS.target_tmp, "BOOT")))
+ updating_boot = (source_boot.data != target_boot.data)
- source_recovery = common.BuildBootableImage(
- os.path.join(OPTIONS.source_tmp, "RECOVERY"))
- target_recovery = common.BuildBootableImage(
- os.path.join(OPTIONS.target_tmp, "RECOVERY"))
- updating_recovery = (source_recovery != target_recovery)
+ source_recovery = File("system/recovery.img",
+ common.BuildBootableImage(
+ os.path.join(OPTIONS.source_tmp, "RECOVERY")))
+ target_recovery = File("system/recovery.img",
+ common.BuildBootableImage(
+ os.path.join(OPTIONS.target_tmp, "RECOVERY")))
+ updating_recovery = (source_recovery.data != target_recovery.data)
source_radio = source_zip.read("RADIO/image")
target_radio = target_zip.read("RADIO/image")
@@ -491,65 +536,110 @@
AppendAssertions(script, target_zip)
+ script.Print("Verifying current system...")
+
pb_verify = progress_bar_total * 0.3 * \
(total_patched_size /
- float(total_patched_size+total_verbatim_size))
+ float(total_patched_size+total_verbatim_size+1))
for i, (fn, tf, sf, size) in enumerate(patch_list):
if i % 5 == 0:
next_sizes = sum([i[3] for i in patch_list[i:i+5]])
- script.append("show_progress %f 1" %
- (next_sizes * pb_verify / total_patched_size,))
- script.append("run_program PACKAGE:applypatch -c /%s %s %s" %
- (fn, tf.sha1, sf.sha1))
+ script.ShowProgress(next_sizes * pb_verify / (total_patched_size+1), 1)
- if patch_list:
- script.append("run_program PACKAGE:applypatch -s %d" %
- (largest_source_size,))
- script.append("copy_dir PACKAGE:patch CACHE:../tmp/patchtmp")
- IncludeBinary("applypatch", target_zip, output_zip)
+ script.PatchCheck("/"+fn, tf.sha1, sf.sha1)
- script.append("\n# ---- start making changes here\n")
+ if updating_recovery:
+ d = Difference(target_recovery, source_recovery, "imgdiff")
+ print "recovery target: %d source: %d diff: %d" % (
+ target_recovery.size, source_recovery.size, len(d))
- DeleteFiles(script, [SubstituteRoot(i[0]) for i in verbatim_targets])
+ common.ZipWriteStr(output_zip, "patch/recovery.img.p", d)
+
+ script.PatchCheck("MTD:recovery:%d:%s:%d:%s" %
+ (source_recovery.size, source_recovery.sha1,
+ target_recovery.size, target_recovery.sha1))
if updating_boot:
- script.append("format BOOT:")
- output_zip.writestr("boot.img", target_boot)
+ d = Difference(target_boot, source_boot, "imgdiff")
+ print "boot target: %d source: %d diff: %d" % (
+ target_boot.size, source_boot.size, len(d))
+
+ common.ZipWriteStr(output_zip, "patch/boot.img.p", d)
+
+ script.PatchCheck("MTD:boot:%d:%s:%d:%s" %
+ (source_boot.size, source_boot.sha1,
+ target_boot.size, target_boot.sha1))
+
+ if patch_list or updating_recovery or updating_boot:
+ script.CacheFreeSpaceCheck(largest_source_size)
+ script.Print("Unpacking patches...")
+ script.UnpackPackageDir("patch", "/tmp/patchtmp")
+
+ script.Comment("---- start making changes here ----")
+
+ if OPTIONS.wipe_user_data:
+ script.Print("Erasing user data...")
+ script.FormatPartition("userdata")
+
+ script.Print("Removing unneeded files...")
+ script.DeleteFiles(["/"+i[0] for i in verbatim_targets] +
+ ["/"+i for i in sorted(source_data)
+ if i not in target_data])
+
+ if updating_boot:
+ # Produce the boot image by applying a patch to the current
+ # contents of the boot partition, and write it back to the
+ # partition.
+ script.Print("Patching boot image...")
+ script.ApplyPatch("MTD:boot:%d:%s:%d:%s"
+ % (source_boot.size, source_boot.sha1,
+ target_boot.size, target_boot.sha1),
+ "-",
+ target_boot.size, target_boot.sha1,
+ source_boot.sha1, "/tmp/patchtmp/boot.img.p")
print "boot image changed; including."
else:
print "boot image unchanged; skipping."
if updating_recovery:
- output_zip.writestr("system/recovery.img", target_recovery)
+ # Produce /system/recovery.img by applying a patch to the current
+ # contents of the recovery partition.
+ script.Print("Patching recovery image...")
+ script.ApplyPatch("MTD:recovery:%d:%s:%d:%s"
+ % (source_recovery.size, source_recovery.sha1,
+ target_recovery.size, target_recovery.sha1),
+ "/system/recovery.img",
+ target_recovery.size, target_recovery.sha1,
+ source_recovery.sha1, "/tmp/patchtmp/recovery.img.p")
print "recovery image changed; including."
else:
print "recovery image unchanged; skipping."
if updating_radio:
- script.append("show_progress 0.3 10")
- script.append("write_radio_image PACKAGE:radio.img")
- output_zip.writestr("radio.img", target_radio)
+ script.ShowProgress(0.3, 10)
+ script.Print("Writing radio image...")
+ script.WriteFirmwareImage("radio", "radio.img")
+ common.ZipWriteStr(output_zip, "radio.img", target_radio)
print "radio image changed; including."
else:
print "radio image unchanged; skipping."
+ script.Print("Patching system files...")
pb_apply = progress_bar_total * 0.7 * \
(total_patched_size /
- float(total_patched_size+total_verbatim_size))
+ float(total_patched_size+total_verbatim_size+1))
for i, (fn, tf, sf, size) in enumerate(patch_list):
if i % 5 == 0:
next_sizes = sum([i[3] for i in patch_list[i:i+5]])
- script.append("show_progress %f 1" %
- (next_sizes * pb_apply / total_patched_size,))
- script.append(("run_program PACKAGE:applypatch "
- "/%s %s %d %s:/tmp/patchtmp/%s.p") %
- (fn, tf.sha1, tf.size, sf.sha1, fn))
+ script.ShowProgress(next_sizes * pb_apply / (total_patched_size+1), 1)
+ script.ApplyPatch("/"+fn, "-", tf.size, tf.sha1,
+ sf.sha1, "/tmp/patchtmp/"+fn+".p")
target_symlinks = CopySystemFiles(target_zip, None)
target_symlinks_d = dict([(i[1], i[0]) for i in target_symlinks])
- temp_script = []
+ temp_script = script.MakeTemporary()
FixPermissions(temp_script)
# Note that this call will mess up the tree of Items, so make sure
@@ -564,14 +654,17 @@
for dest, link in source_symlinks:
if link not in target_symlinks_d:
to_delete.append(link)
- DeleteFiles(script, to_delete)
+ script.DeleteFiles(to_delete)
if verbatim_targets:
pb_verbatim = progress_bar_total * \
(total_verbatim_size /
- float(total_patched_size+total_verbatim_size))
- script.append("show_progress %f 5" % (pb_verbatim,))
- script.append("copy_dir PACKAGE:system SYSTEM:")
+ float(total_patched_size+total_verbatim_size+1))
+ script.ShowProgress(pb_verbatim, 5)
+ script.Print("Unpacking new files...")
+ script.UnpackPackageDir("system", "/system")
+
+ script.Print("Finishing up...")
# Create all the symlinks that don't already exist, or point to
# somewhere different than what we want. Delete each symlink before
@@ -583,18 +676,17 @@
to_create.append((dest, link))
else:
to_create.append((dest, link))
- DeleteFiles(script, [i[1] for i in to_create])
- script.extend(["symlink %s %s" % s for s in to_create])
+ script.DeleteFiles([i[1] for i in to_create])
+ script.MakeSymlinks(to_create)
# Now that the symlinks are created, we can set all the
# permissions.
- script.extend(temp_script)
+ script.AppendScript(temp_script)
- if updating_boot:
- script.append("show_progress 0.1 5")
- script.append("write_raw_image PACKAGE:boot.img BOOT:")
+ if OPTIONS.extra_script is not None:
+ scirpt.AppendExtra(OPTIONS.extra_script)
- AddScript(script, output_zip)
+ script.AddToZip(target_zip, output_zip)
def main(argv):
@@ -602,21 +694,31 @@
def option_handler(o, a):
if o in ("-b", "--board_config"):
common.LoadBoardConfig(a)
- return True
elif o in ("-k", "--package_key"):
OPTIONS.package_key = a
- return True
elif o in ("-i", "--incremental_from"):
OPTIONS.incremental_source = a
- return True
+ elif o in ("-w", "--wipe_user_data"):
+ OPTIONS.wipe_user_data = True
+ elif o in ("-n", "--no_prereq"):
+ OPTIONS.omit_prereq = True
+ elif o in ("-e", "--extra_script"):
+ OPTIONS.extra_script = a
+ elif o in ("-m", "--script_mode"):
+ OPTIONS.script_mode = a
else:
return False
+ return True
args = common.ParseOptions(argv, __doc__,
- extra_opts="b:k:i:d:",
+ extra_opts="b:k:i:d:wne:m:",
extra_long_opts=["board_config=",
"package_key=",
- "incremental_from="],
+ "incremental_from=",
+ "wipe_user_data",
+ "no_prereq",
+ "extra_script=",
+ "script_mode="],
extra_option_handler=option_handler)
if len(args) != 2:
@@ -630,6 +732,12 @@
print " images don't exceed partition sizes."
print
+ if OPTIONS.script_mode not in ("amend", "edify", "auto"):
+ raise ValueError('unknown script mode "%s"' % (OPTIONS.script_mode,))
+
+ if OPTIONS.extra_script is not None:
+ OPTIONS.extra_script = open(OPTIONS.extra_script).read()
+
print "unzipping target target-files..."
OPTIONS.input_tmp = common.UnzipTemp(args[0])
OPTIONS.target_tmp = OPTIONS.input_tmp
diff --git a/tools/releasetools/sign_target_files_apks b/tools/releasetools/sign_target_files_apks
index b632924..6dd8ede 100755
--- a/tools/releasetools/sign_target_files_apks
+++ b/tools/releasetools/sign_target_files_apks
@@ -47,6 +47,20 @@
-d and -k options are added to the set of mappings in the order
in which they appear on the command line.
+
+ -o (--replace_ota_keys)
+ Replace the certificate (public key) used by OTA package
+ verification with the one specified in the input target_files
+ zip (in the META/otakeys.txt file). Key remapping (-k and -d)
+ is performed on this key.
+
+ -t (--tag_changes) <+tag>,<-tag>,...
+ Comma-separated list of changes to make to the set of tags (in
+ the last component of the build fingerprint). Prefix each with
+ '+' or '-' to indicate whether that tag should be added or
+ removed. Changes are processed in the order they appear.
+ Default value is "-test-keys,+ota-rel-keys,+release-keys".
+
"""
import sys
@@ -55,6 +69,8 @@
print >> sys.stderr, "Python 2.4 or newer is required."
sys.exit(1)
+import cStringIO
+import copy
import os
import re
import subprocess
@@ -67,7 +83,8 @@
OPTIONS.extra_apks = {}
OPTIONS.key_map = {}
-
+OPTIONS.replace_ota_keys = False
+OPTIONS.tag_changes = ("-test-keys", "+ota-rel-keys", "+release-keys")
def GetApkCerts(tf_zip):
certmap = {}
@@ -84,6 +101,85 @@
return certmap
+def CheckAllApksSigned(input_tf_zip, apk_key_map):
+ """Check that all the APKs we want to sign have keys specified, and
+ error out if they don't."""
+ unknown_apks = []
+ for info in input_tf_zip.infolist():
+ if info.filename.endswith(".apk"):
+ name = os.path.basename(info.filename)
+ if name not in apk_key_map:
+ unknown_apks.append(name)
+ if unknown_apks:
+ print "ERROR: no key specified for:\n\n ",
+ print "\n ".join(unknown_apks)
+ print "\nUse '-e <apkname>=' to specify a key (which may be an"
+ print "empty string to not sign this apk)."
+ sys.exit(1)
+
+
+def SharedUserForApk(data):
+ tmp = tempfile.NamedTemporaryFile()
+ tmp.write(data)
+ tmp.flush()
+
+ p = common.Run(["aapt", "dump", "xmltree", tmp.name, "AndroidManifest.xml"],
+ stdout=subprocess.PIPE)
+ data, _ = p.communicate()
+ if p.returncode != 0:
+ raise ExternalError("failed to run aapt dump")
+ lines = data.split("\n")
+ for i in lines:
+ m = re.match(r'^\s*A: android:sharedUserId\([0-9a-fx]*\)="([^"]*)" .*$', i)
+ if m:
+ return m.group(1)
+ return None
+
+
+def CheckSharedUserIdsConsistent(input_tf_zip, apk_key_map):
+ """Check that all packages that request the same shared user id are
+ going to be signed with the same key."""
+
+ shared_user_apks = {}
+ maxlen = len("(unknown key)")
+
+ for info in input_tf_zip.infolist():
+ if info.filename.endswith(".apk"):
+ data = input_tf_zip.read(info.filename)
+
+ name = os.path.basename(info.filename)
+ shared_user = SharedUserForApk(data)
+ key = apk_key_map[name]
+ maxlen = max(maxlen, len(key))
+
+ if shared_user is not None:
+ shared_user_apks.setdefault(
+ shared_user, {}).setdefault(key, []).append(name)
+
+ errors = []
+ for k, v in shared_user_apks.iteritems():
+ # each shared user should have exactly one key used for all the
+ # apks that want that user.
+ if len(v) > 1:
+ errors.append((k, v))
+
+ if not errors: return
+
+ print "ERROR: shared user inconsistency. All apks wanting to use"
+ print " a given shared user must be signed with the same key."
+ print
+ errors.sort()
+ for user, keys in errors:
+ print 'shared user id "%s":' % (user,)
+ for key, apps in keys.iteritems():
+ print ' %-*s %s' % (maxlen, key or "(unknown key)", apps[0])
+ for a in apps[1:]:
+ print (' ' * (maxlen+5)) + a
+ print
+
+ sys.exit(1)
+
+
def SignApk(data, keyname, pw):
unsigned = tempfile.NamedTemporaryFile()
unsigned.write(data)
@@ -100,44 +196,107 @@
return data
-def SignApks(input_tf_zip, output_tf_zip):
- apk_key_map = GetApkCerts(input_tf_zip)
-
- key_passwords = common.GetKeyPasswords(set(apk_key_map.values()))
-
+def SignApks(input_tf_zip, output_tf_zip, apk_key_map, key_passwords):
maxsize = max([len(os.path.basename(i.filename))
for i in input_tf_zip.infolist()
if i.filename.endswith('.apk')])
for info in input_tf_zip.infolist():
data = input_tf_zip.read(info.filename)
+ out_info = copy.copy(info)
if info.filename.endswith(".apk"):
name = os.path.basename(info.filename)
- key = apk_key_map.get(name, None)
- if key is not None:
- print "signing: %-*s (%s)" % (maxsize, name, key)
+ key = apk_key_map[name]
+ if key:
+ print " signing: %-*s (%s)" % (maxsize, name, key)
signed_data = SignApk(data, key, key_passwords[key])
- output_tf_zip.writestr(info, signed_data)
+ output_tf_zip.writestr(out_info, signed_data)
else:
# an APK we're not supposed to sign.
- print "skipping: %s" % (name,)
- output_tf_zip.writestr(info, data)
- elif info.filename == "SYSTEM/build.prop":
- # Change build fingerprint to reflect the fact that apps are signed.
- m = re.search(r"ro\.build\.fingerprint=.*\b(test-keys)\b.*", data)
- if not m:
- print 'WARNING: ro.build.fingerprint does not contain "test-keys"'
- else:
- data = data[:m.start(1)] + "release-keys" + data[m.end(1):]
- m = re.search(r"ro\.build\.description=.*\b(test-keys)\b.*", data)
- if not m:
- print 'WARNING: ro.build.description does not contain "test-keys"'
- else:
- data = data[:m.start(1)] + "release-keys" + data[m.end(1):]
- output_tf_zip.writestr(info, data)
+ print "NOT signing: %s" % (name,)
+ output_tf_zip.writestr(out_info, data)
+ elif info.filename in ("SYSTEM/build.prop",
+ "RECOVERY/RAMDISK/default.prop"):
+ print "rewriting %s:" % (info.filename,)
+ new_data = RewriteProps(data)
+ output_tf_zip.writestr(out_info, new_data)
else:
# a non-APK file; copy it verbatim
- output_tf_zip.writestr(info, data)
+ output_tf_zip.writestr(out_info, data)
+
+
+def RewriteProps(data):
+ output = []
+ for line in data.split("\n"):
+ line = line.strip()
+ original_line = line
+ if line and line[0] != '#':
+ key, value = line.split("=", 1)
+ if key == "ro.build.fingerprint":
+ pieces = line.split("/")
+ tags = set(pieces[-1].split(","))
+ for ch in OPTIONS.tag_changes:
+ if ch[0] == "-":
+ tags.discard(ch[1:])
+ elif ch[0] == "+":
+ tags.add(ch[1:])
+ line = "/".join(pieces[:-1] + [",".join(sorted(tags))])
+ elif key == "ro.build.description":
+ pieces = line.split(" ")
+ assert len(pieces) == 5
+ tags = set(pieces[-1].split(","))
+ for ch in OPTIONS.tag_changes:
+ if ch[0] == "-":
+ tags.discard(ch[1:])
+ elif ch[0] == "+":
+ tags.add(ch[1:])
+ line = " ".join(pieces[:-1] + [",".join(sorted(tags))])
+ if line != original_line:
+ print " replace: ", original_line
+ print " with: ", line
+ output.append(line)
+ return "\n".join(output) + "\n"
+
+
+def ReplaceOtaKeys(input_tf_zip, output_tf_zip):
+ try:
+ keylist = input_tf_zip.read("META/otakeys.txt").split()
+ except KeyError:
+ raise ExternalError("can't read META/otakeys.txt from input")
+
+ mapped_keys = []
+ for k in keylist:
+ m = re.match(r"^(.*)\.x509\.pem$", k)
+ if not m:
+ raise ExternalError("can't parse \"%s\" from META/otakeys.txt" % (k,))
+ k = m.group(1)
+ mapped_keys.append(OPTIONS.key_map.get(k, k) + ".x509.pem")
+
+ print "using:\n ", "\n ".join(mapped_keys)
+ print "for OTA package verification"
+
+ # recovery uses a version of the key that has been slightly
+ # predigested (by DumpPublicKey.java) and put in res/keys.
+
+ p = common.Run(["java", "-jar",
+ os.path.join(OPTIONS.search_path, "framework", "dumpkey.jar")]
+ + mapped_keys,
+ stdout=subprocess.PIPE)
+ data, _ = p.communicate()
+ if p.returncode != 0:
+ raise ExternalError("failed to run dumpkeys")
+ common.ZipWriteStr(output_tf_zip, "RECOVERY/RAMDISK/res/keys", data)
+
+ # SystemUpdateActivity uses the x509.pem version of the keys, but
+ # put into a zipfile system/etc/security/otacerts.zip.
+
+ tempfile = cStringIO.StringIO()
+ certs_zip = zipfile.ZipFile(tempfile, "w")
+ for k in mapped_keys:
+ certs_zip.write(k)
+ certs_zip.close()
+ common.ZipWriteStr(output_tf_zip, "SYSTEM/etc/security/otacerts.zip",
+ tempfile.getvalue())
def main(argv):
@@ -160,16 +319,28 @@
elif o in ("-k", "--key_mapping"):
s, d = a.split("=")
OPTIONS.key_map[s] = d
+ elif o in ("-o", "--replace_ota_keys"):
+ OPTIONS.replace_ota_keys = True
+ elif o in ("-t", "--tag_changes"):
+ new = []
+ for i in a.split(","):
+ i = i.strip()
+ if not i or i[0] not in "-+":
+ raise ValueError("Bad tag change '%s'" % (i,))
+ new.append(i[0] + i[1:].strip())
+ OPTIONS.tag_changes = tuple(new)
else:
return False
return True
args = common.ParseOptions(argv, __doc__,
- extra_opts="s:e:d:k:",
+ extra_opts="s:e:d:k:ot:",
extra_long_opts=["signapk_jar=",
"extra_apks=",
"default_key_mappings=",
- "key_mapping="],
+ "key_mapping=",
+ "replace_ota_keys",
+ "tag_changes="],
extra_option_handler=option_handler)
if len(args) != 2:
@@ -179,7 +350,15 @@
input_zip = zipfile.ZipFile(args[0], "r")
output_zip = zipfile.ZipFile(args[1], "w")
- SignApks(input_zip, output_zip)
+ apk_key_map = GetApkCerts(input_zip)
+ CheckAllApksSigned(input_zip, apk_key_map)
+ CheckSharedUserIdsConsistent(input_zip, apk_key_map)
+
+ key_passwords = common.GetKeyPasswords(set(apk_key_map.values()))
+ SignApks(input_zip, output_zip, apk_key_map, key_passwords)
+
+ if OPTIONS.replace_ota_keys:
+ ReplaceOtaKeys(input_zip, output_zip)
input_zip.close()
output_zip.close()
diff --git a/tools/signapk/SignApk.java b/tools/signapk/SignApk.java
index 340a9f5..caf7935 100644
--- a/tools/signapk/SignApk.java
+++ b/tools/signapk/SignApk.java
@@ -62,6 +62,7 @@
import java.util.jar.JarFile;
import java.util.jar.JarOutputStream;
import java.util.jar.Manifest;
+import java.util.regex.Pattern;
import javax.crypto.Cipher;
import javax.crypto.EncryptedPrivateKeyInfo;
import javax.crypto.SecretKeyFactory;
@@ -75,6 +76,10 @@
private static final String CERT_SF_NAME = "META-INF/CERT.SF";
private static final String CERT_RSA_NAME = "META-INF/CERT.RSA";
+ // Files matching this pattern are not copied to the output.
+ private static Pattern stripPattern =
+ Pattern.compile("^META-INF/(.*)[.](SF|RSA|DSA)$");
+
private static X509Certificate readPublicKey(File file)
throws IOException, GeneralSecurityException {
FileInputStream input = new FileInputStream(file);
@@ -193,7 +198,9 @@
for (JarEntry entry: byName.values()) {
String name = entry.getName();
if (!entry.isDirectory() && !name.equals(JarFile.MANIFEST_NAME) &&
- !name.equals(CERT_SF_NAME) && !name.equals(CERT_RSA_NAME)) {
+ !name.equals(CERT_SF_NAME) && !name.equals(CERT_RSA_NAME) &&
+ (stripPattern == null ||
+ !stripPattern.matcher(name).matches())) {
InputStream data = jar.getInputStream(entry);
while ((num = data.read(buffer)) > 0) {
md.update(buffer, 0, num);
@@ -297,9 +304,14 @@
pkcs7.encodeSignedData(out);
}
- /** Copy all the files in a manifest from input to output. */
+ /**
+ * Copy all the files in a manifest from input to output. We set
+ * the modification times in the output to a fixed time, so as to
+ * reduce variation in the output file and make incremental OTAs
+ * more efficient.
+ */
private static void copyFiles(Manifest manifest,
- JarFile in, JarOutputStream out) throws IOException {
+ JarFile in, JarOutputStream out, long timestamp) throws IOException {
byte[] buffer = new byte[4096];
int num;
@@ -308,15 +320,16 @@
Collections.sort(names);
for (String name : names) {
JarEntry inEntry = in.getJarEntry(name);
+ JarEntry outEntry = null;
if (inEntry.getMethod() == JarEntry.STORED) {
// Preserve the STORED method of the input entry.
- out.putNextEntry(new JarEntry(inEntry));
+ outEntry = new JarEntry(inEntry);
} else {
// Create a new entry so that the compressed len is recomputed.
- JarEntry je = new JarEntry(name);
- je.setTime(inEntry.getTime());
- out.putNextEntry(je);
+ outEntry = new JarEntry(name);
}
+ outEntry.setTime(timestamp);
+ out.putNextEntry(outEntry);
InputStream data = in.getInputStream(inEntry);
while ((num = data.read(buffer)) > 0) {
@@ -373,7 +386,7 @@
writeSignatureBlock(signature, publicKey, outputJar);
// Everything else
- copyFiles(manifest, inputJar, outputJar);
+ copyFiles(manifest, inputJar, outputJar, timestamp);
} catch (Exception e) {
e.printStackTrace();
System.exit(1);
diff --git a/tools/zipalign/ZipAlign.cpp b/tools/zipalign/ZipAlign.cpp
index 9e3cb66..058f9ed 100644
--- a/tools/zipalign/ZipAlign.cpp
+++ b/tools/zipalign/ZipAlign.cpp
@@ -30,7 +30,8 @@
{
fprintf(stderr, "Zip alignment utility\n");
fprintf(stderr,
- "Usage: zipalign [-f] [-v] <align> infile.zip outfile.zip\n");
+ "Usage: zipalign [-f] [-v] <align> infile.zip outfile.zip\n"
+ " zipalign -c [-v] <align> infile.zip\n" );
}
/*
@@ -152,14 +153,14 @@
pEntry = zipFile.getEntryByIndex(i);
if (pEntry->isCompressed()) {
if (verbose) {
- printf("%8ld %s (OK - compressed)\n",
+ printf("%8ld %s (OK - compressed)\n",
(long) pEntry->getFileOffset(), pEntry->getFileName());
}
} else {
long offset = pEntry->getFileOffset();
if ((offset % alignment) != 0) {
if (verbose) {
- printf("%8ld %s (BAD - %ld)\n",
+ printf("%8ld %s (BAD - %ld)\n",
(long) offset, pEntry->getFileName(),
offset % alignment);
}
@@ -185,6 +186,7 @@
int main(int argc, char* const argv[])
{
bool wantUsage = false;
+ bool check = false;
bool force = false;
bool verbose = false;
int result = 1;
@@ -204,6 +206,9 @@
while (*cp != '\0') {
switch (*cp) {
+ case 'c':
+ check = true;
+ break;
case 'f':
force = true;
break;
@@ -223,7 +228,7 @@
argv++;
}
- if (argc != 3) {
+ if (!((check && argc == 2) || (!check && argc == 3))) {
wantUsage = true;
goto bail;
}
@@ -235,12 +240,17 @@
goto bail;
}
- /* create the new archive */
- result = process(argv[1], argv[2], alignment, force);
+ if (check) {
+ /* check existing archive for correct alignment */
+ result = verify(argv[1], alignment, verbose);
+ } else {
+ /* create the new archive */
+ result = process(argv[1], argv[2], alignment, force);
- /* trust, but verify */
- if (result == 0)
- result = verify(argv[2], alignment, verbose);
+ /* trust, but verify */
+ if (result == 0)
+ result = verify(argv[2], alignment, verbose);
+ }
bail:
if (wantUsage) {
@@ -250,4 +260,3 @@
return result;
}
-