Merge "Zipalign: Avoid unnecessary padding"
diff --git a/core/Makefile b/core/Makefile
index 63ec1a6..4b2a331 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -2430,6 +2430,7 @@
#
# Note: it's intentional to skip signing for boot-debug.img, because it
# can only be used if the device is unlocked with verification error.
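+# When a vendor boot image is built, the debug ramdisk is expected to be
+# packaged into vendor_boot-debug.img instead, so skip boot-debug.img here.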
+ifneq ($(BUILDING_VENDOR_BOOT_IMAGE),true)
ifneq ($(INSTALLED_BOOTIMAGE_TARGET),)
ifneq ($(strip $(TARGET_NO_KERNEL)),true)
ifneq ($(strip $(BOARD_KERNEL_BINARIES)),)
@@ -2488,6 +2489,7 @@
endif # TARGET_NO_KERNEL
endif # INSTALLED_BOOTIMAGE_TARGET
+endif # BUILDING_VENDOR_BOOT_IMAGE is not true
ifeq ($(BUILDING_VENDOR_BOOT_IMAGE),true)
ifeq ($(BUILDING_RAMDISK_IMAGE),true)
@@ -2633,6 +2635,7 @@
#
# Note: it's intentional to skip signing for boot-test-harness.img, because it
# can only be used if the device is unlocked with verification error.
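+# As with boot-debug.img above, skip boot-test-harness.img when a vendor boot
+# image is built; its ramdisk is expected in vendor_boot-test-harness.img.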
+ifneq ($(BUILDING_VENDOR_BOOT_IMAGE),true)
ifneq ($(INSTALLED_BOOTIMAGE_TARGET),)
ifneq ($(strip $(TARGET_NO_KERNEL)),true)
@@ -2675,6 +2678,7 @@
endif # TARGET_NO_KERNEL
endif # INSTALLED_BOOTIMAGE_TARGET
+endif # BUILDING_VENDOR_BOOT_IMAGE is not true
endif # BOARD_BUILD_SYSTEM_ROOT_IMAGE is not true
ifeq ($(BUILDING_VENDOR_BOOT_IMAGE),true)
diff --git a/core/autogen_test_config.mk b/core/autogen_test_config.mk
index 137b118..798dd5f 100644
--- a/core/autogen_test_config.mk
+++ b/core/autogen_test_config.mk
@@ -22,8 +22,7 @@
# autogen_test_config_file: Path to the test config file generated.
autogen_test_config_file := $(dir $(LOCAL_BUILT_MODULE))$(LOCAL_MODULE).config
-# TODO: (b/167308193) Switch to /data/local/tests/unrestricted as the default install base.
-autogen_test_install_base := /data/local/tmp
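+# Default install base for auto-generated test configs (see b/167308193).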
+autogen_test_install_base := /data/local/tests/unrestricted
# Automatically setup test root for native test.
ifeq (true,$(is_native))
ifeq (true,$(LOCAL_VENDOR_MODULE))
diff --git a/core/native_benchmark_test_config_template.xml b/core/native_benchmark_test_config_template.xml
index d1f0199..8a89241 100644
--- a/core/native_benchmark_test_config_template.xml
+++ b/core/native_benchmark_test_config_template.xml
@@ -22,10 +22,10 @@
<target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
<option name="cleanup" value="true" />
- <option name="push" value="{MODULE}->/data/local/tmp/{MODULE}" />
+ <option name="push" value="{MODULE}->/data/local/tests/unrestricted/{MODULE}" />
</target_preparer>
<test class="com.android.tradefed.testtype.GoogleBenchmarkTest" >
- <option name="native-benchmark-device-path" value="/data/local/tmp" />
+ <option name="native-benchmark-device-path" value="/data/local/tests/unrestricted" />
<option name="benchmark-module-name" value="{MODULE}" />
</test>
</configuration>
diff --git a/core/rust_device_benchmark_config_template.xml b/core/rust_device_benchmark_config_template.xml
index 2055df2..a117fc4 100644
--- a/core/rust_device_benchmark_config_template.xml
+++ b/core/rust_device_benchmark_config_template.xml
@@ -17,11 +17,11 @@
<configuration description="Config to run {MODULE} rust benchmark tests.">
<target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
<option name="cleanup" value="false" />
- <option name="push" value="{MODULE}->/data/local/tmp/{MODULE}" />
+ <option name="push" value="{MODULE}->/data/local/tests/unrestricted/{MODULE}" />
</target_preparer>
<test class="com.android.tradefed.testtype.rust.RustBinaryTest" >
- <option name="test-device-path" value="/data/local/tmp" />
+ <option name="test-device-path" value="/data/local/tests/unrestricted" />
<option name="module-name" value="{MODULE}" />
<option name="is-benchmark" value="true" />
</test>
diff --git a/core/rust_device_test_config_template.xml b/core/rust_device_test_config_template.xml
index 9429d38..536f57e 100644
--- a/core/rust_device_test_config_template.xml
+++ b/core/rust_device_test_config_template.xml
@@ -17,11 +17,11 @@
<configuration description="Config to run {MODULE} device tests.">
<target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
<option name="cleanup" value="true" />
- <option name="push" value="{MODULE}->/data/local/tmp/{MODULE}" />
+ <option name="push" value="{MODULE}->/data/local/tests/unrestricted/{MODULE}" />
</target_preparer>
<test class="com.android.tradefed.testtype.rust.RustBinaryTest" >
- <option name="test-device-path" value="/data/local/tmp" />
+ <option name="test-device-path" value="/data/local/tests/unrestricted" />
<option name="module-name" value="{MODULE}" />
</test>
</configuration>
diff --git a/core/soong_config.mk b/core/soong_config.mk
index a39707e..9eb02b2 100644
--- a/core/soong_config.mk
+++ b/core/soong_config.mk
@@ -165,6 +165,7 @@
$(call add_json_list, VendorSnapshotDirsExcluded, $(VENDOR_SNAPSHOT_DIRS_EXCLUDED))
$(call add_json_list, RecoverySnapshotDirsIncluded, $(RECOVERY_SNAPSHOT_DIRS_INCLUDED))
$(call add_json_list, RecoverySnapshotDirsExcluded, $(RECOVERY_SNAPSHOT_DIRS_EXCLUDED))
+$(call add_json_bool, HostFakeSnapshotEnabled, $(HOST_FAKE_SNAPSHOT_ENABLE))
$(call add_json_bool, Treble_linker_namespaces, $(filter true,$(PRODUCT_TREBLE_LINKER_NAMESPACES)))
$(call add_json_bool, Enforce_vintf_manifest, $(filter true,$(PRODUCT_ENFORCE_VINTF_MANIFEST)))
diff --git a/tools/generate-notice-files.py b/tools/generate-notice-files.py
index bf958fb..5e3010f 100755
--- a/tools/generate-notice-files.py
+++ b/tools/generate-notice-files.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
#
# Copyright (C) 2012 The Android Open Source Project
#
@@ -30,20 +30,18 @@
import os
import os.path
import re
+import struct
import sys
MD5_BLOCKSIZE = 1024 * 1024
HTML_ESCAPE_TABLE = {
- "&": "&",
- '"': """,
- "'": "'",
- ">": ">",
- "<": "<",
+ b"&": b"&",
+ b'"': b""",
+ b"'": b"'",
+ b">": b">",
+ b"<": b"<",
}
-def hexify(s):
- return ("%02x"*len(s)) % tuple(map(ord, s))
-
def md5sum(filename):
"""Calculate an MD5 of the file given by FILENAME,
and return hex digest as a string.
@@ -57,20 +55,26 @@
break
sum.update(block)
f.close()
- return hexify(sum.digest())
+ return sum.hexdigest()
def html_escape(text):
"""Produce entities within text."""
- return "".join(HTML_ESCAPE_TABLE.get(c,c) for c in text)
+    # Using "for i in text" doesn't work since i will be an int, not a byte.
+    # There are multiple ways to solve this, but the most performant way to
+    # iterate over a byte string is to use unpack. Iterating with
+    # "for i in range(len(text))" and taking single-byte slices is twice as
+    # slow as this method.
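+    # For example, html_escape(b"1 < 2") returns b"1 &lt; 2".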
+ return b"".join(HTML_ESCAPE_TABLE.get(i,i) for i in struct.unpack(str(len(text)) + 'c', text))
-HTML_OUTPUT_CSS="""
+HTML_OUTPUT_CSS=b"""
<style type="text/css">
body { padding: 0; font-family: sans-serif; }
.same-license { background-color: #eeeeee; border-top: 20px solid white; padding: 10px; }
.label { font-weight: bold; }
.file-list { margin-left: 1em; color: blue; }
</style>
+
"""
def combine_notice_files_html(file_hash, input_dirs, output_filename):
@@ -90,13 +94,13 @@
# Open the output file, and output the header pieces
output_file = open(output_filename, "wb")
- print >> output_file, "<html><head>"
- print >> output_file, HTML_OUTPUT_CSS
- print >> output_file, '</head><body topmargin="0" leftmargin="0" rightmargin="0" bottommargin="0">'
+ output_file.write(b"<html><head>\n")
+ output_file.write(HTML_OUTPUT_CSS)
+ output_file.write(b'</head><body topmargin="0" leftmargin="0" rightmargin="0" bottommargin="0">\n')
# Output our table of contents
- print >> output_file, '<div class="toc">'
- print >> output_file, "<ul>"
+ output_file.write(b'<div class="toc">\n')
+ output_file.write(b"<ul>\n")
# Flatten the list of lists into a single list of filenames
sorted_filenames = sorted(itertools.chain.from_iterable(file_hash))
@@ -104,31 +108,29 @@
# Print out a nice table of contents
for filename in sorted_filenames:
stripped_filename = SRC_DIR_STRIP_RE.sub(r"\1", filename)
- print >> output_file, '<li><a href="#id%d">%s</a></li>' % (id_table.get(filename), stripped_filename)
+ output_file.write(('<li><a href="#id%d">%s</a></li>\n' % (id_table.get(filename), stripped_filename)).encode())
- print >> output_file, "</ul>"
- print >> output_file, "</div><!-- table of contents -->"
+ output_file.write(b"</ul>\n")
+ output_file.write(b"</div><!-- table of contents -->\n")
# Output the individual notice file lists
- print >>output_file, '<table cellpadding="0" cellspacing="0" border="0">'
+ output_file.write(b'<table cellpadding="0" cellspacing="0" border="0">\n')
for value in file_hash:
- print >> output_file, '<tr id="id%d"><td class="same-license">' % id_table.get(value[0])
- print >> output_file, '<div class="label">Notices for file(s):</div>'
- print >> output_file, '<div class="file-list">'
+ output_file.write(b'<tr id="id%d"><td class="same-license">\n' % id_table.get(value[0]))
+ output_file.write(b'<div class="label">Notices for file(s):</div>\n')
+ output_file.write(b'<div class="file-list">\n')
for filename in value:
- print >> output_file, "%s <br/>" % (SRC_DIR_STRIP_RE.sub(r"\1", filename))
- print >> output_file, "</div><!-- file-list -->"
- print >> output_file
- print >> output_file, '<pre class="license-text">'
- print >> output_file, html_escape(open(value[0]).read())
- print >> output_file, "</pre><!-- license-text -->"
- print >> output_file, "</td></tr><!-- same-license -->"
- print >> output_file
- print >> output_file
- print >> output_file
+ output_file.write(("%s <br/>\n" % SRC_DIR_STRIP_RE.sub(r"\1", filename)).encode())
+ output_file.write(b"</div><!-- file-list -->\n")
+ output_file.write(b"\n")
+ output_file.write(b'<pre class="license-text">\n')
+ with open(value[0], "rb") as notice_file:
+ output_file.write(html_escape(notice_file.read()))
+ output_file.write(b"\n</pre><!-- license-text -->\n")
+ output_file.write(b"</td></tr><!-- same-license -->\n\n\n\n")
# Finish off the file output
- print >> output_file, "</table>"
- print >> output_file, "</body></html>"
+ output_file.write(b"</table>\n")
+ output_file.write(b"</body></html>\n")
output_file.close()
def combine_notice_files_text(file_hash, input_dirs, output_filename, file_title):
@@ -136,14 +138,18 @@
SRC_DIR_STRIP_RE = re.compile("(?:" + "|".join(input_dirs) + ")(/.*).txt")
output_file = open(output_filename, "wb")
- print >> output_file, file_title
+ output_file.write(file_title.encode())
+ output_file.write(b"\n")
for value in file_hash:
- print >> output_file, "============================================================"
- print >> output_file, "Notices for file(s):"
- for filename in value:
- print >> output_file, SRC_DIR_STRIP_RE.sub(r"\1", filename)
- print >> output_file, "------------------------------------------------------------"
- print >> output_file, open(value[0]).read()
+ output_file.write(b"============================================================\n")
+ output_file.write(b"Notices for file(s):\n")
+ for filename in value:
+ output_file.write(SRC_DIR_STRIP_RE.sub(r"\1", filename).encode())
+ output_file.write(b"\n")
+ output_file.write(b"------------------------------------------------------------\n")
+ with open(value[0], "rb") as notice_file:
+ output_file.write(notice_file.read())
+ output_file.write(b"\n")
output_file.close()
def combine_notice_files_xml(files_with_same_hash, input_dirs, output_filename):
@@ -154,15 +160,15 @@
# Set up a filename to row id table (anchors inside tables don't work in
# most browsers, but href's to table row ids do)
id_table = {}
- for file_key in files_with_same_hash.keys():
- for filename in files_with_same_hash[file_key]:
+ for file_key, files in files_with_same_hash.items():
+ for filename in files:
id_table[filename] = file_key
# Open the output file, and output the header pieces
output_file = open(output_filename, "wb")
- print >> output_file, '<?xml version="1.0" encoding="utf-8"?>'
- print >> output_file, "<licenses>"
+ output_file.write(b'<?xml version="1.0" encoding="utf-8"?>\n')
+ output_file.write(b"<licenses>\n")
# Flatten the list of lists into a single list of filenames
sorted_filenames = sorted(id_table.keys())
@@ -170,10 +176,8 @@
# Print out a nice table of contents
for filename in sorted_filenames:
stripped_filename = SRC_DIR_STRIP_RE.sub(r"\1", filename)
- print >> output_file, '<file-name contentId="%s">%s</file-name>' % (id_table.get(filename), stripped_filename)
-
- print >> output_file
- print >> output_file
+ output_file.write(('<file-name contentId="%s">%s</file-name>\n' % (id_table.get(filename), stripped_filename)).encode())
+ output_file.write(b"\n\n")
processed_file_keys = []
# Output the individual notice file lists
@@ -183,11 +187,13 @@
continue
processed_file_keys.append(file_key)
- print >> output_file, '<file-content contentId="%s"><![CDATA[%s]]></file-content>' % (file_key, html_escape(open(filename).read()))
- print >> output_file
+ output_file.write(('<file-content contentId="%s"><![CDATA[' % file_key).encode())
+ with open(filename, "rb") as notice_file:
+ output_file.write(html_escape(notice_file.read()))
+ output_file.write(b"]]></file-content>\n\n")
# Finish off the file output
- print >> output_file, "</licenses>"
+ output_file.write(b"</licenses>\n")
output_file.close()
def get_args():
@@ -254,7 +260,7 @@
file_md5sum = md5sum(filename)
files_with_same_hash[file_md5sum].append(filename)
- filesets = [sorted(files_with_same_hash[md5]) for md5 in sorted(files_with_same_hash.keys())]
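+    # Iterating the dict yields its keys (the MD5 sums); sort them so the
+    # combined notice output is deterministic.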
+ filesets = [sorted(files_with_same_hash[md5]) for md5 in sorted(list(files_with_same_hash))]
combine_notice_files_text(filesets, input_dirs, txt_output_file, file_title)
if html_output_file is not None:
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index 5275087..f3b58f8 100644
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -42,10 +42,6 @@
--is_signing
Skip building & adding the images for "userdata" and "cache" if we
are signing the target files.
-
- --skip_list <partitions>
- Optional comma-separated list of partitions to skip when
- processing the zipfile.
"""
from __future__ import print_function
@@ -82,7 +78,6 @@
OPTIONS.replace_verity_public_key = False
OPTIONS.replace_verity_private_key = False
OPTIONS.is_signing = False
-OPTIONS.skip_list = []
# Use a fixed timestamp (01/01/2009 00:00:00 UTC) for files when packaging
# images. (b/24377993, b/80600931)
@@ -594,7 +589,7 @@
AssertionError: If it can't find an image.
"""
for partition in ab_partitions:
- img_name = partition + ".img"
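+    # ab_partitions comes from readlines(), so each entry keeps its trailing
+    # newline; strip it before building the image name.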
+ img_name = partition.strip() + ".img"
# Assert that the image is present under IMAGES/ now.
if output_zip:
@@ -801,7 +796,7 @@
logger.info("\n\n++++ %s ++++\n\n", s)
boot_image = None
- if has_boot and "boot" not in OPTIONS.skip_list:
+ if has_boot:
banner("boot")
boot_images = OPTIONS.info_dict.get("boot_images")
if boot_images is None:
@@ -822,7 +817,7 @@
if output_zip:
boot_image.AddToZip(output_zip)
- if has_vendor_boot and "vendor_boot" not in OPTIONS.skip_list:
+ if has_vendor_boot:
banner("vendor_boot")
vendor_boot_image = common.GetVendorBootImage(
"IMAGES/vendor_boot.img", "vendor_boot.img", OPTIONS.input_tmp,
@@ -836,7 +831,7 @@
vendor_boot_image.AddToZip(output_zip)
recovery_image = None
- if has_recovery and "recovery" not in OPTIONS.skip_list:
+ if has_recovery:
banner("recovery")
recovery_image = common.GetBootableImage(
"IMAGES/recovery.img", "recovery.img", OPTIONS.input_tmp, "RECOVERY")
@@ -861,39 +856,59 @@
if output_zip:
recovery_two_step_image.AddToZip(output_zip)
- def add_partition(call):
- partition, has_partition, add_func, add_args = call
- if has_partition and partition not in OPTIONS.skip_list:
- banner(partition)
- partitions[partition] = add_func(output_zip, *add_args)
+ if has_system:
+ banner("system")
+ partitions['system'] = AddSystem(
+ output_zip, recovery_img=recovery_image, boot_img=boot_image)
- add_partition_calls = (
- ("system", has_system, AddSystem, [recovery_image, boot_image]),
- ("vendor", has_vendor, AddVendor, [recovery_image, boot_image]),
- ("product", has_product, AddProduct, []),
- ("system_ext", has_system_ext, AddSystemExt, []),
- ("odm", has_odm, AddOdm, []),
- ("vendor_dlkm", has_vendor_dlkm, AddVendorDlkm, []),
- ("odm_dlkm", has_odm_dlkm, AddOdmDlkm, []),
- ("system_other", has_system_other, AddSystemOther, []),
- )
- for call in add_partition_calls:
- add_partition(call)
+ if has_vendor:
+ banner("vendor")
+ partitions['vendor'] = AddVendor(
+ output_zip, recovery_img=recovery_image, boot_img=boot_image)
+
+ if has_product:
+ banner("product")
+ partitions['product'] = AddProduct(output_zip)
+
+ if has_system_ext:
+ banner("system_ext")
+ partitions['system_ext'] = AddSystemExt(output_zip)
+
+ if has_odm:
+ banner("odm")
+ partitions['odm'] = AddOdm(output_zip)
+
+ if has_vendor_dlkm:
+ banner("vendor_dlkm")
+ partitions['vendor_dlkm'] = AddVendorDlkm(output_zip)
+
+ if has_odm_dlkm:
+ banner("odm_dlkm")
+ partitions['odm_dlkm'] = AddOdmDlkm(output_zip)
+
+ if has_system_other:
+ banner("system_other")
+ AddSystemOther(output_zip)
AddApexInfo(output_zip)
if not OPTIONS.is_signing:
- add_partition(("userdata", True, AddUserdata, []))
- add_partition(("cache", True, AddUserdata, []))
+ banner("userdata")
+ AddUserdata(output_zip)
+ banner("cache")
+ AddCache(output_zip)
if OPTIONS.info_dict.get("board_bpt_enable") == "true":
banner("partition-table")
AddPartitionTable(output_zip)
- add_partition(
- ("dtbo", OPTIONS.info_dict.get("has_dtbo") == "true", AddDtbo, []))
- add_partition(
- ("pvmfw", OPTIONS.info_dict.get("has_pvmfw") == "true", AddPvmfw, []))
+ if OPTIONS.info_dict.get("has_dtbo") == "true":
+ banner("dtbo")
+ partitions['dtbo'] = AddDtbo(output_zip)
+
+ if OPTIONS.info_dict.get("has_pvmfw") == "true":
+ banner("pvmfw")
+ partitions['pvmfw'] = AddPvmfw(output_zip)
# Custom images.
custom_partitions = OPTIONS.info_dict.get(
@@ -911,7 +926,7 @@
vbmeta_partitions = common.AVB_PARTITIONS[:] + tuple(custom_partitions)
vbmeta_system = OPTIONS.info_dict.get("avb_vbmeta_system", "").strip()
- if vbmeta_system and "vbmeta_system" not in OPTIONS.skip_list:
+ if vbmeta_system:
banner("vbmeta_system")
partitions["vbmeta_system"] = AddVBMeta(
output_zip, partitions, "vbmeta_system", vbmeta_system.split())
@@ -921,7 +936,7 @@
vbmeta_partitions.append("vbmeta_system")
vbmeta_vendor = OPTIONS.info_dict.get("avb_vbmeta_vendor", "").strip()
- if vbmeta_vendor and "vbmeta_vendor" not in OPTIONS.skip_list:
+ if vbmeta_vendor:
banner("vbmeta_vendor")
partitions["vbmeta_vendor"] = AddVBMeta(
output_zip, partitions, "vbmeta_vendor", vbmeta_vendor.split())
@@ -930,20 +945,16 @@
if item not in vbmeta_vendor.split()]
vbmeta_partitions.append("vbmeta_vendor")
- if OPTIONS.info_dict.get("avb_building_vbmeta_image"
- ) == "true" and "vbmeta" not in OPTIONS.skip_list:
+ if OPTIONS.info_dict.get("avb_building_vbmeta_image") == "true":
banner("vbmeta")
AddVBMeta(output_zip, partitions, "vbmeta", vbmeta_partitions)
if OPTIONS.info_dict.get("use_dynamic_partitions") == "true":
- if OPTIONS.info_dict.get(
- "build_super_empty_partition"
- ) == "true" and "super_empty" not in OPTIONS.skip_list:
+ if OPTIONS.info_dict.get("build_super_empty_partition") == "true":
banner("super_empty")
AddSuperEmpty(output_zip)
- if OPTIONS.info_dict.get(
- "build_super_partition") == "true" and "super" not in OPTIONS.skip_list:
+ if OPTIONS.info_dict.get("build_super_partition") == "true":
if OPTIONS.info_dict.get(
"build_retrofit_dynamic_partitions_ota_package") == "true":
banner("super split images")
@@ -954,20 +965,17 @@
"ab_partitions.txt")
if os.path.exists(ab_partitions_txt):
with open(ab_partitions_txt) as f:
- ab_partitions = f.read().splitlines()
+ ab_partitions = f.readlines()
- # Skip care_map generation if any A/B partitions are in the skip list,
- # since this file cannot be generated without those partitions.
- if not set(ab_partitions).intersection(set(OPTIONS.skip_list)):
- # For devices using A/B update, make sure we have all the needed images
- # ready under IMAGES/ or RADIO/.
- CheckAbOtaImages(output_zip, ab_partitions)
+ # For devices using A/B update, make sure we have all the needed images
+ # ready under IMAGES/ or RADIO/.
+ CheckAbOtaImages(output_zip, ab_partitions)
- # Generate care_map.pb for ab_partitions, then write this file to
- # target_files package.
- output_care_map = os.path.join(OPTIONS.input_tmp, "META", "care_map.pb")
- AddCareMapForAbOta(output_zip if output_zip else output_care_map,
- ab_partitions, partitions)
+ # Generate care_map.pb for ab_partitions, then write this file to
+ # target_files package.
+ output_care_map = os.path.join(OPTIONS.input_tmp, "META", "care_map.pb")
+ AddCareMapForAbOta(output_zip if output_zip else output_care_map,
+ ab_partitions, partitions)
# Radio images that need to be packed into IMAGES/, and product-img.zip.
pack_radioimages_txt = os.path.join(
@@ -976,8 +984,7 @@
with open(pack_radioimages_txt) as f:
AddPackRadioImages(output_zip, f.readlines())
- if "vbmeta" not in OPTIONS.skip_list:
- AddVbmetaDigest(output_zip)
+ AddVbmetaDigest(output_zip)
if output_zip:
common.ZipClose(output_zip)
@@ -998,8 +1005,6 @@
OPTIONS.replace_verity_public_key = (True, a)
elif o == "--is_signing":
OPTIONS.is_signing = True
- elif o == "--skip_list":
- OPTIONS.skip_list = a.split(',')
else:
return False
return True
@@ -1009,7 +1014,7 @@
extra_long_opts=["add_missing", "rebuild_recovery",
"replace_verity_public_key=",
"replace_verity_private_key=",
- "is_signing","skip_list="],
+ "is_signing"],
extra_option_handler=option_handler)
if len(args) != 1:
diff --git a/tools/releasetools/sign_target_files_apks.py b/tools/releasetools/sign_target_files_apks.py
index 47703bb..0842af9 100755
--- a/tools/releasetools/sign_target_files_apks.py
+++ b/tools/releasetools/sign_target_files_apks.py
@@ -189,8 +189,6 @@
OPTIONS.gki_signing_algorithm = None
OPTIONS.gki_signing_extra_args = None
OPTIONS.android_jar_path = None
-OPTIONS.vendor_partitions = []
-OPTIONS.vendor_otatools = None
AVB_FOOTER_ARGS_BY_PARTITION = {
@@ -1291,10 +1289,6 @@
OPTIONS.gki_signing_algorithm = a
elif o == "--gki_signing_extra_args":
OPTIONS.gki_signing_extra_args = a
- elif o == "--vendor_otatools":
- OPTIONS.vendor_otatools = a
- elif o == "--vendor_partitions":
- OPTIONS.vendor_partitions = a.split(",")
else:
return False
return True
@@ -1345,8 +1339,6 @@
"gki_signing_key=",
"gki_signing_algorithm=",
"gki_signing_extra_args=",
- "vendor_partitions=",
- "vendor_otatools=",
],
extra_option_handler=option_handler)
@@ -1393,12 +1385,7 @@
common.ZipClose(output_zip)
# Skip building userdata.img and cache.img when signing the target files.
- new_args = ["--is_signing", "--verbose"]
- if OPTIONS.vendor_partitions:
- new_args += [
- "--skip_list",
- ','.join(OPTIONS.vendor_partitions),
- ]
+ new_args = ["--is_signing"]
# add_img_to_target_files builds the system image from scratch, so the
# recovery patch is guaranteed to be regenerated there.
if OPTIONS.rebuild_recovery:
@@ -1406,19 +1393,6 @@
new_args.append(args[1])
add_img_to_target_files.main(new_args)
- # Rebuild the vendor partitions using vendor_otatools.
- # TODO(b/192253131): Remove the need for image compilation with vendor_otatools
- if OPTIONS.vendor_partitions and OPTIONS.vendor_otatools:
- vendor_otatools_dir = common.MakeTempDir(prefix="vendor_otatools_")
- common.UnzipToDir(OPTIONS.vendor_otatools, vendor_otatools_dir)
- cmd = [
- os.path.join(vendor_otatools_dir, "bin", "add_img_to_target_files"),
- "--verbose",
- "--add_missing",
- args[1],
- ]
- common.RunAndCheckOutput(cmd, verbose=True)
-
print("done.")