Merge "Extend hardcoded list of optional uses-libraries."
diff --git a/core/Makefile b/core/Makefile
index 72929bf..94b4803 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -5374,7 +5374,7 @@
tool_extension := $(wildcard $(tool_extensions)/releasetools.py)
$(BUILT_TARGET_FILES_PACKAGE): PRIVATE_TOOL_EXTENSION := $(tool_extension)
-updaer_dep :=
+updater_dep :=
ifeq ($(AB_OTA_UPDATER),true)
updater_dep += system/update_engine/update_engine.conf
$(call declare-1p-target,system/update_engine/update_engine.conf,system/update_engine)
diff --git a/core/base_rules.mk b/core/base_rules.mk
index 7ea9b52..355a22e 100644
--- a/core/base_rules.mk
+++ b/core/base_rules.mk
@@ -20,7 +20,11 @@
# Users can define base-rules-hook in their buildspec.mk to perform
# arbitrary operations as each module is included.
ifdef base-rules-hook
-$(if $(base-rules-hook),)
+ ifndef _has_warned_about_base_rules_hook
+ $(warning base-rules-hook is deprecated, please remove usages of it and/or convert to Soong.)
+ _has_warned_about_base_rules_hook := true
+ endif
+ $(if $(base-rules-hook),)
endif
###########################################################
diff --git a/core/board_config.mk b/core/board_config.mk
index dc50a68..8074225 100644
--- a/core/board_config.mk
+++ b/core/board_config.mk
@@ -234,10 +234,7 @@
.KATI_READONLY := TARGET_DEVICE_DIR
endif
-# TODO(colefaust) change this if to RBC_PRODUCT_CONFIG when
-# the board configuration is known to work on everything
-# the product config works on.
-ifndef RBC_BOARD_CONFIG
+ifndef RBC_PRODUCT_CONFIG
include $(board_config_mk)
else
$(shell mkdir -p $(OUT_DIR)/rbc)
diff --git a/core/config.mk b/core/config.mk
index e9dedfd..4db33f1 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -861,6 +861,7 @@
30.0 \
31.0 \
32.0 \
+ 33.0 \
.KATI_READONLY := \
PLATFORM_SEPOLICY_COMPAT_VERSIONS \
diff --git a/core/definitions.mk b/core/definitions.mk
index a3f12e7..8fe5edb 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -2609,7 +2609,7 @@
@mkdir -p $(dir $@)tmp
$(hide) rm -f $(dir $@)classes*.dex $(dir $@)d8_input.jar
$(hide) $(ZIP2ZIP) -j -i $< -o $(dir $@)d8_input.jar "**/*.class"
-$(hide) $(D8_WRAPPER) $(DX_COMMAND) $(D8_FLAGS) \
+$(hide) $(D8_WRAPPER) $(D8_COMMAND) \
--output $(dir $@)tmp \
$(addprefix --lib ,$(PRIVATE_D8_LIBS)) \
--min-api $(PRIVATE_MIN_SDK_VERSION) \
@@ -3213,7 +3213,7 @@
define transform-jar-to-dex-r8
@echo R8: $@
$(hide) rm -f $(PRIVATE_PROGUARD_DICTIONARY)
-$(hide) $(R8_WRAPPER) $(R8_COMPAT_PROGUARD) $(R8_FLAGS) \
+$(hide) $(R8_WRAPPER) $(R8_COMMAND) \
-injars '$<' \
--min-api $(PRIVATE_MIN_SDK_VERSION) \
--no-data-resources \
diff --git a/core/java.mk b/core/java.mk
index a29f820..01951c0 100644
--- a/core/java.mk
+++ b/core/java.mk
@@ -494,13 +494,13 @@
$(built_dex_intermediate): PRIVATE_EXTRA_INPUT_JAR := $(extra_input_jar)
$(built_dex_intermediate): PRIVATE_PROGUARD_FLAGS := $(legacy_proguard_flags) $(common_proguard_flags) $(LOCAL_PROGUARD_FLAGS)
$(built_dex_intermediate): PRIVATE_PROGUARD_DICTIONARY := $(proguard_dictionary)
- $(built_dex_intermediate) : $(full_classes_pre_proguard_jar) $(extra_input_jar) $(my_proguard_sdk_raise) $(common_proguard_flag_files) $(legacy_proguard_lib_deps) $(R8_COMPAT_PROGUARD) $(LOCAL_PROGUARD_FLAGS_DEPS)
+ $(built_dex_intermediate) : $(full_classes_pre_proguard_jar) $(extra_input_jar) $(my_proguard_sdk_raise) $(common_proguard_flag_files) $(legacy_proguard_lib_deps) $(R8) $(LOCAL_PROGUARD_FLAGS_DEPS)
$(transform-jar-to-dex-r8)
else # !LOCAL_PROGUARD_ENABLED
$(built_dex_intermediate): .KATI_NINJA_POOL := $(D8_NINJA_POOL)
$(built_dex_intermediate): PRIVATE_D8_LIBS := $(full_java_bootclasspath_libs) $(full_shared_java_header_libs)
$(built_dex_intermediate): $(full_java_bootclasspath_libs) $(full_shared_java_header_libs)
- $(built_dex_intermediate): $(full_classes_pre_proguard_jar) $(DX) $(ZIP2ZIP)
+ $(built_dex_intermediate): $(full_classes_pre_proguard_jar) $(D8) $(ZIP2ZIP)
$(transform-classes.jar-to-dex)
endif
diff --git a/core/main.mk b/core/main.mk
index e0efdad..171a761 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -931,7 +931,7 @@
$(eval my_deps := $(call get-all-shared-libs-deps,$(m)))\
$(foreach dep,$(my_deps),\
$(foreach f,$(ALL_MODULES.$(dep).HOST_SHARED_LIBRARY_FILES),\
- $(if $(filter $(suite),device-tests general-tests),\
+ $(if $(filter $(suite),device-tests general-tests art-host-tests host-unit-tests),\
$(eval my_testcases := $(HOST_OUT_TESTCASES)),\
$(eval my_testcases := $$(COMPATIBILITY_TESTCASES_OUT_$(suite))))\
$(eval target := $(my_testcases)/$(lastword $(subst /, ,$(dir $(f))))/$(notdir $(f)))\
diff --git a/core/product-graph.mk b/core/product-graph.mk
index 379110e..4a44837 100644
--- a/core/product-graph.mk
+++ b/core/product-graph.mk
@@ -25,7 +25,7 @@
$(if $(filter $(p),$(_all_products_visited)),, \
$(p) \
$(eval _all_products_visited += $(p)) \
- $(call all-products-inner, $(PRODUCTS.$(strip $(p)).INHERITS_FROM))
+ $(call gather-all-makefiles-for-current-product-inner, $(PRODUCTS.$(strip $(p)).INHERITS_FROM))
) \
)
endef
diff --git a/core/product_config.mk b/core/product_config.mk
index 1e74fa9..35f018d 100644
--- a/core/product_config.mk
+++ b/core/product_config.mk
@@ -230,7 +230,6 @@
ifneq (,$(filter $(TARGET_PRODUCT),$(products_using_starlark_config)))
RBC_PRODUCT_CONFIG := true
- RBC_BOARD_CONFIG := true
endif
ifndef RBC_PRODUCT_CONFIG
diff --git a/core/product_config.rbc b/core/product_config.rbc
index 7ee3dc7..f67ba8e 100644
--- a/core/product_config.rbc
+++ b/core/product_config.rbc
@@ -536,8 +536,11 @@
"""If from file exists, returns [from:to] pair."""
value = path_pair.split(":", 2)
+ if value[0].find('*') != -1:
+ fail("copy_if_exists: input file cannot contain *")
+
 # Check that value[0] exists
- return [":".join(value)] if rblf_file_exists(value[0]) else []
+ return [":".join(value)] if rblf_wildcard(value[0]) else []
def _enforce_product_packages_exist(handle, pkg_string_or_list=[]):
"""Makes including non-existent modules in PRODUCT_PACKAGES an error."""
@@ -552,10 +555,6 @@
_setdefault(handle, "PRODUCT_DEX_PREOPT_MODULE_CONFIGS")
handle.cfg["PRODUCT_DEX_PREOPT_MODULE_CONFIGS"] += [m + "=" + config for m in modules]
-def _file_wildcard_exists(file_pattern):
- """Return True if there are files matching given bash pattern."""
- return len(rblf_wildcard(file_pattern)) > 0
-
def _find_and_copy(pattern, from_dir, to_dir):
"""Return a copy list for the files matching the pattern."""
return sorted([("%s/%s:%s/%s" % (from_dir, f, to_dir, f))
@@ -605,6 +604,21 @@
break
return res
+def _first_word(input):
+ """Equivalent to the GNU make function $(firstword)."""
+ input = __words(input)
+ if len(input) == 0:
+ return ""
+ return input[0]
+
+def _last_word(input):
+ """Equivalent to the GNU make function $(lastword)."""
+ input = __words(input)
+ l = len(input)
+ if l == 0:
+ return ""
+ return input[l-1]
+
def _dir(paths):
"""Equivalent to the GNU make function $(dir).
@@ -859,12 +873,12 @@
dir = _dir,
enforce_product_packages_exist = _enforce_product_packages_exist,
expand_wildcard = _expand_wildcard,
- file_exists = rblf_file_exists,
- file_wildcard_exists = _file_wildcard_exists,
filter = _filter,
filter_out = _filter_out,
find_and_copy = _find_and_copy,
findstring = _findstring,
+ first_word = _first_word,
+ last_word = _last_word,
inherit = _inherit,
indirect = _indirect,
mk2rbc_error = _mk2rbc_error,
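
Note on the change above: rblf_wildcard() returns the list of files matching a pattern, and a non-empty list is truthy in Starlark, so it can stand in for the removed rblf_file_exists() as long as the argument is a literal path (hence the new fail() on '*'). A minimal Python sketch of the equivalent check, using glob.glob as a stand-in for rblf_wildcard:

    import glob

    def copy_if_exists(path_pair):
        # "from:to" pair; the 'from' path must be a literal path, not a glob.
        src, dst = path_pair.split(":", 1)
        if "*" in src:
            raise RuntimeError("copy_if_exists: input file cannot contain *")
        # glob.glob on a literal path returns [src] if it exists, else [].
        return ["%s:%s" % (src, dst)] if glob.glob(src) else []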
diff --git a/core/soong_config.mk b/core/soong_config.mk
index d03b687..c84676b 100644
--- a/core/soong_config.mk
+++ b/core/soong_config.mk
@@ -170,6 +170,8 @@
$(call add_json_list, RecoverySnapshotDirsExcluded, $(RECOVERY_SNAPSHOT_DIRS_EXCLUDED))
$(call add_json_bool, HostFakeSnapshotEnabled, $(HOST_FAKE_SNAPSHOT_ENABLE))
+$(call add_json_bool, MultitreeUpdateMeta, $(filter true,$(TARGET_MULTITREE_UPDATE_META)))
+
$(call add_json_bool, Treble_linker_namespaces, $(filter true,$(PRODUCT_TREBLE_LINKER_NAMESPACES)))
$(call add_json_bool, Enforce_vintf_manifest, $(filter true,$(PRODUCT_ENFORCE_VINTF_MANIFEST)))
diff --git a/core/tasks/multitree.mk b/core/tasks/multitree.mk
new file mode 100644
index 0000000..225477e
--- /dev/null
+++ b/core/tasks/multitree.mk
@@ -0,0 +1,16 @@
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.PHONY: update-meta
+update-meta: $(SOONG_MULTITREE_METADATA)
diff --git a/core/version_defaults.mk b/core/version_defaults.mk
index d129aa4..af7d1c0 100644
--- a/core/version_defaults.mk
+++ b/core/version_defaults.mk
@@ -104,7 +104,7 @@
# It must be of the form "YYYY-MM-DD" on production devices.
# It must match one of the Android Security Patch Level strings of the Public Security Bulletins.
# If there is no $PLATFORM_SECURITY_PATCH set, keep it empty.
- PLATFORM_SECURITY_PATCH := 2022-04-05
+ PLATFORM_SECURITY_PATCH := 2022-05-05
endif
.KATI_READONLY := PLATFORM_SECURITY_PATCH
diff --git a/core/version_util.mk b/core/version_util.mk
index 3a0d4b5..cbfef96 100644
--- a/core/version_util.mk
+++ b/core/version_util.mk
@@ -56,36 +56,34 @@
# unreleased API level targetable by this branch, not just those that are valid
# lunch targets for this branch.
+PLATFORM_VERSION_CODENAME := $(PLATFORM_VERSION_CODENAME.$(TARGET_PLATFORM_VERSION))
ifndef PLATFORM_VERSION_CODENAME
- PLATFORM_VERSION_CODENAME := $(PLATFORM_VERSION_CODENAME.$(TARGET_PLATFORM_VERSION))
- ifndef PLATFORM_VERSION_CODENAME
- # PLATFORM_VERSION_CODENAME falls back to TARGET_PLATFORM_VERSION
- PLATFORM_VERSION_CODENAME := $(TARGET_PLATFORM_VERSION)
- endif
-
- # This is all of the *active* development codenames.
- # This confusing name is needed because
- # all_codenames has been baked into build.prop for ages.
- #
- # Should be either the same as PLATFORM_VERSION_CODENAME or a comma-separated
- # list of additional codenames after PLATFORM_VERSION_CODENAME.
- PLATFORM_VERSION_ALL_CODENAMES :=
-
- # Build a list of all active code names. Avoid duplicates, and stop when we
- # reach a codename that matches PLATFORM_VERSION_CODENAME (anything beyond
- # that is not included in our build).
- _versions_in_target := \
- $(call find_and_earlier,$(ALL_VERSIONS),$(TARGET_PLATFORM_VERSION))
- $(foreach version,$(_versions_in_target),\
- $(eval _codename := $(PLATFORM_VERSION_CODENAME.$(version)))\
- $(if $(filter $(_codename),$(PLATFORM_VERSION_ALL_CODENAMES)),,\
- $(eval PLATFORM_VERSION_ALL_CODENAMES += $(_codename))))
-
- # And convert from space separated to comma separated.
- PLATFORM_VERSION_ALL_CODENAMES := \
- $(subst $(space),$(comma),$(strip $(PLATFORM_VERSION_ALL_CODENAMES)))
-
+ # PLATFORM_VERSION_CODENAME falls back to TARGET_PLATFORM_VERSION
+ PLATFORM_VERSION_CODENAME := $(TARGET_PLATFORM_VERSION)
endif
+
+# This is all of the *active* development codenames.
+# This confusing name is needed because
+# all_codenames has been baked into build.prop for ages.
+#
+# Should be either the same as PLATFORM_VERSION_CODENAME or a comma-separated
+# list of additional codenames after PLATFORM_VERSION_CODENAME.
+PLATFORM_VERSION_ALL_CODENAMES :=
+
+# Build a list of all active code names. Avoid duplicates, and stop when we
+# reach a codename that matches PLATFORM_VERSION_CODENAME (anything beyond
+# that is not included in our build).
+_versions_in_target := \
+ $(call find_and_earlier,$(ALL_VERSIONS),$(TARGET_PLATFORM_VERSION))
+$(foreach version,$(_versions_in_target),\
+ $(eval _codename := $(PLATFORM_VERSION_CODENAME.$(version)))\
+ $(if $(filter $(_codename),$(PLATFORM_VERSION_ALL_CODENAMES)),,\
+ $(eval PLATFORM_VERSION_ALL_CODENAMES += $(_codename))))
+
+# And convert from space separated to comma separated.
+PLATFORM_VERSION_ALL_CODENAMES := \
+ $(subst $(space),$(comma),$(strip $(PLATFORM_VERSION_ALL_CODENAMES)))
+
.KATI_READONLY := \
PLATFORM_VERSION_CODENAME \
PLATFORM_VERSION_ALL_CODENAMES
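
The hoisted block above now runs even when PLATFORM_VERSION_CODENAME is predefined. The foreach/eval pair deduplicates codenames while preserving order, then joins them with commas; a rough Python model of the same loop (names hypothetical, assuming every version maps to a codename):

    def all_codenames(versions_in_target, codename_of):
        codenames = []
        for version in versions_in_target:
            codename = codename_of[version]
            if codename not in codenames:  # the $(filter ...) duplicate check
                codenames.append(codename)
        return ",".join(codenames)  # $(subst $(space),$(comma),...)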
diff --git a/envsetup.sh b/envsetup.sh
index e7b8538..b49bb8a 100644
--- a/envsetup.sh
+++ b/envsetup.sh
@@ -395,7 +395,7 @@
fi
local completion_files=(
- system/core/adb/adb.bash
+ packages/modules/adb/adb.bash
system/core/fastboot/fastboot.bash
tools/asuite/asuite.sh
)
@@ -404,7 +404,9 @@
# ENVSETUP_NO_COMPLETION=adb # -> disable adb completion
# ENVSETUP_NO_COMPLETION=adb:bit # -> disable adb and bit completion
for f in ${completion_files[*]}; do
- if [ -f "$f" ] && should_add_completion "$f"; then
+ if [ ! -f "$f" ]; then
+ echo "Warning: completion file $f not found"
+ elif should_add_completion "$f"; then
. $f
fi
done
diff --git a/finalize_branch_for_release.sh b/finalize_branch_for_release.sh
index 972ada1..8587b3a 100755
--- a/finalize_branch_for_release.sh
+++ b/finalize_branch_for_release.sh
@@ -17,12 +17,14 @@
# Update references in the codebase to new API version (TODO)
# ...
-AIDL_TRANSITIVE_FREEZE=true m aidl-freeze-api
+# Adding the -j1 option because of an Android.bp file race condition.
+AIDL_TRANSITIVE_FREEZE=true m aidl-freeze-api -j1
m check-vndk-list || update-vndk-list.sh # for new versions of AIDL interfaces
-# TODO(b/229413853): test while simulating 'rel' for more requirements AIDL_FROZEN_REL=true
-m # test build
+# for now, we simulate the release state for AIDL, but in the future, we would want
+# to actually turn the branch into the REL state and test with that
+AIDL_FROZEN_REL=true m # test build
# Build SDK (TODO)
# lunch sdk...
diff --git a/orchestrator/demo/buffet_helper.py b/orchestrator/demo/buffet_helper.py
new file mode 100644
index 0000000..fa29aeb
--- /dev/null
+++ b/orchestrator/demo/buffet_helper.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python3
+import os
+import sys
+import yaml
+
+from hierarchy import parse_hierarchy
+
+
+def main():
+ if len(sys.argv) != 2:
+ print('usage: %s target' % sys.argv[0])
+ exit(1)
+
+ args = sys.argv[1].split('-')
+ if len(args) != 2:
+ print('target format: {target}-{variant}')
+ exit(1)
+
+ target, variant = args
+
+ if variant not in ['eng', 'user', 'userdebug']:
+ print('unknown variant "%s": expected "eng", "user" or "userdebug"' %
+ variant)
+ exit(1)
+
+ build_top = os.getenv('BUFFET_BUILD_TOP')
+ if not build_top:
+ print('BUFFET_BUILD_TOP is not set; did you correctly run envsetup.sh?')
+ exit(1)
+
+ hierarchy_map = parse_hierarchy(build_top)
+
+ if target not in hierarchy_map:
+ raise RuntimeError(
+ "unknown target '%s': couldn't find the target. Supported targets are: %s"
+ % (target, list(hierarchy_map.keys())))
+
+ hierarchy = [target]
+ while hierarchy_map[hierarchy[-1]]:
+ hierarchy.append(hierarchy_map[hierarchy[-1]])
+
+ print('Target hierarchy for %s: %s' % (target, hierarchy))
+
+
+if __name__ == '__main__':
+ main()
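
For illustration, with the demo hierarchy.yaml added later in this change, a hypothetical run of this helper would resolve the parent chain like so:

    # buffet_helper.py oriole-userdebug (hypothetical invocation)
    # parse_hierarchy() maps each child target to its parent, so the loop walks
    #   oriole -> aosp_oriole -> mainline_modules_arm64 -> armv8 -> aosp_arm64
    # and prints:
    #   Target hierarchy for oriole: ['oriole', 'aosp_oriole',
    #       'mainline_modules_arm64', 'armv8', 'aosp_arm64']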
diff --git a/orchestrator/demo/build_helper.py b/orchestrator/demo/build_helper.py
new file mode 100644
index 0000000..c481f80
--- /dev/null
+++ b/orchestrator/demo/build_helper.py
@@ -0,0 +1,367 @@
+#!/usr/bin/env python3
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+import copy
+import hierarchy
+import json
+import logging
+import filecmp
+import os
+import shutil
+import subprocess
+import sys
+import tempfile
+import collect_metadata
+import utils
+
+BUILD_CMD_TO_ALL = (
+ 'clean',
+ 'installclean',
+ 'update-meta',
+)
+BUILD_ALL_EXEMPTION = (
+ 'art',
+)
+
+def get_supported_product(ctx, supported_products):
+ hierarchy_map = hierarchy.parse_hierarchy(ctx.build_top())
+ target = ctx.target_product()
+
+ while target not in supported_products:
+ if target not in hierarchy_map:
+ return None
+ target = hierarchy_map[target]
+ return target
+
+
+def parse_goals(ctx, metadata, goals):
+ """Parse goals and returns a map from each component to goals.
+
+ e.g.
+
+ "m main art timezone:foo timezone:bar" will return the following dict: {
+ "main": {"all"},
+ "art": {"all"},
+ "timezone": {"foo", "bar"},
+ }
+ """
+ # for now, goal should look like:
+ # {component} or {component}:{subgoal}
+
+ ret = collections.defaultdict(set)
+
+ for goal in goals:
+ # check if the command is for all components
+ if goal in BUILD_CMD_TO_ALL:
+ ret['all'].add(goal)
+ continue
+
+ # should be {component} or {component}:{subgoal}
+ try:
+ component, subgoal = goal.split(':') if ':' in goal else (goal, 'all')
+ except ValueError:
+ raise RuntimeError(
+ 'unknown goal: %s: should be {component} or {component}:{subgoal}' %
+ goal)
+ if component not in metadata:
+ raise RuntimeError('unknown goal: %s: component %s not found' %
+ (goal, component))
+ if not get_supported_product(ctx, metadata[component]['lunch_targets']):
+ raise RuntimeError("can't find matching target. Supported targets are: " +
+ str(metadata[component]['lunch_targets']))
+
+ ret[component].add(subgoal)
+
+ return ret
+
+
+def find_cycle(metadata):
+ """ Finds a cyclic dependency among components.
+
+ This is for debugging.
+ """
+ visited = set()
+ parent_node = dict()
+ in_stack = set()
+
+ # Returns a cycle if one is found
+ def dfs(node):
+ nonlocal visited, parent_node, in_stack
+
+ visited.add(node)
+ in_stack.add(node)
+ if 'deps' not in metadata[node]:
+ in_stack.remove(node)
+ return None
+ for next in metadata[node]['deps']:
+ # We found a cycle (next ~ node) if next is still in the stack
+ if next in in_stack:
+ cycle = [node]
+ while cycle[-1] != next:
+ cycle.append(parent_node[cycle[-1]])
+ return cycle
+
+ # Else, continue searching
+ if next in visited:
+ continue
+
+ parent_node[next] = node
+ result = dfs(next)
+ if result:
+ return result
+
+ in_stack.remove(node)
+ return None
+
+ for component in metadata:
+ if component in visited:
+ continue
+
+ result = dfs(component)
+ if result:
+ return result
+
+ return None
+
+
+def topological_sort_components(metadata):
+ """ Performs topological sort on components.
+
+ If A depends on B, B appears first.
+ """
+ # If A depends on B, we want B to appear before A. But the graph in metadata
+ # is represented as A -> B (B in metadata[A]['deps']). So we sort in the
+ # reverse order, and then reverse the result again to get the desired order.
+ indegree = collections.defaultdict(int)
+ for component in metadata:
+ if 'deps' not in metadata[component]:
+ continue
+ for dep in metadata[component]['deps']:
+ indegree[dep] += 1
+
+ component_queue = collections.deque()
+ for component in metadata:
+ if indegree[component] == 0:
+ component_queue.append(component)
+
+ result = []
+ while component_queue:
+ component = component_queue.popleft()
+ result.append(component)
+ if 'deps' not in metadata[component]:
+ continue
+ for dep in metadata[component]['deps']:
+ indegree[dep] -= 1
+ if indegree[dep] == 0:
+ component_queue.append(dep)
+
+ # If topological sort fails, there must be a cycle.
+ if len(result) != len(metadata):
+ cycle = find_cycle(metadata)
+ raise RuntimeError('circular dependency found among metadata: %s' % cycle)
+
+ return result[::-1]
+
+
+def add_dependency_goals(ctx, metadata, component, goals):
+ """ Adds goals that given component depends on."""
+ # For now, let's just add "all"
+ # TODO: add detailed goals (e.g. API build rules, library build rules, etc.)
+ if 'deps' not in metadata[component]:
+ return
+
+ for dep in metadata[component]['deps']:
+ goals[dep].add('all')
+
+
+def sorted_goals_with_dependencies(ctx, metadata, parsed_goals):
+ """ Analyzes the dependency graph among components, adds build commands for
+
+ dependencies, and then sorts the goals.
+
+ Returns a list of tuples: (component_name, set of subgoals).
+ Builds should be run in the list's order.
+ """
+ # TODO(inseob@): after topological sort, some components may be built in
+ # parallel.
+
+ topological_order = topological_sort_components(metadata)
+ combined_goals = copy.deepcopy(parsed_goals)
+
+ # Add build rules for each component's dependencies
+ # We do this in reverse order, so it can be transitive.
+ # e.g. if A depends on B and B depends on C, and we build A,
+ # C should also be built, in addition to B.
+ for component in topological_order[::-1]:
+ if component in combined_goals:
+ add_dependency_goals(ctx, metadata, component, combined_goals)
+
+ ret = []
+ for component in ['all'] + topological_order:
+ if component in combined_goals:
+ ret.append((component, combined_goals[component]))
+
+ return ret
+
+
+def run_build(ctx, metadata, component, subgoals):
+ build_cmd = metadata[component]['build_cmd']
+ out_dir = metadata[component]['out_dir']
+ default_goals = ''
+ if 'default_goals' in metadata[component]:
+ default_goals = metadata[component]['default_goals']
+
+ if 'all' in subgoals:
+ goal = default_goals
+ else:
+ goal = ' '.join(subgoals)
+
+ build_vars = ''
+ if 'update-meta' in subgoals:
+ build_vars = 'TARGET_MULTITREE_UPDATE_META=true'
+ # TODO(inseob@): shell escape
+ cmd = [
+ '/bin/bash', '-c',
+ 'source build/envsetup.sh && lunch %s-%s && %s %s %s' %
+ (get_supported_product(ctx, metadata[component]['lunch_targets']),
+ ctx.target_build_variant(), build_vars, build_cmd, goal)
+ ]
+ logging.debug('cwd: ' + metadata[component]['path'])
+ logging.debug('running build: ' + str(cmd))
+
+ subprocess.run(cmd, cwd=metadata[component]['path'], check=True)
+
+
+def run_build_all(ctx, metadata, subgoals):
+ for component in metadata:
+ if component in BUILD_ALL_EXEMPTION:
+ continue
+ run_build(ctx, metadata, component, subgoals)
+
+
+def find_components(metadata, predicate):
+ for component in metadata:
+ if predicate(component):
+ yield component
+
+
+def import_filegroups(metadata, component, exporting_component, target_file_pairs):
+ imported_filegroup_dir = os.path.join(metadata[component]['path'], 'imported', exporting_component)
+
+ bp_content = ''
+ for name, outpaths in target_file_pairs:
+ bp_content += ('filegroup {{\n'
+ ' name: "{fname}",\n'
+ ' srcs: [\n'.format(fname=name))
+ for outpath in outpaths:
+ bp_content += ' "{outfile}",\n'.format(outfile=os.path.basename(outpath))
+ bp_content += (' ],\n'
+ '}\n')
+
+ with tempfile.TemporaryDirectory() as tmp_dir:
+ with open(os.path.join(tmp_dir, 'Android.bp'), 'w') as fout:
+ fout.write(bp_content)
+ for _, outpaths in target_file_pairs:
+ for outpath in outpaths:
+ os.symlink(os.path.join(metadata[exporting_component]['path'], outpath),
+ os.path.join(tmp_dir, os.path.basename(outpath)))
+ cmp_result = filecmp.dircmp(tmp_dir, imported_filegroup_dir)
+ if os.path.exists(imported_filegroup_dir) and len(
+ cmp_result.left_only) + len(cmp_result.right_only) + len(
+ cmp_result.diff_files) == 0:
+ # Files are identical, it doesn't need to be written
+ logging.info(
+ 'imported files exist and the contents are identical: {} -> {}'
+ .format(component, exporting_component))
+ continue
+ logging.info('creating symlinks for imported files: {} -> {}'.format(
+ component, exporting_component))
+ os.makedirs(imported_filegroup_dir, exist_ok=True)
+ shutil.rmtree(imported_filegroup_dir, ignore_errors=True)
+ shutil.move(tmp_dir, imported_filegroup_dir)
+
+
+def prepare_build(metadata, component):
+ imported_dir = os.path.join(metadata[component]['path'], 'imported')
+ if utils.META_DEPS not in metadata[component]:
+ if os.path.exists(imported_dir):
+ logging.debug('remove {}'.format(imported_dir))
+ shutil.rmtree(imported_dir)
+ return
+
+ imported_components = set()
+ for exp_comp in metadata[component][utils.META_DEPS]:
+ if utils.META_FILEGROUP in metadata[component][utils.META_DEPS][exp_comp]:
+ filegroups = metadata[component][utils.META_DEPS][exp_comp][utils.META_FILEGROUP]
+ target_file_pairs = []
+ for name in filegroups:
+ target_file_pairs.append((name, filegroups[name]))
+ import_filegroups(metadata, component, exp_comp, target_file_pairs)
+ imported_components.add(exp_comp)
+
+ # Remove directories that are not generated this time.
+ if os.path.exists(imported_dir):
+ if len(imported_components) == 0:
+ shutil.rmtree(imported_dir)
+ else:
+ for remove_target in set(os.listdir(imported_dir)) - imported_components:
+ logging.info('remove unnecessary imported dir: {}'.format(remove_target))
+ shutil.rmtree(os.path.join(imported_dir, remove_target))
+
+
+def main():
+ utils.set_logging_config(logging.DEBUG)
+ ctx = utils.get_build_context()
+
+ logging.info('collecting metadata')
+
+ utils.set_logging_config(True)
+
+ goals = sys.argv[1:]
+ if not goals:
+ logging.debug('empty goals. defaults to main')
+ goals = ['main']
+
+ logging.debug('goals: ' + str(goals))
+
+ # Force update the metadata for the 'update-meta' build
+ metadata_collector = collect_metadata.MetadataCollector(
+ ctx.components_top(), ctx.out_dir(),
+ collect_metadata.COMPONENT_METADATA_DIR,
+ collect_metadata.COMPONENT_METADATA_FILE,
+ force_update='update-meta' in goals)
+ metadata_collector.collect()
+
+ metadata = metadata_collector.get_metadata()
+ logging.debug('metadata: ' + str(metadata))
+
+ parsed_goals = parse_goals(ctx, metadata, goals)
+ logging.debug('parsed goals: ' + str(parsed_goals))
+
+ sorted_goals = sorted_goals_with_dependencies(ctx, metadata, parsed_goals)
+ logging.debug('sorted goals with deps: ' + str(sorted_goals))
+
+ for component, subgoals in sorted_goals:
+ if component == 'all':
+ run_build_all(ctx, metadata, subgoals)
+ continue
+ prepare_build(metadata, component)
+ run_build(ctx, metadata, component, subgoals)
+
+
+if __name__ == '__main__':
+ main()
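
A small worked example of the pipeline above, with hypothetical components: if 'art' depends on 'main' and 'main' depends on 'timezone', topological_sort_components() puts dependencies first, and the reverse-order pass in sorted_goals_with_dependencies() adds the transitive 'all' goals:

    metadata = {
        'art': {'deps': ['main'], 'lunch_targets': ['aosp_arm64']},
        'main': {'deps': ['timezone'], 'lunch_targets': ['aosp_arm64']},
        'timezone': {'lunch_targets': ['aosp_arm64']},
    }
    # topological_sort_components(metadata) -> ['timezone', 'main', 'art']
    # Starting from parsed goals {'art': {'all'}}, the reverse pass adds
    # 'main' and then 'timezone', so the builds run in the order:
    #   [('timezone', {'all'}), ('main', {'all'}), ('art', {'all'})]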
diff --git a/orchestrator/demo/collect_metadata.py b/orchestrator/demo/collect_metadata.py
new file mode 100755
index 0000000..148167d
--- /dev/null
+++ b/orchestrator/demo/collect_metadata.py
@@ -0,0 +1,428 @@
+#!/usr/bin/env python3
+# Copyright (C) 2021 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import copy
+import json
+import logging
+import os
+import sys
+import yaml
+from collections import defaultdict
+from typing import (
+ List,
+ Set,
+)
+
+import utils
+
+# SKIP_COMPONENT_SEARCH = (
+# 'tools',
+# )
+COMPONENT_METADATA_DIR = '.repo'
+COMPONENT_METADATA_FILE = 'treeinfo.yaml'
+GENERATED_METADATA_FILE = 'metadata.json'
+COMBINED_METADATA_FILENAME = 'multitree_meta.json'
+
+
+class Dep(object):
+ def __init__(self, name, component, deps_type):
+ self.name = name
+ self.component = component
+ self.type = deps_type
+ self.out_paths = list()
+
+
+class ExportedDep(Dep):
+ def __init__(self, name, component, deps_type):
+ super().__init__(name, component, deps_type)
+
+ def setOutputPaths(self, output_paths: list):
+ self.out_paths = output_paths
+
+
+class ImportedDep(Dep):
+ required_type_map = {
+ # import type: (required type, get imported module list)
+ utils.META_FILEGROUP: (utils.META_MODULES, True),
+ }
+
+ def __init__(self, name, component, deps_type, import_map):
+ super().__init__(name, component, deps_type)
+ self.exported_deps: Set[ExportedDep] = set()
+ self.imported_modules: List[str] = list()
+ self.required_type = deps_type
+ get_imported_module = False
+ if deps_type in ImportedDep.required_type_map:
+ self.required_type, get_imported_module = ImportedDep.required_type_map[deps_type]
+ if get_imported_module:
+ self.imported_modules = import_map[name]
+ else:
+ self.imported_modules.append(name)
+
+ def verify_and_add(self, exported: ExportedDep):
+ if self.required_type != exported.type:
+ raise RuntimeError(
+ '{comp} component imports {module} for {imp_type} but it is exported as {exp_type}.'
+ .format(comp=self.component, module=exported.name, imp_type=self.required_type, exp_type=exported.type))
+ self.exported_deps.add(exported)
+ self.out_paths.extend(exported.out_paths)
+ # Remove duplicates. We may not use set() which is not JSON serializable
+ self.out_paths = list(dict.fromkeys(self.out_paths))
+
+
+class MetadataCollector(object):
+ """Visit all component directories and collect the metadata from them.
+
+Example of metadata:
+==========
+build_cmd: m # build command for this component. 'm' if omitted
+out_dir: out # out dir of this component. 'out' if omitted
+exports:
+ libraries:
+ - name: libopenjdkjvm
+ - name: libopenjdkjvmd
+ build_cmd: mma # build command for libopenjdkjvmd if specified
+ out_dir: out/soong # out dir for libopenjdkjvmd if specified
+ - name: libctstiagent
+ APIs:
+ - api1
+ - api2
+imports:
+ libraries:
+ - lib1
+ - lib2
+ APIs:
+ - import_api1
+ - import_api2
+lunch_targets:
+ - arm64
+ - x86_64
+"""
+
+ def __init__(self, component_top, out_dir, meta_dir, meta_file, force_update=False):
+ if not os.path.exists(out_dir):
+ os.makedirs(out_dir)
+
+ self.__component_top = component_top
+ self.__out_dir = out_dir
+ self.__metadata_path = os.path.join(meta_dir, meta_file)
+ self.__combined_metadata_path = os.path.join(self.__out_dir,
+ COMBINED_METADATA_FILENAME)
+ self.__force_update = force_update
+
+ self.__metadata = dict()
+ self.__map_exports = dict()
+ self.__component_set = set()
+
+ def collect(self):
+ """ Read precomputed combined metadata from the json file.
+
+ If any components have updated their metadata, update the metadata
+ information and the json file.
+ """
+ timestamp = self.__restore_metadata()
+ if timestamp and os.path.getmtime(__file__) > timestamp:
+ logging.info('Update the metadata as the orchestrator has been changed')
+ self.__force_update = True
+ self.__collect_from_components(timestamp)
+
+ def get_metadata(self):
+ """ Returns collected metadata from all components"""
+ if not self.__metadata:
+ logging.warning('Metadata is empty')
+ return copy.deepcopy(self.__metadata)
+
+ def __collect_from_components(self, timestamp):
+ """ Read metadata from all components
+
+ If any components have newer metadata files or are removed, update the
+ combined metadata.
+ """
+ metadata_updated = False
+ for component in os.listdir(self.__component_top):
+ # if component in SKIP_COMPONENT_SEARCH:
+ # continue
+ if self.__read_component_metadata(timestamp, component):
+ metadata_updated = True
+ if self.__read_generated_metadata(timestamp, component):
+ metadata_updated = True
+
+ deleted_components = set()
+ for meta in self.__metadata:
+ if meta not in self.__component_set:
+ logging.info('Component {} is removed'.format(meta))
+ deleted_components.add(meta)
+ metadata_updated = True
+ for meta in deleted_components:
+ del self.__metadata[meta]
+
+ if metadata_updated:
+ self.__update_dependencies()
+ self.__store_metadata()
+ logging.info('Metadata updated')
+
+ def __read_component_metadata(self, timestamp, component):
+ """ Search for the metadata file from a component.
+
+ If the metadata is modified, read the file and update the metadata.
+ """
+ component_path = os.path.join(self.__component_top, component)
+ metadata_file = os.path.join(component_path, self.__metadata_path)
+ logging.info(
+ 'Reading a metadata file from {} component ...'.format(component))
+ if not os.path.isfile(metadata_file):
+ logging.warning('Metadata file {} not found!'.format(metadata_file))
+ return False
+
+ self.__component_set.add(component)
+ if not self.__force_update and timestamp and timestamp > os.path.getmtime(metadata_file):
+ logging.info('... yaml not changed. Skip')
+ return False
+
+ with open(metadata_file) as f:
+ meta = yaml.load(f, Loader=yaml.SafeLoader)
+
+ meta['path'] = component_path
+ if utils.META_BUILDCMD not in meta:
+ meta[utils.META_BUILDCMD] = utils.DEFAULT_BUILDCMD
+ if utils.META_OUTDIR not in meta:
+ meta[utils.META_OUTDIR] = utils.DEFAULT_OUTDIR
+
+ if utils.META_IMPORTS not in meta:
+ meta[utils.META_IMPORTS] = defaultdict(dict)
+ if utils.META_EXPORTS not in meta:
+ meta[utils.META_EXPORTS] = defaultdict(dict)
+
+ self.__metadata[component] = meta
+ return True
+
+ def __read_generated_metadata(self, timestamp, component):
+ """ Read a metadata gerated by 'update-meta' build command from the soong build system
+
+ Soong generate the metadata that has the information of import/export module/files.
+ Build orchestrator read the generated metadata to collect the dependency information.
+
+ Generated metadata has the following format:
+ {
+ "Imported": {
+ "FileGroups": {
+ "<name_of_filegroup>": [
+ "<exported_module_name>",
+ ...
+ ],
+ ...
+ }
+ }
+ "Exported": {
+ "<exported_module_name>": [
+ "<output_file_path>",
+ ...
+ ],
+ ...
+ }
+ }
+ """
+ if component not in self.__component_set:
+ # skip reading generated metadata if the component metadata file was missing
+ return False
+ component_out = os.path.join(self.__component_top, component, self.__metadata[component][utils.META_OUTDIR])
+ generated_metadata_file = os.path.join(component_out, 'soong', 'multitree', GENERATED_METADATA_FILE)
+ if not os.path.isfile(generated_metadata_file):
+ logging.info('... Soong did not generate the metadata file. Skip')
+ return False
+ if not self.__force_update and timestamp and timestamp > os.path.getmtime(generated_metadata_file):
+ logging.info('... Soong generated metadata not changed. Skip')
+ return False
+
+ with open(generated_metadata_file, 'r') as gen_meta_json:
+ try:
+ gen_metadata = json.load(gen_meta_json)
+ except json.decoder.JSONDecodeError:
+ logging.warning('JSONDecodeError!!!: skip reading the {} file'.format(
+ generated_metadata_file))
+ return False
+
+ if utils.SOONG_IMPORTED in gen_metadata:
+ imported = gen_metadata[utils.SOONG_IMPORTED]
+ if utils.SOONG_IMPORTED_FILEGROUPS in imported:
+ self.__metadata[component][utils.META_IMPORTS][utils.META_FILEGROUP] = imported[utils.SOONG_IMPORTED_FILEGROUPS]
+ if utils.SOONG_EXPORTED in gen_metadata:
+ self.__metadata[component][utils.META_EXPORTS][utils.META_MODULES] = gen_metadata[utils.SOONG_EXPORTED]
+
+ return True
+
+ def __update_export_map(self):
+ """ Read metadata of all components and update the export map
+
+ 'libraries' and 'APIs' are special exported types that are provided manually
+ from the .yaml metadata files. These need to be replaced with the implementation
+ in soong generated metadata.
+ The export type 'module' is generated by the soong build system from the modules
+ with the 'export: true' property. This export type includes a dictionary with module
+ names as keys and their output files as values. These output files will be used as
+ prebuilt sources when generating the imported modules.
+ """
+ self.__map_exports = dict()
+ for comp in self.__metadata:
+ if utils.META_EXPORTS not in self.__metadata[comp]:
+ continue
+ exports = self.__metadata[comp][utils.META_EXPORTS]
+
+ for export_type in exports:
+ for module in exports[export_type]:
+ if export_type == utils.META_LIBS:
+ name = module[utils.META_LIB_NAME]
+ else:
+ name = module
+
+ if name in self.__map_exports:
+ raise RuntimeError(
+ 'Exported libs conflict!!!: "{name}" in the {comp} component is already exported by the {prev} component.'
+ .format(name=name, comp=comp, prev=self.__map_exports[name][utils.EXP_COMPONENT]))
+ exported_deps = ExportedDep(name, comp, export_type)
+ if export_type == utils.META_MODULES:
+ exported_deps.setOutputPaths(exports[export_type][module])
+ self.__map_exports[name] = exported_deps
+
+ def __verify_and_add_dependencies(self, component):
+ """ Search all imported items from the export_map.
+
+ If any imported items are not provided by the other components, report
+ an error.
+ Otherwise, add the component dependency and update the exported information to the
+ import maps.
+ """
+ def verify_and_add_dependencies(imported_dep: ImportedDep):
+ for module in imported_dep.imported_modules:
+ if module not in self.__map_exports:
+ raise RuntimeError(
+ 'Imported item not found!!!: Imported module "{module}" in the {comp} component is not exported from any other components.'
+ .format(module=module, comp=imported_dep.component))
+ imported_dep.verify_and_add(self.__map_exports[module])
+
+ deps = self.__metadata[component][utils.META_DEPS]
+ exp_comp = self.__map_exports[module].component
+ if exp_comp not in deps:
+ deps[exp_comp] = defaultdict(defaultdict)
+ deps[exp_comp][imported_dep.type][imported_dep.name] = imported_dep.out_paths
+
+ self.__metadata[component][utils.META_DEPS] = defaultdict()
+ imports = self.__metadata[component][utils.META_IMPORTS]
+ for import_type in imports:
+ for module in imports[import_type]:
+ verify_and_add_dependencies(ImportedDep(module, component, import_type, imports[import_type]))
+
+ def __check_imports(self):
+ """ Search the export map to find the component to import libraries or APIs.
+
+ Update the 'deps' field that includes the dependent components.
+ """
+ for component in self.__metadata:
+ self.__verify_and_add_dependencies(component)
+ if utils.META_DEPS in self.__metadata[component]:
+ logging.debug('{comp} depends on {list} components'.format(
+ comp=component, list=self.__metadata[component][utils.META_DEPS]))
+
+ def __update_dependencies(self):
+ """ Generate a dependency graph for the components
+
+ Update __map_exports and the dependency graph with the maps.
+ """
+ self.__update_export_map()
+ self.__check_imports()
+
+ def __store_metadata(self):
+ """ Store the __metadata dictionary as json format"""
+ with open(self.__combined_metadata_path, 'w') as json_file:
+ json.dump(self.__metadata, json_file, indent=2)
+
+ def __restore_metadata(self):
+ """ Read the stored json file and return the time stamps of the
+
+ metadata file.
+ """
+ if not os.path.exists(self.__combined_metadata_path):
+ return None
+
+ with open(self.__combined_metadata_path, 'r') as json_file:
+ try:
+ self.__metadata = json.load(json_file)
+ except json.decoder.JSONDecodeError:
+ logging.warning('JSONDecodeError!!!: skip reading the {} file'.format(
+ self.__combined_metadata_path))
+ return None
+
+ logging.info('Metadata restored from {}'.format(
+ self.__combined_metadata_path))
+ self.__update_export_map()
+ return os.path.getmtime(self.__combined_metadata_path)
+
+
+def get_args():
+
+ def check_dir(path):
+ if os.path.exists(path) and os.path.isdir(path):
+ return os.path.normpath(path)
+ else:
+ raise argparse.ArgumentTypeError('\"{}\" is not a directory'.format(path))
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ '--component-top',
+ help='Scan all components under this directory.',
+ default=os.path.join(os.path.dirname(__file__), '../../../components'),
+ type=check_dir)
+ parser.add_argument(
+ '--meta-file',
+ help='Name of the metadata file.',
+ default=COMPONENT_METADATA_FILE,
+ type=str)
+ parser.add_argument(
+ '--meta-dir',
+ help='Each component has the metadata in this directory.',
+ default=COMPONENT_METADATA_DIR,
+ type=str)
+ parser.add_argument(
+ '--out-dir',
+ help='Out dir for the outer tree. The orchestrator stores the collected metadata in this directory.',
+ default=os.path.join(os.path.dirname(__file__), '../../../out'),
+ type=os.path.normpath)
+ parser.add_argument(
+ '--force',
+ '-f',
+ action='store_true',
+ help='Force metadata collection',
+ )
+ parser.add_argument(
+ '--verbose',
+ '-v',
+ help='Increase output verbosity, e.g. "-v", "-vv".',
+ action='count',
+ default=0)
+ return parser.parse_args()
+
+
+def main():
+ args = get_args()
+ utils.set_logging_config(args.verbose)
+
+ metadata_collector = MetadataCollector(args.component_top, args.out_dir,
+ args.meta_dir, args.meta_file, args.force)
+ metadata_collector.collect()
+
+
+if __name__ == '__main__':
+ main()
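
To make the import/export resolution concrete, consider a hypothetical pair of components where 'a' imports a filegroup whose members are modules exported by 'b':

    # Soong-generated metadata for component "a" (hypothetical):
    #   {"Imported": {"FileGroups": {"fg_foo": ["libfoo"]}}}
    # Soong-generated metadata for component "b" (hypothetical):
    #   {"Exported": {"libfoo": ["out/soong/libfoo.so"]}}
    #
    # ImportedDep("fg_foo", "a", "filegroup", ...) requires the exported type
    # "modules" and resolves each member through the export map, so after
    # __update_dependencies():
    #   metadata["a"]["deps"] == {
    #       "b": {"filegroup": {"fg_foo": ["out/soong/libfoo.so"]}}
    #   }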
diff --git a/orchestrator/demo/envsetup.sh b/orchestrator/demo/envsetup.sh
new file mode 100644
index 0000000..902a37c
--- /dev/null
+++ b/orchestrator/demo/envsetup.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+function buffet()
+{
+ local product variant selection
+ if [[ $# -ne 1 ]]; then
+ echo "usage: buffet [target]" >&2
+ return 1
+ fi
+
+ selection=$1
+ product=${selection%%-*} # Trim everything after first dash
+ variant=${selection#*-} # Trim everything up to first dash
+
+ if [ -z "$product" ]
+ then
+ echo
+ echo "Invalid lunch combo: $selection"
+ return 1
+ fi
+
+ if [ -z "$variant" ]
+ then
+ if [[ "$product" =~ .*_(eng|user|userdebug) ]]
+ then
+ echo "Did you mean -${product/*_/}? (dash instead of underscore)"
+ fi
+ return 1
+ fi
+
+ BUFFET_BUILD_TOP=$(pwd) python3 tools/build/orchestrator/buffet_helper.py $1 || return 1
+
+ export BUFFET_BUILD_TOP=$(pwd)
+ export BUFFET_COMPONENTS_TOP=$BUFFET_BUILD_TOP/components
+ export BUFFET_TARGET_PRODUCT=$product
+ export BUFFET_TARGET_BUILD_VARIANT=$variant
+ export BUFFET_TARGET_BUILD_TYPE=release
+}
+
+function m()
+{
+ if [ -z "$BUFFET_BUILD_TOP" ]
+ then
+ echo "Run \"buffet [target]\" first"
+ return 1
+ fi
+ python3 $BUFFET_BUILD_TOP/tools/build/orchestrator/build_helper.py "$@"
+}
diff --git a/orchestrator/demo/hierarchy.py b/orchestrator/demo/hierarchy.py
new file mode 100644
index 0000000..ae1825c
--- /dev/null
+++ b/orchestrator/demo/hierarchy.py
@@ -0,0 +1,79 @@
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os
+import yaml
+
+
+def parse_hierarchy(build_top):
+ """Parse build hierarchy file from given build top directory, and returns a dict from child targets to parent targets.
+
+ Example of hierarchy file:
+ ==========
+ aosp_arm64:
+ - armv8
+ - aosp_cf_arm64_phone
+
+ armv8:
+ - aosp_oriole
+ - aosp_sunfish
+
+ aosp_oriole:
+ - oriole
+
+ aosp_sunfish:
+ - sunfish
+
+ oriole:
+ # leaf
+
+ sunfish:
+ # leaf
+ ==========
+
+ If we parse this yaml, we get a dict looking like:
+
+ {
+ "sunfish": "aosp_sunfish",
+ "oriole": "aosp_oriole",
+ "aosp_oriole": "armv8",
+ "aosp_sunfish": "armv8",
+ "armv8": "aosp_arm64",
+ "aosp_cf_arm64_phone": "aosp_arm64",
+ "aosp_arm64": None, # no parent
+ }
+ """
+ metadata_path = os.path.join(build_top, 'tools', 'build', 'hierarchy.yaml')
+ if not os.path.isfile(metadata_path):
+ raise RuntimeError("target metadata file %s doesn't exist" % metadata_path)
+
+ with open(metadata_path, 'r') as f:
+ hierarchy_yaml = yaml.load(f, Loader=yaml.SafeLoader)
+
+ hierarchy_map = dict()
+
+ for parent_target, child_targets in hierarchy_yaml.items():
+ if not child_targets:
+ # leaf
+ continue
+ for child_target in child_targets:
+ hierarchy_map[child_target] = parent_target
+
+ for parent_target in hierarchy_yaml:
+ # targets with no parent
+ if parent_target not in hierarchy_map:
+ hierarchy_map[parent_target] = None
+
+ return hierarchy_map
diff --git a/orchestrator/demo/hierarchy.yaml b/orchestrator/demo/hierarchy.yaml
new file mode 100644
index 0000000..cc6de4d
--- /dev/null
+++ b/orchestrator/demo/hierarchy.yaml
@@ -0,0 +1,37 @@
+# hierarchy of targets
+
+aosp_arm64:
+- armv8
+- aosp_cf_arm64_phone
+
+armv8:
+- mainline_modules_arm64
+
+mainline_modules_arm64:
+- aosp_oriole
+- aosp_sunfish
+- aosp_raven
+
+aosp_oriole:
+- oriole
+
+aosp_sunfish:
+- sunfish
+
+aosp_raven:
+- raven
+
+oriole:
+# leaf
+
+sunfish:
+# leaf
+
+raven:
+# leaf
+
+aosp_cf_arm64_phone:
+- cf_arm64_phone
+
+cf_arm64_phone:
+# leaf
diff --git a/orchestrator/demo/utils.py b/orchestrator/demo/utils.py
new file mode 100644
index 0000000..5dbbe4a
--- /dev/null
+++ b/orchestrator/demo/utils.py
@@ -0,0 +1,89 @@
+# Copyright (C) 2021 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import logging
+import os
+
+# default build configuration for each component
+DEFAULT_BUILDCMD = 'm'
+DEFAULT_OUTDIR = 'out'
+
+# yaml fields
+META_BUILDCMD = 'build_cmd'
+META_OUTDIR = 'out_dir'
+META_EXPORTS = 'exports'
+META_IMPORTS = 'imports'
+META_TARGETS = 'lunch_targets'
+META_DEPS = 'deps'
+# fields under 'exports' and 'imports'
+META_LIBS = 'libraries'
+META_APIS = 'APIs'
+META_FILEGROUP = 'filegroup'
+META_MODULES = 'modules'
+# fields under 'libraries'
+META_LIB_NAME = 'name'
+
+# fields for generated metadata file
+SOONG_IMPORTED = 'Imported'
+SOONG_IMPORTED_FILEGROUPS = 'FileGroups'
+SOONG_EXPORTED = 'Exported'
+
+# export map items
+EXP_COMPONENT = 'component'
+EXP_TYPE = 'type'
+EXP_OUTPATHS = 'outpaths'
+
+class BuildContext:
+
+ def __init__(self):
+ self._build_top = os.getenv('BUFFET_BUILD_TOP')
+ self._components_top = os.getenv('BUFFET_COMPONENTS_TOP')
+ self._target_product = os.getenv('BUFFET_TARGET_PRODUCT')
+ self._target_build_variant = os.getenv('BUFFET_TARGET_BUILD_VARIANT')
+ self._target_build_type = os.getenv('BUFFET_TARGET_BUILD_TYPE')
+ if not self._build_top:
+ raise RuntimeError("Can't find root. Did you run buffet?")
+
+ # Derive the out dir only after the build top has been validated.
+ self._out_dir = os.path.join(self._build_top, 'out')
+
+ def build_top(self):
+ return self._build_top
+
+ def components_top(self):
+ return self._components_top
+
+ def target_product(self):
+ return self._target_product
+
+ def target_build_variant(self):
+ return self._target_build_variant
+
+ def target_build_type(self):
+ return self._target_build_type
+
+ def out_dir(self):
+ return self._out_dir
+
+
+def get_build_context():
+ return BuildContext()
+
+
+def set_logging_config(verbose_level):
+ verbose_map = (logging.WARNING, logging.INFO, logging.DEBUG)
+ verbosity = min(verbose_level, 2)
+ logging.basicConfig(
+ format='%(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
+ level=verbose_map[verbosity])
diff --git a/tests/run.rbc b/tests/run.rbc
index 2d35e85..107be09 100644
--- a/tests/run.rbc
+++ b/tests/run.rbc
@@ -81,6 +81,15 @@
assert_eq(cwd+"/foo/bar "+cwd+"/foo/baz", rblf.abspath("foo/bar foo/baz"))
assert_eq("/baz", rblf.abspath("/../../../../../../../../../../../../../../../../baz"))
+assert_eq("foo", rblf.first_word("foo bar"))
+assert_eq("foo", rblf.first_word(["foo", "bar"]))
+assert_eq("", rblf.first_word(""))
+assert_eq("", rblf.first_word([]))
+assert_eq("bar", rblf.last_word("foo bar"))
+assert_eq("bar", rblf.last_word(["foo", "bar"]))
+assert_eq("", rblf.last_word(""))
+assert_eq("", rblf.last_word([]))
+
assert_eq(
["build/make/tests/board.rbc", "build/make/tests/board_input_vars.rbc"],
rblf.expand_wildcard("build/make/tests/board*.rbc")
diff --git a/tools/rbcrun/host.go b/tools/rbcrun/host.go
index c6e89f0..32afa45 100644
--- a/tools/rbcrun/host.go
+++ b/tools/rbcrun/host.go
@@ -20,6 +20,7 @@
"os"
"os/exec"
"path/filepath"
+ "sort"
"strings"
"go.starlark.net/starlark"
@@ -111,19 +112,6 @@
return e.globals, e.err
}
-// fileExists returns True if file with given name exists.
-func fileExists(_ *starlark.Thread, b *starlark.Builtin, args starlark.Tuple,
- kwargs []starlark.Tuple) (starlark.Value, error) {
- var path string
- if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 1, &path); err != nil {
- return starlark.None, err
- }
- if _, err := os.Stat(path); err != nil {
- return starlark.False, nil
- }
- return starlark.True, nil
-}
-
// wildcard(pattern, top=None) expands shell's glob pattern. If 'top' is present,
// the 'top/pattern' is globbed and then 'top/' prefix is removed.
func wildcard(_ *starlark.Thread, b *starlark.Builtin, args starlark.Tuple,
@@ -150,6 +138,10 @@
files[i] = strings.TrimPrefix(files[i], prefix)
}
}
+ // Kati uses glob(3) with no flags, which means it's sorted
+ // because GLOB_NOSORT is not passed. Go's glob is not
+ // guaranteed to sort the results.
+ sort.Strings(files)
return makeStringList(files), nil
}
@@ -269,8 +261,6 @@
"struct": starlark.NewBuiltin("struct", starlarkstruct.Make),
"rblf_cli": structFromEnv(env),
"rblf_env": structFromEnv(os.Environ()),
- // To convert makefile's $(wildcard foo)
- "rblf_file_exists": starlark.NewBuiltin("rblf_file_exists", fileExists),
// To convert find-copy-subdir and product-copy-files-by pattern
"rblf_find_files": starlark.NewBuiltin("rblf_find_files", find),
// To convert makefile's $(shell cmd)
diff --git a/tools/rbcrun/testdata/file_ops.star b/tools/rbcrun/testdata/file_ops.star
index 50e39bf..2ee78fc 100644
--- a/tools/rbcrun/testdata/file_ops.star
+++ b/tools/rbcrun/testdata/file_ops.star
@@ -4,9 +4,6 @@
def test():
myname = "file_ops.star"
- assert.true(rblf_file_exists("."), "./ exists ")
- assert.true(rblf_file_exists(myname), "the file %s does exist" % myname)
- assert.true(not rblf_file_exists("no_such_file"), "the file no_such_file does not exist")
files = rblf_wildcard("*.star")
assert.true(myname in files, "expected %s in %s" % (myname, files))
files = rblf_wildcard("*.star", rblf_env.TEST_DATA_DIR)
diff --git a/tools/rbcrun/testdata/module1.star b/tools/rbcrun/testdata/module1.star
index 913fb7d..be04f75 100644
--- a/tools/rbcrun/testdata/module1.star
+++ b/tools/rbcrun/testdata/module1.star
@@ -2,6 +2,6 @@
load("assert.star", "assert")
# Make sure that builtins are defined for the loaded module, too
-assert.true(rblf_file_exists("module1.star"))
-assert.true(not rblf_file_exists("no_such file"))
+assert.true(rblf_wildcard("module1.star"))
+assert.true(not rblf_wildcard("no_such file"))
test = "module1"
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 5f74e2b..374babf 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -725,7 +725,7 @@
GZ = 2
-def _GetRamdiskFormat(info_dict):
+def GetRamdiskFormat(info_dict):
if info_dict.get('lz4_ramdisks') == 'true':
ramdisk_format = RamdiskFormat.LZ4
else:
@@ -834,7 +834,7 @@
# Load recovery fstab if applicable.
d["fstab"] = _FindAndLoadRecoveryFstab(d, input_file, read_helper)
- ramdisk_format = _GetRamdiskFormat(d)
+ ramdisk_format = GetRamdiskFormat(d)
# Tries to load the build props for all partitions with care_map, including
# system and vendor.
@@ -1188,10 +1188,14 @@
return " ".join(sorted(combined))
if (framework_dict.get("use_dynamic_partitions") !=
- "true") or (vendor_dict.get("use_dynamic_partitions") != "true"):
+ "true") or (vendor_dict.get("use_dynamic_partitions") != "true"):
raise ValueError("Both dictionaries must have use_dynamic_partitions=true")
merged_dict = {"use_dynamic_partitions": "true"}
+ # For key-value pairs that are the same, copy to merged dict
+ for key in vendor_dict.keys():
+ if key in framework_dict and framework_dict[key] == vendor_dict[key]:
+ merged_dict[key] = vendor_dict[key]
merged_dict["dynamic_partition_list"] = uniq_concat(
framework_dict.get("dynamic_partition_list", ""),
@@ -1575,7 +1579,7 @@
img = tempfile.NamedTemporaryFile()
if has_ramdisk:
- ramdisk_format = _GetRamdiskFormat(info_dict)
+ ramdisk_format = GetRamdiskFormat(info_dict)
ramdisk_img = _MakeRamdisk(sourcedir, fs_config_file,
ramdisk_format=ramdisk_format)
@@ -1856,7 +1860,7 @@
img = tempfile.NamedTemporaryFile()
- ramdisk_format = _GetRamdiskFormat(info_dict)
+ ramdisk_format = GetRamdiskFormat(info_dict)
ramdisk_img = _MakeRamdisk(sourcedir, ramdisk_format=ramdisk_format)
# use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
diff --git a/tools/releasetools/merge/merge_utils.py b/tools/releasetools/merge/merge_utils.py
index f623ad2..e253b02 100644
--- a/tools/releasetools/merge/merge_utils.py
+++ b/tools/releasetools/merge/merge_utils.py
@@ -100,20 +100,16 @@
has_error = False
# Check that partitions only come from one input.
- for partition in _FRAMEWORK_PARTITIONS.union(_VENDOR_PARTITIONS):
- image_path = 'IMAGES/{}.img'.format(partition.lower().replace('/', ''))
- in_framework = (
- any(item.startswith(partition) for item in OPTIONS.framework_item_list)
- or image_path in OPTIONS.framework_item_list)
- in_vendor = (
- any(item.startswith(partition) for item in OPTIONS.vendor_item_list) or
- image_path in OPTIONS.vendor_item_list)
- if in_framework and in_vendor:
- logger.error(
- 'Cannot extract items from %s for both the framework and vendor'
- ' builds. Please ensure only one merge config item list'
- ' includes %s.', partition, partition)
- has_error = True
+ framework_partitions = ItemListToPartitionSet(OPTIONS.framework_item_list)
+ vendor_partitions = ItemListToPartitionSet(OPTIONS.vendor_item_list)
+ from_both = framework_partitions.intersection(vendor_partitions)
+ if from_both:
+ logger.error(
+ 'Cannot extract items from the same partition in both the '
+ 'framework and vendor builds. Please ensure only one merge config '
+ 'item list (or inferred list) includes each partition: %s' %
+ ','.join(from_both))
+ has_error = True
if any([
key in OPTIONS.framework_misc_info_keys
@@ -131,7 +127,8 @@
# system partition). The following regex matches this and extracts the
# partition name.
-_PARTITION_ITEM_PATTERN = re.compile(r'^([A-Z_]+)/\*$')
+_PARTITION_ITEM_PATTERN = re.compile(r'^([A-Z_]+)/.*$')
+_IMAGE_PARTITION_PATTERN = re.compile(r'^IMAGES/(.*)\.img$')
def ItemListToPartitionSet(item_list):
@@ -154,62 +151,89 @@
partition_set = set()
for item in item_list:
- partition_match = _PARTITION_ITEM_PATTERN.search(item.strip())
- partition_tag = partition_match.group(
- 1).lower() if partition_match else None
-
- if partition_tag:
- partition_set.add(partition_tag)
+ for pattern in (_PARTITION_ITEM_PATTERN, _IMAGE_PARTITION_PATTERN):
+ partition_match = pattern.search(item.strip())
+ if partition_match:
+ partition = partition_match.group(1).lower()
+ # These directories in target-files are not actual partitions.
+ if partition not in ('meta', 'images'):
+ partition_set.add(partition)
return partition_set
# Partitions that are grabbed from the framework partial build by default.
_FRAMEWORK_PARTITIONS = {
- 'system', 'product', 'system_ext', 'system_other', 'root', 'system_dlkm'
-}
-# Partitions that are grabbed from the vendor partial build by default.
-_VENDOR_PARTITIONS = {
- 'vendor', 'odm', 'oem', 'boot', 'vendor_boot', 'recovery',
- 'prebuilt_images', 'radio', 'data', 'vendor_dlkm', 'odm_dlkm'
+ 'system', 'product', 'system_ext', 'system_other', 'root', 'system_dlkm',
+ 'vbmeta_system'
}
def InferItemList(input_namelist, framework):
- item_list = []
+ item_set = set()
- # Some META items are grabbed from partial builds directly.
+ # Some META items are always grabbed from partial builds directly.
# Others are combined in merge_meta.py.
if framework:
- item_list.extend([
+ item_set.update([
'META/liblz4.so',
'META/postinstall_config.txt',
'META/update_engine_config.txt',
'META/zucchini_config.txt',
])
else: # vendor
- item_list.extend([
+ item_set.update([
'META/kernel_configs.txt',
'META/kernel_version.txt',
'META/otakeys.txt',
+ 'META/pack_radioimages.txt',
'META/releasetools.py',
- 'OTA/android-info.txt',
])
# Grab a set of items for the expected partitions in the partial build.
- for partition in (_FRAMEWORK_PARTITIONS if framework else _VENDOR_PARTITIONS):
- for namelist in input_namelist:
- if namelist.startswith('%s/' % partition.upper()):
- fs_config_prefix = '' if partition == 'system' else '%s_' % partition
- item_list.extend([
- '%s/*' % partition.upper(),
- 'IMAGES/%s.img' % partition,
- 'IMAGES/%s.map' % partition,
- 'META/%sfilesystem_config.txt' % fs_config_prefix,
- ])
- break
+ seen_partitions = []
+ for namelist in input_namelist:
+ if namelist.endswith('/'):
+ continue
- return sorted(item_list)
+ partition = namelist.split('/')[0].lower()
+
+ # META items are grabbed above, or merged later.
+ if partition == 'meta':
+ continue
+
+ if partition == 'images':
+ image_partition, extension = os.path.splitext(os.path.basename(namelist))
+ if image_partition == 'vbmeta':
+ # Always regenerate vbmeta.img since it depends on hash information
+ # from both builds.
+ continue
+ if extension in ('.img', '.map'):
+ # Include image files in IMAGES/* if the partition comes from
+ # the expected set.
+ if (framework and image_partition in _FRAMEWORK_PARTITIONS) or (
+ not framework and image_partition not in _FRAMEWORK_PARTITIONS):
+ item_set.add(namelist)
+ elif not framework:
+ # Include all miscellaneous non-image files in IMAGES/* from
+ # the vendor build.
+ item_set.add(namelist)
+ continue
+
+ # Skip already-visited partitions.
+ if partition in seen_partitions:
+ continue
+ seen_partitions.append(partition)
+
+ if (framework and partition in _FRAMEWORK_PARTITIONS) or (
+ not framework and partition not in _FRAMEWORK_PARTITIONS):
+ fs_config_prefix = '' if partition == 'system' else '%s_' % partition
+ item_set.update([
+ '%s/*' % partition.upper(),
+ 'META/%sfilesystem_config.txt' % fs_config_prefix,
+ ])
+
+ return sorted(item_set)
def InferFrameworkMiscInfoKeys(input_namelist):
@@ -223,8 +247,8 @@
]
for partition in _FRAMEWORK_PARTITIONS:
- for namelist in input_namelist:
- if namelist.startswith('%s/' % partition.upper()):
+ for partition_dir in ('%s/' % partition.upper(), 'SYSTEM/%s/' % partition):
+ if partition_dir in input_namelist:
fs_type_prefix = '' if partition == 'system' else '%s_' % partition
keys.extend([
'avb_%s_hashtree_enable' % partition,
diff --git a/tools/releasetools/merge/test_merge_utils.py b/tools/releasetools/merge/test_merge_utils.py
index 1949050..eceb734 100644
--- a/tools/releasetools/merge/test_merge_utils.py
+++ b/tools/releasetools/merge/test_merge_utils.py
@@ -108,20 +108,27 @@
def test_ItemListToPartitionSet(self):
item_list = [
+ 'IMAGES/system_ext.img',
'META/apexkeys.txt',
'META/apkcerts.txt',
'META/filesystem_config.txt',
'PRODUCT/*',
'SYSTEM/*',
- 'SYSTEM_EXT/*',
+ 'SYSTEM/system_ext/*',
]
partition_set = merge_utils.ItemListToPartitionSet(item_list)
self.assertEqual(set(['product', 'system', 'system_ext']), partition_set)
def test_InferItemList_Framework(self):
zip_namelist = [
+ 'IMAGES/product.img',
+ 'IMAGES/product.map',
+ 'IMAGES/system.img',
+ 'IMAGES/system.map',
'SYSTEM/my_system_file',
'PRODUCT/my_product_file',
+ # Device does not use a separate system_ext partition.
+ 'SYSTEM/system_ext/system_ext_file',
]
item_list = merge_utils.InferItemList(zip_namelist, framework=True)
@@ -147,37 +154,55 @@
zip_namelist = [
'VENDOR/my_vendor_file',
'ODM/my_odm_file',
+ 'IMAGES/odm.img',
+ 'IMAGES/odm.map',
+ 'IMAGES/vendor.img',
+ 'IMAGES/vendor.map',
+ 'IMAGES/my_custom_image.img',
+ 'IMAGES/my_custom_file.txt',
+ 'IMAGES/vbmeta.img',
+ 'CUSTOM_PARTITION/my_custom_file',
+ # Leftover framework pieces that shouldn't be grabbed.
+ 'IMAGES/system.img',
+ 'SYSTEM/system_file',
]
item_list = merge_utils.InferItemList(zip_namelist, framework=False)
expected_vendor_item_list = [
+ 'CUSTOM_PARTITION/*',
+ 'IMAGES/my_custom_file.txt',
+ 'IMAGES/my_custom_image.img',
'IMAGES/odm.img',
'IMAGES/odm.map',
'IMAGES/vendor.img',
'IMAGES/vendor.map',
+ 'META/custom_partition_filesystem_config.txt',
'META/kernel_configs.txt',
'META/kernel_version.txt',
'META/odm_filesystem_config.txt',
'META/otakeys.txt',
+ 'META/pack_radioimages.txt',
'META/releasetools.py',
'META/vendor_filesystem_config.txt',
'ODM/*',
- 'OTA/android-info.txt',
'VENDOR/*',
]
self.assertEqual(item_list, expected_vendor_item_list)
def test_InferFrameworkMiscInfoKeys(self):
zip_namelist = [
- 'SYSTEM/my_system_file',
- 'SYSTEM_EXT/my_system_ext_file',
+ 'PRODUCT/',
+ 'SYSTEM/',
+ 'SYSTEM/system_ext/',
]
keys = merge_utils.InferFrameworkMiscInfoKeys(zip_namelist)
expected_keys = [
'ab_update',
+ 'avb_product_add_hashtree_footer_args',
+ 'avb_product_hashtree_enable',
'avb_system_add_hashtree_footer_args',
'avb_system_ext_add_hashtree_footer_args',
'avb_system_ext_hashtree_enable',
@@ -186,10 +211,13 @@
'avb_vbmeta_system_algorithm',
'avb_vbmeta_system_key_path',
'avb_vbmeta_system_rollback_index_location',
+ 'building_product_image',
'building_system_ext_image',
'building_system_image',
'default_system_dev_certificate',
'fs_type',
+ 'product_disable_sparse',
+ 'product_fs_type',
'system_disable_sparse',
'system_ext_disable_sparse',
'system_ext_fs_type',
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index 522d489..66e850b 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -1208,6 +1208,8 @@
metadata.postcondition.partition_state)
if not ota_utils.IsZucchiniCompatible(source_file, target_file):
+ logger.warning(
+ "Builds doesn't support zucchini, or source/target don't have compatible zucchini versions. Disabling zucchini.")
OPTIONS.enable_zucchini = False
additional_args += ["--enable_zucchini",
diff --git a/tools/releasetools/ota_utils.py b/tools/releasetools/ota_utils.py
index 5d403dc..ef1dca2 100644
--- a/tools/releasetools/ota_utils.py
+++ b/tools/releasetools/ota_utils.py
@@ -22,7 +22,8 @@
import ota_metadata_pb2
from common import (ZipDelete, ZipClose, OPTIONS, MakeTempFile,
ZipWriteStr, BuildInfo, LoadDictionaryFromFile,
- SignFile, PARTITIONS_WITH_BUILD_PROP, PartitionBuildProps)
+ SignFile, PARTITIONS_WITH_BUILD_PROP, PartitionBuildProps,
+ GetRamdiskFormat)
logger = logging.getLogger(__name__)
@@ -371,15 +372,18 @@
for partition in PARTITIONS_WITH_BUILD_PROP:
partition_prop_key = "{}.build.prop".format(partition)
input_file = info_dict[partition_prop_key].input_file
+ ramdisk = GetRamdiskFormat(info_dict)
if isinstance(input_file, zipfile.ZipFile):
with zipfile.ZipFile(input_file.filename, allowZip64=True) as input_zip:
info_dict[partition_prop_key] = \
PartitionBuildProps.FromInputFile(input_zip, partition,
- placeholder_values)
+ placeholder_values,
+ ramdisk)
else:
info_dict[partition_prop_key] = \
PartitionBuildProps.FromInputFile(input_file, partition,
- placeholder_values)
+ placeholder_values,
+ ramdisk)
info_dict["build.prop"] = info_dict["system.build.prop"]
build_info_set.add(BuildInfo(info_dict, default_build_info.oem_dicts))
@@ -693,6 +697,7 @@
if os.path.exists(entry_path):
with open(entry_path, "r") as fp:
return fp.read()
- else:
- return ""
- return ReadEntry(source_file, _ZUCCHINI_CONFIG_ENTRY_NAME) == ReadEntry(target_file, _ZUCCHINI_CONFIG_ENTRY_NAME)
+ return False
+ sourceEntry = ReadEntry(source_file, _ZUCCHINI_CONFIG_ENTRY_NAME)
+ targetEntry = ReadEntry(target_file, _ZUCCHINI_CONFIG_ENTRY_NAME)
+ return sourceEntry and targetEntry and sourceEntry == targetEntry