Merge "Remove android.hardware.wifi from generic_system.mk" into main
diff --git a/Android.bp b/Android.bp
new file mode 100644
index 0000000..cd5c426
--- /dev/null
+++ b/Android.bp
@@ -0,0 +1,36 @@
+// Copyright 2024 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package {
+    default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+// Package the minimal files required to run envsetup.sh in the test
+// environment.
+genrule {
+    name: "envsetup_minimum.zip",
+    visibility: [
+        "//build/make/tests:__subpackages__",
+    ],
+    tools: [
+        "soong_zip",
+    ],
+    srcs: [
+        "envsetup.sh",
+        "shell_utils.sh",
+        "core/envsetup.mk",
+    ],
+    out: ["envsetup.zip"],
+    cmd: "$(location soong_zip) -o $(out) -D build/make",
+}
diff --git a/PREUPLOAD.cfg b/PREUPLOAD.cfg
index ce75150..97ecd33 100644
--- a/PREUPLOAD.cfg
+++ b/PREUPLOAD.cfg
@@ -1,2 +1,5 @@
 [Hook Scripts]
 do_not_use_DO_NOT_MERGE = ${REPO_ROOT}/build/soong/scripts/check_do_not_merge.sh ${PREUPLOAD_COMMIT}
+
+[Builtin Hooks]
+ktfmt = true
diff --git a/ci/Android.bp b/ci/Android.bp
new file mode 100644
index 0000000..066b83f
--- /dev/null
+++ b/ci/Android.bp
@@ -0,0 +1,85 @@
+// Copyright 2024 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package {
+    default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+python_test_host {
+    name: "build_test_suites_test",
+    main: "build_test_suites_test.py",
+    pkg_path: "testdata",
+    srcs: [
+        "build_test_suites_test.py",
+    ],
+    libs: [
+        "build_test_suites",
+        "pyfakefs",
+        "ci_test_lib",
+    ],
+    test_options: {
+        unit_test: true,
+    },
+    data: [
+        ":py3-cmd",
+    ],
+    version: {
+        py3: {
+            embedded_launcher: true,
+        },
+    },
+}
+
+// This test is only intended to be run locally since it's slow, not hermetic,
+// and requires a lot of system state. It is therefore not marked as `unit_test`
+// and is not part of any test suite. Note that we also don't want to run this
+// test with Bazel since that would require disabling sandboxing and explicitly
+// passing in all the env vars we depend on via the command-line. The test
+// target could be configured to do so, but it's not worth doing given that
+// we're moving away from Bazel.
+python_test_host {
+    name: "build_test_suites_local_test",
+    main: "build_test_suites_local_test.py",
+    srcs: [
+        "build_test_suites_local_test.py",
+    ],
+    libs: [
+        "build_test_suites",
+        "pyfakefs",
+        "ci_test_lib",
+    ],
+    test_config_template: "AndroidTest.xml.template",
+    test_options: {
+        unit_test: false,
+    },
+    version: {
+        py3: {
+            embedded_launcher: true,
+        },
+    },
+}
+
+python_library_host {
+    name: "build_test_suites",
+    srcs: [
+        "build_test_suites.py",
+    ],
+}
+
+python_library_host {
+    name: "ci_test_lib",
+    srcs: [
+        "ci_test_lib.py",
+    ],
+}
diff --git a/ci/AndroidTest.xml.template b/ci/AndroidTest.xml.template
new file mode 100644
index 0000000..81a3435
--- /dev/null
+++ b/ci/AndroidTest.xml.template
@@ -0,0 +1,22 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2020 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<configuration>
+  <test class="com.android.tradefed.testtype.python.PythonBinaryHostTest">
+    <option name="par-file-name" value="{MODULE}"/>
+    <option name="use-test-output-file" value="false"/>
+    <option name="test-timeout" value="5m"/>
+  </test>
+</configuration>
diff --git a/ci/build_test_suites b/ci/build_test_suites
index 03f6731..5aaf2f4 100755
--- a/ci/build_test_suites
+++ b/ci/build_test_suites
@@ -1,4 +1,5 @@
 #!prebuilts/build-tools/linux-x86/bin/py3-cmd -B
+#
 # Copyright 2024, The Android Open Source Project
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -13,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import sys
 import build_test_suites
+import sys
 
-build_test_suites.main(sys.argv)
+build_test_suites.main(sys.argv[1:])
diff --git a/ci/build_test_suites.py b/ci/build_test_suites.py
index 1d5b377..29ed50e 100644
--- a/ci/build_test_suites.py
+++ b/ci/build_test_suites.py
@@ -12,404 +12,115 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""Script to build only the necessary modules for general-tests along
-
-with whatever other targets are passed in.
-"""
+"""Build script for the CI `test_suites` target."""
 
 import argparse
-from collections.abc import Sequence
-import json
+import logging
 import os
 import pathlib
-import re
 import subprocess
 import sys
-from typing import Any
-
-import test_mapping_module_retriever
 
 
-# List of modules that are always required to be in general-tests.zip
-REQUIRED_MODULES = frozenset(
-    ['cts-tradefed', 'vts-tradefed', 'compatibility-host-util', 'soong_zip']
-)
+class Error(Exception):
+
+  def __init__(self, message):
+    super().__init__(message)
 
 
-def build_test_suites(argv):
+class BuildFailureError(Error):
+
+  def __init__(self, return_code):
+    super().__init__(f'Build command failed with return code: {return_code}')
+    self.return_code = return_code
+
+
+REQUIRED_ENV_VARS = frozenset(['TARGET_PRODUCT', 'TARGET_RELEASE', 'TOP'])
+SOONG_UI_EXE_REL_PATH = 'build/soong/soong_ui.bash'
+
+
+def get_top() -> pathlib.Path:
+  return pathlib.Path(os.environ['TOP'])
+
+
+def build_test_suites(argv: list[str]) -> int:
+  """Builds the test suites requested on the command line.
+
+  Args:
+    argv: The command line arguments passed in.
+
+  Returns:
+    The exit code of the build.
+  """
   args = parse_args(argv)
+  check_required_env()
 
-  if is_optimization_enabled():
-    # Call the class to map changed files to modules to build.
-    # TODO(lucafarsi): Move this into a replaceable class.
-    build_affected_modules(args)
-  else:
+  try:
     build_everything(args)
+  except BuildFailureError as e:
+    logging.error('Build command failed! Check build_log for details.')
+    return e.return_code
+
+  return 0
+
+
+def check_required_env():
+  """Check for required env vars.
+
+  Raises:
+    Error: If any required environment variables are not found.
+  """
+  missing_env_vars = sorted(v for v in REQUIRED_ENV_VARS if v not in os.environ)
+
+  if not missing_env_vars:
+    return
+
+  missing = ', '.join(missing_env_vars)
+  raise Error(f'Missing required environment variables: {missing}')
 
 
 def parse_args(argv):
   argparser = argparse.ArgumentParser()
+
   argparser.add_argument(
       'extra_targets', nargs='*', help='Extra test suites to build.'
   )
-  argparser.add_argument('--target_product')
-  argparser.add_argument('--target_release')
-  argparser.add_argument(
-      '--with_dexpreopt_boot_img_and_system_server_only', action='store_true'
-  )
-  argparser.add_argument('--change_info', nargs='?')
 
-  return argparser.parse_args()
-
-
-def is_optimization_enabled() -> bool:
-  # TODO(lucafarsi): switch back to building only affected general-tests modules
-  # in presubmit once ready.
-  # if os.environ.get('BUILD_NUMBER')[0] == 'P':
-  #   return True
-  return False
+  return argparser.parse_args(argv)
 
 
 def build_everything(args: argparse.Namespace):
+  """Builds all tests (regardless of whether they are needed).
+
+  Args:
+    args: The parsed arguments.
+
+  Raises:
+    BuildFailureError: If the build command fails.
+  """
   build_command = base_build_command(args, args.extra_targets)
-  build_command.append('general-tests')
 
-  run_command(build_command, print_output=True)
-
-
-def build_affected_modules(args: argparse.Namespace):
-  modules_to_build = find_modules_to_build(
-      pathlib.Path(args.change_info), args.extra_required_modules
-  )
-
-  # Call the build command with everything.
-  build_command = base_build_command(args, args.extra_targets)
-  build_command.extend(modules_to_build)
-  # When not building general-tests we also have to build the general tests
-  # shared libs.
-  build_command.append('general-tests-shared-libs')
-
-  run_command(build_command, print_output=True)
-
-  zip_build_outputs(modules_to_build, args.target_release)
+  try:
+    run_command(build_command)
+  except subprocess.CalledProcessError as e:
+    raise BuildFailureError(e.returncode) from e
 
 
 def base_build_command(
     args: argparse.Namespace, extra_targets: set[str]
-) -> list:
+) -> list[str]:
+
   build_command = []
-  build_command.append('time')
-  build_command.append('./build/soong/soong_ui.bash')
+  build_command.append(get_top().joinpath(SOONG_UI_EXE_REL_PATH))
   build_command.append('--make-mode')
-  build_command.append('dist')
-  build_command.append('TARGET_PRODUCT=' + args.target_product)
-  build_command.append('TARGET_RELEASE=' + args.target_release)
-  if args.with_dexpreopt_boot_img_and_system_server_only:
-    build_command.append('WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY=true')
   build_command.extend(extra_targets)
 
   return build_command
 
 
-def run_command(
-    args: list[str],
-    env: dict[str, str] = os.environ,
-    print_output: bool = False,
-) -> str:
-  result = subprocess.run(
-      args=args,
-      text=True,
-      capture_output=True,
-      check=False,
-      env=env,
-  )
-  # If the process failed, print its stdout and propagate the exception.
-  if not result.returncode == 0:
-    print('Build command failed! output:')
-    print('stdout: ' + result.stdout)
-    print('stderr: ' + result.stderr)
-
-  result.check_returncode()
-
-  if print_output:
-    print(result.stdout)
-
-  return result.stdout
-
-
-def find_modules_to_build(
-    change_info: pathlib.Path, extra_required_modules: list[str]
-) -> set[str]:
-  changed_files = find_changed_files(change_info)
-
-  test_mappings = test_mapping_module_retriever.GetTestMappings(
-      changed_files, set()
-  )
-
-  # Soong_zip is required to generate the output zip so always build it.
-  modules_to_build = set(REQUIRED_MODULES)
-  if extra_required_modules:
-    modules_to_build.update(extra_required_modules)
-
-  modules_to_build.update(find_affected_modules(test_mappings, changed_files))
-
-  return modules_to_build
-
-
-def find_changed_files(change_info: pathlib.Path) -> set[str]:
-  with open(change_info) as change_info_file:
-    change_info_contents = json.load(change_info_file)
-
-  changed_files = set()
-
-  for change in change_info_contents['changes']:
-    project_path = change.get('projectPath') + '/'
-
-    for revision in change.get('revisions'):
-      for file_info in revision.get('fileInfos'):
-        changed_files.add(project_path + file_info.get('path'))
-
-  return changed_files
-
-
-def find_affected_modules(
-    test_mappings: dict[str, Any], changed_files: set[str]
-) -> set[str]:
-  modules = set()
-
-  # The test_mappings object returned by GetTestMappings is organized as
-  # follows:
-  # {
-  #   'test_mapping_file_path': {
-  #     'group_name' : [
-  #       'name': 'module_name',
-  #     ],
-  #   }
-  # }
-  for test_mapping in test_mappings.values():
-    for group in test_mapping.values():
-      for entry in group:
-        module_name = entry.get('name', None)
-
-        if not module_name:
-          continue
-
-        file_patterns = entry.get('file_patterns')
-        if not file_patterns:
-          modules.add(module_name)
-          continue
-
-        if matches_file_patterns(file_patterns, changed_files):
-          modules.add(module_name)
-          continue
-
-  return modules
-
-
-# TODO(lucafarsi): Share this logic with the original logic in
-# test_mapping_test_retriever.py
-def matches_file_patterns(
-    file_patterns: list[set], changed_files: set[str]
-) -> bool:
-  for changed_file in changed_files:
-    for pattern in file_patterns:
-      if re.search(pattern, changed_file):
-        return True
-
-  return False
-
-
-def zip_build_outputs(
-    modules_to_build: set[str], target_release: str
-):
-  src_top = os.environ.get('TOP', os.getcwd())
-
-  # Call dumpvars to get the necessary things.
-  # TODO(lucafarsi): Don't call soong_ui 4 times for this, --dumpvars-mode can
-  # do it but it requires parsing.
-  host_out_testcases = pathlib.Path(
-      get_soong_var('HOST_OUT_TESTCASES', target_release)
-  )
-  target_out_testcases = pathlib.Path(
-      get_soong_var('TARGET_OUT_TESTCASES', target_release)
-  )
-  product_out = pathlib.Path(get_soong_var('PRODUCT_OUT', target_release))
-  soong_host_out = pathlib.Path(get_soong_var('SOONG_HOST_OUT', target_release))
-  host_out = pathlib.Path(get_soong_var('HOST_OUT', target_release))
-  dist_dir = pathlib.Path(get_soong_var('DIST_DIR', target_release))
-
-  # Call the class to package the outputs.
-  # TODO(lucafarsi): Move this code into a replaceable class.
-  host_paths = []
-  target_paths = []
-  host_config_files = []
-  target_config_files = []
-  for module in modules_to_build:
-    host_path = os.path.join(host_out_testcases, module)
-    if os.path.exists(host_path):
-      host_paths.append(host_path)
-      collect_config_files(src_top, host_path, host_config_files)
-
-    target_path = os.path.join(target_out_testcases, module)
-    if os.path.exists(target_path):
-      target_paths.append(target_path)
-      collect_config_files(src_top, target_path, target_config_files)
-
-  zip_test_configs_zips(
-      dist_dir, host_out, product_out, host_config_files, target_config_files
-  )
-
-  zip_command = base_zip_command(host_out, dist_dir, 'general-tests.zip')
-
-  # Add host testcases.
-  zip_command.append('-C')
-  zip_command.append(os.path.join(src_top, soong_host_out))
-  zip_command.append('-P')
-  zip_command.append('host/')
-  for path in host_paths:
-    zip_command.append('-D')
-    zip_command.append(path)
-
-  # Add target testcases.
-  zip_command.append('-C')
-  zip_command.append(os.path.join(src_top, product_out))
-  zip_command.append('-P')
-  zip_command.append('target')
-  for path in target_paths:
-    zip_command.append('-D')
-    zip_command.append(path)
-
-  # TODO(lucafarsi): Push this logic into a general-tests-minimal build command
-  # Add necessary tools. These are also hardcoded in general-tests.mk.
-  framework_path = os.path.join(soong_host_out, 'framework')
-
-  zip_command.append('-C')
-  zip_command.append(framework_path)
-  zip_command.append('-P')
-  zip_command.append('host/tools')
-  zip_command.append('-f')
-  zip_command.append(os.path.join(framework_path, 'cts-tradefed.jar'))
-  zip_command.append('-f')
-  zip_command.append(
-      os.path.join(framework_path, 'compatibility-host-util.jar')
-  )
-  zip_command.append('-f')
-  zip_command.append(os.path.join(framework_path, 'vts-tradefed.jar'))
-
-  run_command(zip_command, print_output=True)
-
-
-def collect_config_files(
-    src_top: pathlib.Path, root_dir: pathlib.Path, config_files: list[str]
-):
-  for root, dirs, files in os.walk(os.path.join(src_top, root_dir)):
-    for file in files:
-      if file.endswith('.config'):
-        config_files.append(os.path.join(root_dir, file))
-
-
-def base_zip_command(
-    host_out: pathlib.Path, dist_dir: pathlib.Path, name: str
-) -> list[str]:
-  return [
-      'time',
-      os.path.join(host_out, 'bin', 'soong_zip'),
-      '-d',
-      '-o',
-      os.path.join(dist_dir, name),
-  ]
-
-
-# generate general-tests_configs.zip which contains all of the .config files
-# that were built and general-tests_list.zip which contains a text file which
-# lists all of the .config files that are in general-tests_configs.zip.
-#
-# general-tests_comfigs.zip is organized as follows:
-# /
-#   host/
-#     testcases/
-#       test_1.config
-#       test_2.config
-#       ...
-#   target/
-#     testcases/
-#       test_1.config
-#       test_2.config
-#       ...
-#
-# So the process is we write out the paths to all the host config files into one
-# file and all the paths to the target config files in another. We also write
-# the paths to all the config files into a third file to use for
-# general-tests_list.zip.
-def zip_test_configs_zips(
-    dist_dir: pathlib.Path,
-    host_out: pathlib.Path,
-    product_out: pathlib.Path,
-    host_config_files: list[str],
-    target_config_files: list[str],
-):
-  with open(
-      os.path.join(host_out, 'host_general-tests_list'), 'w'
-  ) as host_list_file, open(
-      os.path.join(product_out, 'target_general-tests_list'), 'w'
-  ) as target_list_file, open(
-      os.path.join(host_out, 'general-tests_list'), 'w'
-  ) as list_file:
-
-    for config_file in host_config_files:
-      host_list_file.write(config_file + '\n')
-      list_file.write('host/' + os.path.relpath(config_file, host_out) + '\n')
-
-    for config_file in target_config_files:
-      target_list_file.write(config_file + '\n')
-      list_file.write(
-          'target/' + os.path.relpath(config_file, product_out) + '\n'
-      )
-
-  tests_config_zip_command = base_zip_command(
-      host_out, dist_dir, 'general-tests_configs.zip'
-  )
-  tests_config_zip_command.append('-P')
-  tests_config_zip_command.append('host')
-  tests_config_zip_command.append('-C')
-  tests_config_zip_command.append(host_out)
-  tests_config_zip_command.append('-l')
-  tests_config_zip_command.append(
-      os.path.join(host_out, 'host_general-tests_list')
-  )
-  tests_config_zip_command.append('-P')
-  tests_config_zip_command.append('target')
-  tests_config_zip_command.append('-C')
-  tests_config_zip_command.append(product_out)
-  tests_config_zip_command.append('-l')
-  tests_config_zip_command.append(
-      os.path.join(product_out, 'target_general-tests_list')
-  )
-  run_command(tests_config_zip_command, print_output=True)
-
-  tests_list_zip_command = base_zip_command(
-      host_out, dist_dir, 'general-tests_list.zip'
-  )
-  tests_list_zip_command.append('-C')
-  tests_list_zip_command.append(host_out)
-  tests_list_zip_command.append('-f')
-  tests_list_zip_command.append(os.path.join(host_out, 'general-tests_list'))
-  run_command(tests_list_zip_command, print_output=True)
-
-
-def get_soong_var(var: str, target_release: str) -> str:
-  new_env = os.environ.copy()
-  new_env['TARGET_RELEASE'] = target_release
-
-  value = run_command(
-      ['./build/soong/soong_ui.bash', '--dumpvar-mode', '--abs', var],
-      env=new_env,
-  ).strip()
-  if not value:
-    raise RuntimeError('Necessary soong variable ' + var + ' not found.')
-
-  return value
+def run_command(args: list[str], stdout=None):
+  subprocess.run(args=args, check=True, stdout=stdout)
 
 
 def main(argv):
-  build_test_suites(argv)
+  sys.exit(build_test_suites(argv))
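
For context, a minimal sketch (illustrative only, with hypothetical environment values) of how the rewritten script is driven and what it ends up executing:

    import os

    import build_test_suites

    # Hypothetical values for the variables in REQUIRED_ENV_VARS.
    os.environ.update({
        'TOP': '/src/aosp',
        'TARGET_PRODUCT': 'aosp_arm64',
        'TARGET_RELEASE': 'trunk_staging',
    })

    # Runs roughly `/src/aosp/build/soong/soong_ui.bash --make-mode general-tests`
    # and raises SystemExit with the build's return code (0 on success).
    build_test_suites.main(['general-tests'])
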
diff --git a/ci/build_test_suites_local_test.py b/ci/build_test_suites_local_test.py
new file mode 100644
index 0000000..78e52d3
--- /dev/null
+++ b/ci/build_test_suites_local_test.py
@@ -0,0 +1,123 @@
+# Copyright 2024, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Integration tests for build_test_suites that require a local build env."""
+
+import os
+import pathlib
+import shutil
+import signal
+import subprocess
+import tempfile
+import time
+import ci_test_lib
+
+
+class BuildTestSuitesLocalTest(ci_test_lib.TestCase):
+
+  def setUp(self):
+    self.top_dir = pathlib.Path(os.environ['ANDROID_BUILD_TOP']).resolve()
+    self.executable = self.top_dir.joinpath('build/make/ci/build_test_suites')
+    self.process_session = ci_test_lib.TemporaryProcessSession(self)
+    self.temp_dir = ci_test_lib.TestTemporaryDirectory.create(self)
+
+  def build_subprocess_args(self, build_args: list[str]):
+    env = os.environ.copy()
+    env['TOP'] = str(self.top_dir)
+    env['OUT_DIR'] = self.temp_dir
+
+    args = ([self.executable] + build_args,)
+    kwargs = {
+        'cwd': self.top_dir,
+        'env': env,
+        'text': True,
+    }
+
+    return (args, kwargs)
+
+  def run_build(self, build_args: list[str]) -> subprocess.CompletedProcess:
+    args, kwargs = self.build_subprocess_args(build_args)
+
+    return subprocess.run(
+        *args,
+        **kwargs,
+        check=True,
+        capture_output=True,
+        timeout=5 * 60,
+    )
+
+  def assert_children_alive(self, children: list[int]):
+    for c in children:
+      self.assertTrue(ci_test_lib.process_alive(c))
+
+  def assert_children_dead(self, children: list[int]):
+    for c in children:
+      self.assertFalse(ci_test_lib.process_alive(c))
+
+  def test_fails_for_invalid_arg(self):
+    invalid_arg = '--invalid-arg'
+
+    with self.assertRaises(subprocess.CalledProcessError) as cm:
+      self.run_build([invalid_arg])
+
+    self.assertIn(invalid_arg, cm.exception.stderr)
+
+  def test_builds_successfully(self):
+    self.run_build(['nothing'])
+
+  def test_can_interrupt_build(self):
+    args, kwargs = self.build_subprocess_args(['general-tests'])
+    p = self.process_session.create(args, kwargs)
+
+    # TODO(lucafarsi): Replace this (and other instances) with a condition.
+    time.sleep(5)  # Wait for the build to get going.
+    self.assertIsNone(p.poll())  # Check that the process is still alive.
+    children = query_child_pids(p.pid)
+    self.assert_children_alive(children)
+
+    p.send_signal(signal.SIGINT)
+    p.wait()
+
+    time.sleep(5)  # Wait for things to die out.
+    self.assert_children_dead(children)
+
+  def test_can_kill_build_process_group(self):
+    args, kwargs = self.build_subprocess_args(['general-tests'])
+    p = self.process_session.create(args, kwargs)
+
+    time.sleep(5)  # Wait for the build to get going.
+    self.assertIsNone(p.poll())  # Check that the process is still alive.
+    children = query_child_pids(p.pid)
+    self.assert_children_alive(children)
+
+    os.killpg(os.getpgid(p.pid), signal.SIGKILL)
+    p.wait()
+
+    time.sleep(5)  # Wait for things to die out.
+    self.assert_children_dead(children)
+
+
+# TODO(hzalek): Replace this with `psutils` once available in the tree.
+def query_child_pids(parent_pid: int) -> set[int]:
+  p = subprocess.run(
+      ['pgrep', '-P', str(parent_pid)],
+      check=True,
+      capture_output=True,
+      text=True,
+  )
+  return {int(pid) for pid in p.stdout.splitlines()}
+
+
+if __name__ == '__main__':
+  ci_test_lib.main()
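
The fixed `time.sleep(5)` calls flagged by the TODO above could later be replaced by a polling condition similar to the `wait_until` helper defined in build_test_suites_test.py below; a minimal sketch, for illustration only:

    import time
    from typing import Callable


    def wait_until(
        condition: Callable[[], bool],
        timeout_secs: float = 30.0,
        poll_secs: float = 0.5,
    ):
      """Polls `condition` until it returns True or the timeout expires."""
      deadline = time.time() + timeout_secs
      while not condition():
        if time.time() > deadline:
          raise TimeoutError(f'Condition not met within {timeout_secs} seconds')
        time.sleep(poll_secs)


    # e.g. wait_until(lambda: not query_child_pids(p.pid)) instead of sleep(5).
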
diff --git a/ci/build_test_suites_test.py b/ci/build_test_suites_test.py
new file mode 100644
index 0000000..08a79a3
--- /dev/null
+++ b/ci/build_test_suites_test.py
@@ -0,0 +1,254 @@
+# Copyright 2024, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests for build_test_suites.py"""
+
+from importlib import resources
+import multiprocessing
+import os
+import pathlib
+import shutil
+import signal
+import stat
+import subprocess
+import sys
+import tempfile
+import textwrap
+import time
+from typing import Callable
+from unittest import mock
+import build_test_suites
+import ci_test_lib
+from pyfakefs import fake_filesystem_unittest
+
+
+class BuildTestSuitesTest(fake_filesystem_unittest.TestCase):
+
+  def setUp(self):
+    self.setUpPyfakefs()
+
+    os_environ_patcher = mock.patch.dict('os.environ', {})
+    self.addCleanup(os_environ_patcher.stop)
+    self.mock_os_environ = os_environ_patcher.start()
+
+    subprocess_run_patcher = mock.patch('subprocess.run')
+    self.addCleanup(subprocess_run_patcher.stop)
+    self.mock_subprocess_run = subprocess_run_patcher.start()
+
+    self._setup_working_build_env()
+
+  def test_missing_target_release_env_var_raises(self):
+    del os.environ['TARGET_RELEASE']
+
+    with self.assert_raises_word(build_test_suites.Error, 'TARGET_RELEASE'):
+      build_test_suites.main([])
+
+  def test_missing_target_product_env_var_raises(self):
+    del os.environ['TARGET_PRODUCT']
+
+    with self.assert_raises_word(build_test_suites.Error, 'TARGET_PRODUCT'):
+      build_test_suites.main([])
+
+  def test_missing_top_env_var_raises(self):
+    del os.environ['TOP']
+
+    with self.assert_raises_word(build_test_suites.Error, 'TOP'):
+      build_test_suites.main([])
+
+  def test_invalid_arg_raises(self):
+    invalid_args = ['--invalid_arg']
+
+    with self.assertRaisesRegex(SystemExit, '2'):
+      build_test_suites.main(invalid_args)
+
+  def test_build_failure_returns(self):
+    self.mock_subprocess_run.side_effect = subprocess.CalledProcessError(
+        42, None
+    )
+
+    with self.assertRaisesRegex(SystemExit, '42'):
+      build_test_suites.main([])
+
+  def test_build_success_returns(self):
+    with self.assertRaisesRegex(SystemExit, '0'):
+      build_test_suites.main([])
+
+  def assert_raises_word(self, cls, word):
+    return self.assertRaisesRegex(cls, rf'\b{word}\b')
+
+  def _setup_working_build_env(self):
+    self.fake_top = pathlib.Path('/fake/top')
+    self.fake_top.mkdir(parents=True)
+
+    self.soong_ui_dir = self.fake_top.joinpath('build/soong')
+    self.soong_ui_dir.mkdir(parents=True, exist_ok=True)
+
+    self.soong_ui = self.soong_ui_dir.joinpath('soong_ui.bash')
+    self.soong_ui.touch()
+
+    self.mock_os_environ.update({
+        'TARGET_RELEASE': 'release',
+        'TARGET_PRODUCT': 'product',
+        'TOP': str(self.fake_top),
+    })
+
+    self.mock_subprocess_run.return_value = 0
+
+
+class RunCommandIntegrationTest(ci_test_lib.TestCase):
+
+  def setUp(self):
+    self.temp_dir = ci_test_lib.TestTemporaryDirectory.create(self)
+
+    # Copy the Python executable from 'non-code' resources and make it
+    # executable for use by tests that launch a subprocess. Note that we don't
+    # use Python's native `sys.executable` property since that is not set when
+    # running via the embedded launcher.
+    base_name = 'py3-cmd'
+    dest_file = self.temp_dir.joinpath(base_name)
+    with resources.as_file(
+        resources.files('testdata').joinpath(base_name)
+    ) as p:
+      shutil.copy(p, dest_file)
+    dest_file.chmod(dest_file.stat().st_mode | stat.S_IEXEC)
+    self.python_executable = dest_file
+
+    self._managed_processes = []
+
+  def tearDown(self):
+    self._terminate_managed_processes()
+
+  def test_raises_on_nonzero_exit(self):
+    with self.assertRaises(Exception):
+      build_test_suites.run_command([
+          self.python_executable,
+          '-c',
+          textwrap.dedent(f"""\
+              import sys
+              sys.exit(1)
+              """),
+      ])
+
+  def test_streams_stdout(self):
+
+    def run_slow_command(stdout_file, marker):
+      with open(stdout_file, 'w') as f:
+        build_test_suites.run_command(
+            [
+                self.python_executable,
+                '-c',
+                textwrap.dedent(f"""\
+                  import time
+
+                  print('{marker}', end='', flush=True)
+
+                  # Keep process alive until we check stdout.
+                  time.sleep(10)
+                  """),
+            ],
+            stdout=f,
+        )
+
+    marker = 'Spinach'
+    stdout_file = self.temp_dir.joinpath('stdout.txt')
+
+    p = self.start_process(target=run_slow_command, args=[stdout_file, marker])
+
+    self.assert_file_eventually_contains(stdout_file, marker)
+
+  def test_propagates_interruptions(self):
+
+    def run(pid_file):
+      build_test_suites.run_command([
+          self.python_executable,
+          '-c',
+          textwrap.dedent(f"""\
+              import os
+              import pathlib
+              import time
+
+              pathlib.Path('{pid_file}').write_text(str(os.getpid()))
+
+              # Keep the process alive for us to explicitly interrupt it.
+              time.sleep(10)
+              """),
+      ])
+
+    pid_file = self.temp_dir.joinpath('pid.txt')
+    p = self.start_process(target=run, args=[pid_file])
+    subprocess_pid = int(read_eventual_file_contents(pid_file))
+
+    os.kill(p.pid, signal.SIGINT)
+    p.join()
+
+    self.assert_process_eventually_dies(p.pid)
+    self.assert_process_eventually_dies(subprocess_pid)
+
+  def start_process(self, *args, **kwargs) -> multiprocessing.Process:
+    p = multiprocessing.Process(*args, **kwargs)
+    self._managed_processes.append(p)
+    p.start()
+    return p
+
+  def assert_process_eventually_dies(self, pid: int):
+    try:
+      wait_until(lambda: not ci_test_lib.process_alive(pid))
+    except TimeoutError as e:
+      self.fail(f'Process {pid} did not die after a while: {e}')
+
+  def assert_file_eventually_contains(self, file: pathlib.Path, substring: str):
+    wait_until(lambda: file.is_file() and file.stat().st_size > 0)
+    self.assertIn(substring, read_file_contents(file))
+
+  def _terminate_managed_processes(self):
+    for p in self._managed_processes:
+      if not p.is_alive():
+        continue
+
+      # We terminate the process with `SIGINT` since using `terminate` or
+      # `SIGKILL` doesn't kill any grandchild processes and we don't have
+      # `psutil` available to easily query all children.
+      os.kill(p.pid, signal.SIGINT)
+
+
+def wait_until(
+    condition_function: Callable[[], bool],
+    timeout_secs: float = 3.0,
+    polling_interval_secs: float = 0.1,
+):
+  """Waits until a condition function returns True."""
+
+  start_time_secs = time.time()
+
+  while not condition_function():
+    if time.time() - start_time_secs > timeout_secs:
+      raise TimeoutError(
+          f'Condition not met within timeout: {timeout_secs} seconds'
+      )
+
+    time.sleep(polling_interval_secs)
+
+
+def read_file_contents(file: pathlib.Path) -> str:
+  with open(file, 'r') as f:
+    return f.read()
+
+
+def read_eventual_file_contents(file: pathlib.Path) -> str:
+  wait_until(lambda: file.is_file() and file.stat().st_size > 0)
+  return read_file_contents(file)
+
+
+if __name__ == '__main__':
+  ci_test_lib.main()
diff --git a/ci/ci_test_lib.py b/ci/ci_test_lib.py
new file mode 100644
index 0000000..2d70d3f
--- /dev/null
+++ b/ci/ci_test_lib.py
@@ -0,0 +1,86 @@
+# Copyright 2024, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Testing utilities for tests in the CI package."""
+
+import logging
+import os
+import pathlib
+import shutil
+import signal
+import subprocess
+import tempfile
+import unittest
+
+
+# Export the TestCase class to reduce the number of imports tests have to list.
+TestCase = unittest.TestCase
+
+
+def process_alive(pid):
+  """Check for the existence of a pid."""
+
+  try:
+    os.kill(pid, 0)
+  except OSError:
+    return False
+
+  return True
+
+
+class TemporaryProcessSession:
+
+  def __init__(self, test_case: TestCase):
+    self._created_processes = []
+    test_case.addCleanup(self.cleanup)
+
+  def create(self, args, kwargs):
+    p = subprocess.Popen(*args, **kwargs, start_new_session=True)
+    self._created_processes.append(p)
+    return p
+
+  def cleanup(self):
+    for p in self._created_processes:
+      if not process_alive(p.pid):
+        continue
+      os.killpg(os.getpgid(p.pid), signal.SIGKILL)
+
+
+class TestTemporaryDirectory:
+
+  def __init__(self, delete: bool):
+    self._delete = delete
+
+  @classmethod
+  def create(cls, test_case: TestCase, delete: bool = True):
+    temp_dir = TestTemporaryDirectory(delete)
+    temp_dir._dir = pathlib.Path(tempfile.mkdtemp())
+    test_case.addCleanup(temp_dir.cleanup)
+    return temp_dir._dir
+
+  def get_dir(self):
+    return self._dir
+
+  def cleanup(self):
+    if not self._delete:
+      return
+    shutil.rmtree(self._dir, ignore_errors=True)
+
+
+def main():
+
+  # Disable logging since it breaks the TF Python test output parser.
+  # TODO(hzalek): Use TF's `test-output-file` option to re-enable logging.
+  logging.getLogger().disabled = True
+
+  unittest.main()
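
For reference, a minimal usage sketch of these helpers, mirroring how build_test_suites_local_test.py composes them (the `sleep` command is just a stand-in):

    import ci_test_lib


    class ExampleTest(ci_test_lib.TestCase):

      def setUp(self):
        # Both helpers register cleanup on the test case, so stray processes
        # and temporary directories are reclaimed even when a test fails.
        self.processes = ci_test_lib.TemporaryProcessSession(self)
        self.temp_dir = ci_test_lib.TestTemporaryDirectory.create(self)

      def test_process_stays_alive(self):
        # create() takes the (args, kwargs) pair it forwards to subprocess.Popen.
        p = self.processes.create((['sleep', '30'],), {'cwd': self.temp_dir})
        self.assertTrue(ci_test_lib.process_alive(p.pid))


    if __name__ == '__main__':
      ci_test_lib.main()
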
diff --git a/core/Makefile b/core/Makefile
index 9d77ec1..d09f54f 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -1469,8 +1469,13 @@
 boototapackage_4k: $(BUILT_BOOT_OTA_PACKAGE_4K)
 .PHONY: boototapackage_4k
 
+ifeq ($(BOARD_16K_OTA_MOVE_VENDOR),true)
+$(eval $(call copy-one-file,$(BUILT_BOOT_OTA_PACKAGE_4K),$(TARGET_OUT_VENDOR)/boot_otas/boot_ota_4k.zip))
+$(eval $(call copy-one-file,$(BUILT_BOOT_OTA_PACKAGE_16K),$(TARGET_OUT_VENDOR)/boot_otas/boot_ota_16k.zip))
+else
 $(eval $(call copy-one-file,$(BUILT_BOOT_OTA_PACKAGE_4K),$(TARGET_OUT)/boot_otas/boot_ota_4k.zip))
 $(eval $(call copy-one-file,$(BUILT_BOOT_OTA_PACKAGE_16K),$(TARGET_OUT)/boot_otas/boot_ota_16k.zip))
+endif # BOARD_16K_OTA_MOVE_VENDOR == true
 
 ALL_DEFAULT_INSTALLED_MODULES += $(TARGET_OUT)/boot_otas/boot_ota_4k.zip
 ALL_DEFAULT_INSTALLED_MODULES += $(TARGET_OUT)/boot_otas/boot_ota_16k.zip
@@ -6206,6 +6211,8 @@
     echo "virtual_ab_retrofit=true" >> $(1))
   $(if $(PRODUCT_VIRTUAL_AB_COW_VERSION), \
     echo "virtual_ab_cow_version=$(PRODUCT_VIRTUAL_AB_COW_VERSION)" >> $(1))
+  $(if $(PRODUCT_VIRTUAL_AB_COMPRESSION_FACTOR), \
+    echo "virtual_ab_compression_factor=$(PRODUCT_VIRTUAL_AB_COMPRESSION_FACTOR)" >> $(1))
 endef
 
 # Copy an image file to a directory and generate a block list map file from the image,
diff --git a/core/OWNERS b/core/OWNERS
index 1c3d017..35ea83d 100644
--- a/core/OWNERS
+++ b/core/OWNERS
@@ -11,5 +11,3 @@
 # For Ravenwood test configs
 per-file ravenwood_test_config_template.xml = jsharkey@google.com,omakoto@google.com
 
-# For binary_translation
-per-file berberis_test.mk = levarum@google.com,khim@google.com,dimitry@google.com
diff --git a/core/android_soong_config_vars.mk b/core/android_soong_config_vars.mk
index 4e42a21..b24d304 100644
--- a/core/android_soong_config_vars.mk
+++ b/core/android_soong_config_vars.mk
@@ -44,47 +44,9 @@
   BRANCH_DEFAULT_MODULE_BUILD_FROM_SOURCE := false
 endif
 
-ifneq ($(SANITIZE_TARGET)$(EMMA_INSTRUMENT_FRAMEWORK),)
-  # Always use sources when building the framework with Java coverage or
-  # sanitized builds as they both require purpose built prebuilts which we do
-  # not provide.
-  BRANCH_DEFAULT_MODULE_BUILD_FROM_SOURCE := true
-endif
-
-ifneq ($(CLANG_COVERAGE)$(NATIVE_COVERAGE_PATHS),)
-  # Always use sources when building with clang coverage and native coverage.
-  # It is possible that there are certain situations when building with coverage
-  # would work with prebuilts, e.g. when the coverage is not being applied to
-  # modules for which we provide prebuilts. Unfortunately, determining that
-  # would require embedding knowledge of which coverage paths affect which
-  # modules here. That would duplicate a lot of information, add yet another
-  # location  module authors have to update and complicate the logic here.
-  # For nowe we will just always build from sources when doing coverage builds.
-  BRANCH_DEFAULT_MODULE_BUILD_FROM_SOURCE := true
-endif
-
-# ART does not provide linux_bionic variants needed for products that
-# set HOST_CROSS_OS=linux_bionic.
-ifeq (linux_bionic,${HOST_CROSS_OS})
-  BRANCH_DEFAULT_MODULE_BUILD_FROM_SOURCE := true
-endif
-
-# ART does not provide host side arm64 variants needed for products that
-# set HOST_CROSS_ARCH=arm64.
-ifeq (arm64,${HOST_CROSS_ARCH})
-  BRANCH_DEFAULT_MODULE_BUILD_FROM_SOURCE := true
-endif
-
 ifneq (,$(MODULE_BUILD_FROM_SOURCE))
   # Keep an explicit setting.
-else ifeq (,$(filter docs sdk win_sdk sdk_addon,$(MAKECMDGOALS))$(findstring com.google.android.conscrypt,$(PRODUCT_PACKAGES))$(findstring com.google.android.go.conscrypt,$(PRODUCT_PACKAGES)))
-  # Prebuilt module SDKs require prebuilt modules to work, and currently
-  # prebuilt modules are only provided for com.google.android(.go)?.xxx. If we can't
-  # find one of them in PRODUCT_PACKAGES then assume com.android.xxx are in use,
-  # and disable prebuilt SDKs. In particular this applies to AOSP builds.
-  #
-  # However, docs/sdk/win_sdk/sdk_addon builds might not include com.google.android.xxx
-  # packages, so for those we respect the default behavior.
+else ifeq (,$(filter docs sdk win_sdk sdk_addon,$(MAKECMDGOALS)))
   MODULE_BUILD_FROM_SOURCE := true
 else ifneq (,$(PRODUCT_MODULE_BUILD_FROM_SOURCE))
   # Let products override the branch default.
@@ -170,6 +132,18 @@
 $(call add_soong_config_var_value,ANDROID,avf_enabled,$(PRODUCT_AVF_ENABLED))
 endif
 
+ifdef PRODUCT_AVF_MICRODROID_GUEST_GKI_VERSION
+$(call add_soong_config_var_value,ANDROID,avf_microdroid_guest_gki_version,$(PRODUCT_AVF_MICRODROID_GUEST_GKI_VERSION))
+endif
+
+ifdef PRODUCT_MEMCG_V2_FORCE_ENABLED
+$(call add_soong_config_var_value,ANDROID,memcg_v2_force_enabled,$(PRODUCT_MEMCG_V2_FORCE_ENABLED))
+endif
+
+ifdef PRODUCT_CGROUP_V2_SYS_APP_ISOLATION_ENABLED
+$(call add_soong_config_var_value,ANDROID,cgroup_v2_sys_app_isolation,$(PRODUCT_CGROUP_V2_SYS_APP_ISOLATION_ENABLED))
+endif
+
 $(call add_soong_config_var_value,ANDROID,release_avf_allow_preinstalled_apps,$(RELEASE_AVF_ALLOW_PREINSTALLED_APPS))
 $(call add_soong_config_var_value,ANDROID,release_avf_enable_device_assignment,$(RELEASE_AVF_ENABLE_DEVICE_ASSIGNMENT))
 $(call add_soong_config_var_value,ANDROID,release_avf_enable_dice_changes,$(RELEASE_AVF_ENABLE_DICE_CHANGES))
@@ -179,9 +153,12 @@
 $(call add_soong_config_var_value,ANDROID,release_avf_enable_vendor_modules,$(RELEASE_AVF_ENABLE_VENDOR_MODULES))
 $(call add_soong_config_var_value,ANDROID,release_avf_enable_virt_cpufreq,$(RELEASE_AVF_ENABLE_VIRT_CPUFREQ))
 $(call add_soong_config_var_value,ANDROID,release_avf_microdroid_kernel_version,$(RELEASE_AVF_MICRODROID_KERNEL_VERSION))
+$(call add_soong_config_var_value,ANDROID,release_avf_support_custom_vm_with_paravirtualized_devices,$(RELEASE_AVF_SUPPORT_CUSTOM_VM_WITH_PARAVIRTUALIZED_DEVICES))
 
 $(call add_soong_config_var_value,ANDROID,release_binder_death_recipient_weak_from_jni,$(RELEASE_BINDER_DEATH_RECIPIENT_WEAK_FROM_JNI))
 
+$(call add_soong_config_var_value,ANDROID,release_package_libandroid_runtime_punch_holes,$(RELEASE_PACKAGE_LIBANDROID_RUNTIME_PUNCH_HOLES))
+
 $(call add_soong_config_var_value,ANDROID,release_selinux_data_data_ignore,$(RELEASE_SELINUX_DATA_DATA_IGNORE))
 
 # Enable system_server optimizations by default unless explicitly set or if
diff --git a/core/base_rules.mk b/core/base_rules.mk
index b8aa5fe..4c92814 100644
--- a/core/base_rules.mk
+++ b/core/base_rules.mk
@@ -694,6 +694,16 @@
 endif
 
 ###########################################################
+## SOONG INSTALL PAIRS
+###########################################################
+# Make this module's targets depend on the files that Soong installs for it,
+# i.e. the destination of each src:dest pair in LOCAL_SOONG_INSTALL_PAIRS.
+ifneq (,$(LOCAL_SOONG_INSTALLED_MODULE))
+$(my_all_targets): \
+    $(foreach f, $(LOCAL_SOONG_INSTALL_PAIRS),\
+      $(word 2,$(subst :,$(space),$(f))))
+endif
+
+###########################################################
 ## Compatibility suite files.
 ###########################################################
 ifdef LOCAL_COMPATIBILITY_SUITE
diff --git a/core/config.mk b/core/config.mk
index 22ec292..5842594 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -603,8 +603,6 @@
 prebuilt_build_tools_bin := $(prebuilt_build_tools)/$(HOST_PREBUILT_TAG)/asan/bin
 endif
 
-USE_PREBUILT_SDK_TOOLS_IN_PLACE := true
-
 # Work around for b/68406220
 # This should match the soong version.
 USE_D8 := true
diff --git a/core/envsetup.mk b/core/envsetup.mk
index 7f9cbad..1c3a1b3 100644
--- a/core/envsetup.mk
+++ b/core/envsetup.mk
@@ -57,18 +57,6 @@
   KEEP_VNDK ?= true
 endif
 
-ifeq ($(KEEP_VNDK),true)
-  # Starting in Android U, non-VNDK devices not supported
-  # WARNING: DO NOT CHANGE: if you are downstream of AOSP, and you change this, without
-  # letting upstream know it's important to you, we may do cleanup which breaks this
-  # significantly. Please let us know if you are changing this.
-  ifndef BOARD_VNDK_VERSION
-  # READ WARNING - DO NOT CHANGE
-  BOARD_VNDK_VERSION := current
-  # READ WARNING - DO NOT CHANGE
-  endif
-endif
-
 # ---------------------------------------------------------------
 # Set up version information
 include $(BUILD_SYSTEM)/version_util.mk
diff --git a/core/main.mk b/core/main.mk
index bc8adde..b798b49 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -1222,8 +1222,7 @@
 # Returns modules included automatically as a result of certain BoardConfig
 # variables being set.
 define auto-included-modules
-  $(if $(and $(BOARD_VNDK_VERSION),$(filter true,$(KEEP_VNDK))),vndk_package) \
-  $(if $(filter true,$(KEEP_VNDK)),,llndk_in_system) \
+  llndk_in_system \
   $(if $(DEVICE_MANIFEST_FILE),vendor_manifest.xml) \
   $(if $(DEVICE_MANIFEST_SKUS),$(foreach sku, $(DEVICE_MANIFEST_SKUS),vendor_manifest_$(sku).xml)) \
   $(if $(ODM_MANIFEST_FILES),odm_manifest.xml) \
diff --git a/core/product.mk b/core/product.mk
index aa9a9a3..e8db0f5 100644
--- a/core/product.mk
+++ b/core/product.mk
@@ -325,6 +325,13 @@
 # set this variable to prevent OTA failures.
 _product_list_vars += PRODUCT_OTA_ENFORCE_VINTF_KERNEL_REQUIREMENTS
 
+# If set to true, this product forces HIDL to be enabled by declaring android.hidl.manager
+# and android.hidl.token in the framework manifest. The product will also need to add the
+# 'hwservicemanager' service to PRODUCT_PACKAGES if its SHIPPING_API_LEVEL is greater than 34.
+# This should only be used during bringup for devices that are targeting FCM 202404 and still
+# have partner-owned HIDL interfaces that are being converted to AIDL.
+_product_single_value_vars += PRODUCT_HIDL_ENABLED
+
 # If set to true, this product builds a generic OTA package, which installs generic system images
 # onto matching devices. The product may only build a subset of system images (e.g. only
 # system.img), so devices need to install the package in a system-only OTA manner.
@@ -421,6 +428,12 @@
 # If true, kernel with modules will be used for Microdroid VMs.
 _product_single_value_vars += PRODUCT_AVF_KERNEL_MODULES_ENABLED
 
+# If true, the memory controller will be force-enabled in the cgroup v2 hierarchy
+_product_single_value_vars += PRODUCT_MEMCG_V2_FORCE_ENABLED
+
+# If true, the cgroup v2 hierarchy will be split into apps/system subtrees
+_product_single_value_vars += PRODUCT_CGROUP_V2_SYS_APP_ISOLATION_ENABLED
+
 # List of .json files to be merged/compiled into vendor/etc/linker.config.pb
 _product_list_vars += PRODUCT_VENDOR_LINKER_CONFIG_FRAGMENTS
 
@@ -440,6 +453,9 @@
 # specified we default to COW version 2 in update_engine for backwards compatibility
 _product_single_value_vars += PRODUCT_VIRTUAL_AB_COW_VERSION
 
+# Specifies the maximum number of bytes to be compressed at once during an OTA. Options: 4096, 8192, 16384, 32768, 65536, 131072, 262144.
+_product_single_value_vars += PRODUCT_VIRTUAL_AB_COMPRESSION_FACTOR
+
 # If set, determines whether the build system checks vendor seapp contexts violations.
 _product_single_value_vars += PRODUCT_CHECK_VENDOR_SEAPP_VIOLATIONS
 
diff --git a/core/product_config.mk b/core/product_config.mk
index d16c38d..4eeac95 100644
--- a/core/product_config.mk
+++ b/core/product_config.mk
@@ -630,6 +630,15 @@
 endif
 endef
 
+ifndef PRODUCT_VIRTUAL_AB_COW_VERSION
+  PRODUCT_VIRTUAL_AB_COW_VERSION := 2
+  ifdef PRODUCT_SHIPPING_API_LEVEL
+    ifeq (true,$(call math_gt_or_eq,$(PRODUCT_SHIPPING_API_LEVEL),34))
+      PRODUCT_VIRTUAL_AB_COW_VERSION := 3
+    endif
+  endif
+endif
+
 # Copy and check the value of each PRODUCT_BUILD_*_IMAGE variable
 $(foreach image, \
     PVMFW \
diff --git a/core/soong_config.mk b/core/soong_config.mk
index e382407..534270e 100644
--- a/core/soong_config.mk
+++ b/core/soong_config.mk
@@ -52,8 +52,6 @@
 
 $(call add_json_bool, Release_aidl_use_unfrozen,         $(RELEASE_AIDL_USE_UNFROZEN))
 
-$(call add_json_str,  Platform_min_supported_target_sdk_version, $(PLATFORM_MIN_SUPPORTED_TARGET_SDK_VERSION))
-
 $(call add_json_bool, Allow_missing_dependencies,        $(filter true,$(ALLOW_MISSING_DEPENDENCIES)))
 $(call add_json_bool, Unbundled_build,                   $(TARGET_BUILD_UNBUNDLED))
 $(call add_json_list, Unbundled_build_apps,              $(TARGET_BUILD_APPS))
@@ -152,10 +150,6 @@
 $(call add_json_str,  BtConfigIncludeDir,                $(BOARD_BLUETOOTH_BDROID_BUILDCFG_INCLUDE_DIR))
 $(call add_json_list, DeviceKernelHeaders,               $(TARGET_DEVICE_KERNEL_HEADERS) $(TARGET_BOARD_KERNEL_HEADERS) $(TARGET_PRODUCT_KERNEL_HEADERS))
 $(call add_json_str,  VendorApiLevel,                    $(BOARD_API_LEVEL))
-ifeq ($(KEEP_VNDK),true)
-$(call add_json_str,  DeviceVndkVersion,                 $(BOARD_VNDK_VERSION))
-$(call add_json_str,  Platform_vndk_version,             $(PLATFORM_VNDK_VERSION))
-endif
 $(call add_json_list, ExtraVndkVersions,                 $(PRODUCT_EXTRA_VNDK_VERSIONS))
 $(call add_json_list, DeviceSystemSdkVersions,           $(BOARD_SYSTEMSDK_VERSIONS))
 $(call add_json_str,  RecoverySnapshotVersion,           $(RECOVERY_SNAPSHOT_VERSION))
diff --git a/core/tasks/automotive-sdv-tests.mk b/core/tasks/automotive-sdv-tests.mk
new file mode 100644
index 0000000..12706ce
--- /dev/null
+++ b/core/tasks/automotive-sdv-tests.mk
@@ -0,0 +1,61 @@
+# Copyright (C) 2022 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+.PHONY: automotive-sdv-tests
+
+automotive-sdv-tests-zip := $(PRODUCT_OUT)/automotive-sdv-tests.zip
+# Create an artifact to include a list of test config files in automotive-sdv-tests.
+automotive-sdv-tests-list-zip := $(PRODUCT_OUT)/automotive-sdv-tests_list.zip
+# Create an artifact to include all test config files in automotive-sdv-tests.
+automotive-sdv-tests-configs-zip := $(PRODUCT_OUT)/automotive-sdv-tests_configs.zip
+my_host_shared_lib_for_automotive_sdv_tests := $(call copy-many-files,$(COMPATIBILITY.automotive-sdv-tests.HOST_SHARED_LIBRARY.FILES))
+automotive_sdv_tests_host_shared_libs_zip := $(PRODUCT_OUT)/automotive-sdv-tests_host-shared-libs.zip
+
+$(automotive-sdv-tests-zip) : .KATI_IMPLICIT_OUTPUTS := $(automotive-sdv-tests-list-zip) $(automotive-sdv-tests-configs-zip) $(automotive_sdv_tests_host_shared_libs_zip)
+$(automotive-sdv-tests-zip) : PRIVATE_automotive_sdv_tests_list := $(PRODUCT_OUT)/automotive-sdv-tests_list
+$(automotive-sdv-tests-zip) : PRIVATE_HOST_SHARED_LIBS := $(my_host_shared_lib_for_automotive_sdv_tests)
+$(automotive-sdv-tests-zip) : PRIVATE_automotive_host_shared_libs_zip := $(automotive_sdv_tests_host_shared_libs_zip)
+$(automotive-sdv-tests-zip) : $(COMPATIBILITY.automotive-sdv-tests.FILES) $(my_host_shared_lib_for_automotive_sdv_tests) $(SOONG_ZIP)
+	rm -f $@-shared-libs.list
+	echo $(sort $(COMPATIBILITY.automotive-sdv-tests.FILES)) | tr " " "\n" > $@.list
+	grep $(HOST_OUT_TESTCASES) $@.list > $@-host.list || true
+	grep -e .*\\.config$$ $@-host.list > $@-host-test-configs.list || true
+	$(hide) for shared_lib in $(PRIVATE_HOST_SHARED_LIBS); do \
+	  echo $$shared_lib >> $@-host.list; \
+	  echo $$shared_lib >> $@-shared-libs.list; \
+	done
+	grep $(HOST_OUT_TESTCASES) $@-shared-libs.list > $@-host-shared-libs.list || true
+	grep $(TARGET_OUT_TESTCASES) $@.list > $@-target.list || true
+	grep -e .*\\.config$$ $@-target.list > $@-target-test-configs.list || true
+	$(hide) $(SOONG_ZIP) -d -o $@ -P host -C $(HOST_OUT) -l $@-host.list -P target -C $(PRODUCT_OUT) -l $@-target.list
+	$(hide) $(SOONG_ZIP) -d -o $(automotive-sdv-tests-configs-zip) \
+	  -P host -C $(HOST_OUT) -l $@-host-test-configs.list \
+	  -P target -C $(PRODUCT_OUT) -l $@-target-test-configs.list
+	$(SOONG_ZIP) -d -o $(PRIVATE_automotive_host_shared_libs_zip) \
+	  -P host -C $(HOST_OUT) -l $@-host-shared-libs.list
+	rm -f $(PRIVATE_automotive_sdv_tests_list)
+	$(hide) grep -e .*\\.config$$ $@-host.list | sed s%$(HOST_OUT)%host%g > $(PRIVATE_automotive_sdv_tests_list)
+	$(hide) grep -e .*\\.config$$ $@-target.list | sed s%$(PRODUCT_OUT)%target%g >> $(PRIVATE_automotive_sdv_tests_list)
+	$(hide) $(SOONG_ZIP) -d -o $(automotive-sdv-tests-list-zip) -C $(dir $@) -f $(PRIVATE_automotive_sdv_tests_list)
+	rm -f $@.list $@-host.list $@-target.list $@-host-test-configs.list $@-target-test-configs.list \
+	  $@-shared-libs.list $@-host-shared-libs.list $(PRIVATE_automotive_sdv_tests_list)
+
+automotive-sdv-tests: $(automotive-sdv-tests-zip)
+$(call dist-for-goals, automotive-sdv-tests, $(automotive-sdv-tests-zip) $(automotive-sdv-tests-list-zip) $(automotive-sdv-tests-configs-zip) $(automotive_sdv_tests_host_shared_libs_zip))
+
+$(call declare-1p-container,$(automotive-sdv-tests-zip),)
+$(call declare-container-license-deps,$(automotive-sdv-tests-zip),$(COMPATIBILITY.automotive-sdv-tests.FILES) $(my_host_shared_lib_for_automotive_sdv_tests),$(PRODUCT_OUT)/:/)
+
+tests: automotive-sdv-tests
diff --git a/core/tasks/module-info.mk b/core/tasks/module-info.mk
index 8546828..aa695eb 100644
--- a/core/tasks/module-info.mk
+++ b/core/tasks/module-info.mk
@@ -39,7 +39,7 @@
 			$(call write-optional-json-list, "srcjars", $(sort $(ALL_MODULES.$(m).SRCJARS))) \
 			$(call write-optional-json-list, "classes_jar", $(sort $(ALL_MODULES.$(m).CLASSES_JAR))) \
 			$(call write-optional-json-list, "test_mainline_modules", $(sort $(ALL_MODULES.$(m).TEST_MAINLINE_MODULES))) \
-			$(call write-optional-json-bool, $(ALL_MODULES.$(m).IS_UNIT_TEST)) \
+			$(call write-optional-json-bool, "is_unit_test", $(ALL_MODULES.$(m).IS_UNIT_TEST)) \
 			$(call write-optional-json-list, "test_options_tags", $(sort $(ALL_MODULES.$(m).TEST_OPTIONS_TAGS))) \
 			$(call write-optional-json-list, "data", $(sort $(ALL_MODULES.$(m).TEST_DATA))) \
 			$(call write-optional-json-list, "runtime_dependencies", $(sort $(ALL_MODULES.$(m).LOCAL_RUNTIME_LIBRARIES))) \
diff --git a/core/version_util.mk b/core/version_util.mk
index 610cdaf..eb568be 100644
--- a/core/version_util.mk
+++ b/core/version_util.mk
@@ -221,10 +221,8 @@
 endif
 .KATI_READONLY := HAS_BUILD_NUMBER
 
-ifndef PLATFORM_MIN_SUPPORTED_TARGET_SDK_VERSION
-  # Used to set minimum supported target sdk version. Apps targeting sdk
-  # version lower than the set value will result in a warning being shown
-  # when any activity from the app is started.
-  PLATFORM_MIN_SUPPORTED_TARGET_SDK_VERSION := 28
+ifdef PLATFORM_MIN_SUPPORTED_TARGET_SDK_VERSION
+  $(error Do not set PLATFORM_MIN_SUPPORTED_TARGET_SDK_VERSION directly. Use RELEASE_PLATFORM_MIN_SUPPORTED_TARGET_SDK_VERSION. value: $(PLATFORM_MIN_SUPPORTED_TARGET_SDK_VERSION))
 endif
+PLATFORM_MIN_SUPPORTED_TARGET_SDK_VERSION := $(RELEASE_PLATFORM_MIN_SUPPORTED_TARGET_SDK_VERSION)
 .KATI_READONLY := PLATFORM_MIN_SUPPORTED_TARGET_SDK_VERSION
diff --git a/envsetup.sh b/envsetup.sh
index fbe522d..ca75132 100644
--- a/envsetup.sh
+++ b/envsetup.sh
@@ -1103,6 +1103,48 @@
     $ADB "${@}"
 }
 
+function run_tool_with_logging() {
+  # Run commands in a subshell for us to handle forced terminations with a trap
+  # handler.
+  (
+  local tool_tag="$1"
+  shift
+  local tool_binary="$1"
+  shift
+
+  # If logging is not enabled or the logger is not configured, run the original command and return.
+  if [[ "${ANDROID_ENABLE_TOOL_LOGGING}" != "true" ]] || [[ -z "${ANDROID_TOOL_LOGGER}" ]]; then
+     "${tool_binary}" "${@}"
+     return $?
+  fi
+
+  # Otherwise, run the original command and call the logger when done.
+  local start_time
+  start_time=$(date +%s.%N)
+  local logger=${ANDROID_TOOL_LOGGER}
+
+  # Install a trap to call the logger even when the process terminates abnormally.
+  # The logger is run in the background and its output suppressed to avoid
+  # interference with the user flow.
+  trap '
+  exit_code=$?;
+  # Remove the trap to prevent duplicate log.
+  trap - EXIT;
+  "${logger}" \
+    --tool_tag "${tool_tag}" \
+    --start_timestamp "${start_time}" \
+    --end_timestamp "$(date +%s.%N)" \
+    --tool_args \""${@}"\" \
+    --exit_code "${exit_code}" \
+    > /dev/null 2>&1 &
+  exit ${exit_code}
+  ' SIGINT SIGTERM SIGQUIT EXIT
+
+  # Run the original command.
+  "${tool_binary}" "${@}"
+  )
+}
+
 # simplified version of ps; output in the form
 # <pid> <procname>
 function qpid() {
diff --git a/target/board/BoardConfigMainlineCommon.mk b/target/board/BoardConfigMainlineCommon.mk
index 2b17349..b5e3dc2 100644
--- a/target/board/BoardConfigMainlineCommon.mk
+++ b/target/board/BoardConfigMainlineCommon.mk
@@ -24,11 +24,6 @@
 # the devices with metadata parition
 BOARD_USES_METADATA_PARTITION := true
 
-ifeq ($(KEEP_VNDK),true)
-# Default is current, but allow devices to override vndk version if needed.
-BOARD_VNDK_VERSION ?= current
-endif
-
 # 64 bit mediadrmserver
 TARGET_ENABLE_MEDIADRM_64 := true
 
diff --git a/target/product/aosp_arm64.mk b/target/product/aosp_arm64.mk
index d3514a5..d944615 100644
--- a/target/product/aosp_arm64.mk
+++ b/target/product/aosp_arm64.mk
@@ -72,3 +72,5 @@
 PRODUCT_DEVICE := generic_arm64
 PRODUCT_BRAND := Android
 PRODUCT_MODEL := AOSP on ARM64
+
+PRODUCT_NO_BIONIC_PAGE_SIZE_MACRO := true
diff --git a/target/product/aosp_product.mk b/target/product/aosp_product.mk
index f72f2df..3a5b622 100644
--- a/target/product/aosp_product.mk
+++ b/target/product/aosp_product.mk
@@ -34,7 +34,6 @@
     PhotoTable \
     preinstalled-packages-platform-aosp-product.xml \
     ThemePicker \
-    WallpaperPicker \
 
 # Telephony:
 #   Provide a APN configuration to GSI product
diff --git a/target/product/aosp_x86_64.mk b/target/product/aosp_x86_64.mk
index 3040dd3..4344f50 100644
--- a/target/product/aosp_x86_64.mk
+++ b/target/product/aosp_x86_64.mk
@@ -74,3 +74,5 @@
 PRODUCT_DEVICE := generic_x86_64
 PRODUCT_BRAND := Android
 PRODUCT_MODEL := AOSP on x86_64
+
+PRODUCT_NO_BIONIC_PAGE_SIZE_MACRO := true
diff --git a/target/product/base_system.mk b/target/product/base_system.mk
index 8dc680b..57e8275 100644
--- a/target/product/base_system.mk
+++ b/target/product/base_system.mk
@@ -172,6 +172,7 @@
     libjpeg \
     liblog \
     libm.bootstrap \
+    libmdnssd \
     libmedia \
     libmedia_jni \
     libmediandk \
@@ -422,8 +423,7 @@
 
 PRODUCT_PACKAGES += init.usb.rc init.usb.configfs.rc
 
-PRODUCT_COPY_FILES += \
-    system/core/rootdir/etc/hosts:system/etc/hosts
+PRODUCT_PACKAGES += etc_hosts
 
 PRODUCT_PACKAGES += init.zygote32.rc
 PRODUCT_VENDOR_PROPERTIES += ro.zygote?=zygote32
@@ -483,10 +483,8 @@
 PRODUCT_COPY_FILES += $(call add-to-product-copy-files-if-exists,\
     frameworks/base/config/preloaded-classes:system/etc/preloaded-classes)
 
-# Note: it is acceptable to not have a dirty-image-objects file. In that case, the special bin
-#       for known dirty objects in the image will be empty.
-PRODUCT_COPY_FILES += $(call add-to-product-copy-files-if-exists,\
-    frameworks/base/config/dirty-image-objects:system/etc/dirty-image-objects)
+# Enable dirty image object binning to reduce dirty pages in the image.
+PRODUCT_PACKAGES += dirty-image-objects
 
 $(call inherit-product, $(SRC_TARGET_DIR)/product/runtime_libart.mk)
 
diff --git a/target/product/gsi/Android.mk b/target/product/gsi/Android.mk
index 3bb65ac..36897fe 100644
--- a/target/product/gsi/Android.mk
+++ b/target/product/gsi/Android.mk
@@ -1,96 +1,22 @@
 LOCAL_PATH:= $(call my-dir)
 
 #####################################################################
-# list of vndk libraries from the source code.
-INTERNAL_VNDK_LIB_LIST := $(SOONG_VNDK_LIBRARIES_FILE)
-
-#####################################################################
 # Check the generated list against the latest list stored in the
 # source tree
-.PHONY: check-vndk-list
+.PHONY: check-abi-dump-list
 
 # Check if vndk list is changed
-droidcore: check-vndk-list
+droidcore: check-abi-dump-list
 
-check-vndk-list-timestamp := $(call intermediates-dir-for,PACKAGING,vndk)/check-list-timestamp
-check-vndk-abi-dump-list-timestamp := $(call intermediates-dir-for,PACKAGING,vndk)/check-abi-dump-list-timestamp
+check-abi-dump-list-timestamp := $(call intermediates-dir-for,PACKAGING,vndk)/check-abi-dump-list-timestamp
 
-ifeq ($(TARGET_IS_64_BIT)|$(TARGET_2ND_ARCH),true|)
-# TODO(b/110429754) remove this condition when we support 64-bit-only device
-check-vndk-list: ;
-else ifeq ($(TARGET_SKIP_CURRENT_VNDK),true)
-check-vndk-list: ;
-else ifeq ($(BOARD_VNDK_VERSION),)
-check-vndk-list: ;
-else
-check-vndk-list: $(check-vndk-list-timestamp)
-ifneq ($(SKIP_ABI_CHECKS),true)
-check-vndk-list: $(check-vndk-abi-dump-list-timestamp)
+# The ABI tool does not support sanitizer and coverage builds.
+ifeq (,$(filter true,$(SKIP_ABI_CHECKS) $(CLANG_COVERAGE)))
+ifeq (,$(SANITIZE_TARGET))
+check-abi-dump-list: $(check-abi-dump-list-timestamp)
 endif
 endif
 
-_vndk_check_failure_message := " error: VNDK library list has been changed.\n"
-ifeq (REL,$(PLATFORM_VERSION_CODENAME))
-_vndk_check_failure_message += "       Changing the VNDK library list is not allowed in API locked branches."
-else
-_vndk_check_failure_message += "       Run \`update-vndk-list.sh\` to update $(LATEST_VNDK_LIB_LIST)"
-endif
-
-# The *-ndk_platform.so libraries no longer exist and are removed from the VNDK set. However, they
-# can exist if NEED_AIDL_NDK_PLATFORM_BACKEND is set to true for legacy devices. Don't be bothered
-# with the extraneous libraries.
-ifeq ($(NEED_AIDL_NDK_PLATFORM_BACKEND),true)
-	_READ_INTERNAL_VNDK_LIB_LIST := sed /ndk_platform.so/d $(INTERNAL_VNDK_LIB_LIST)
-else
-	_READ_INTERNAL_VNDK_LIB_LIST := cat $(INTERNAL_VNDK_LIB_LIST)
-endif
-
-$(check-vndk-list-timestamp): $(INTERNAL_VNDK_LIB_LIST) $(LATEST_VNDK_LIB_LIST) $(HOST_OUT_EXECUTABLES)/update-vndk-list.sh
-	$(hide) ($(_READ_INTERNAL_VNDK_LIB_LIST) | sort | \
-	diff --old-line-format="Removed %L" \
-	  --new-line-format="Added %L" \
-	  --unchanged-line-format="" \
-	  <(cat $(LATEST_VNDK_LIB_LIST) | sort) - \
-	  || ( echo -e $(_vndk_check_failure_message); exit 1 ))
-	$(hide) mkdir -p $(dir $@)
-	$(hide) touch $@
-
-#####################################################################
-# Script to update the latest VNDK lib list
-include $(CLEAR_VARS)
-LOCAL_MODULE := update-vndk-list.sh
-LOCAL_LICENSE_KINDS := SPDX-license-identifier-Apache-2.0
-LOCAL_LICENSE_CONDITIONS := notice
-LOCAL_NOTICE_FILE := build/soong/licenses/LICENSE
-LOCAL_MODULE_CLASS := EXECUTABLES
-LOCAL_MODULE_STEM := $(LOCAL_MODULE)
-LOCAL_IS_HOST_MODULE := true
-include $(BUILD_SYSTEM)/base_rules.mk
-$(LOCAL_BUILT_MODULE): PRIVATE_INTERNAL_VNDK_LIB_LIST := $(INTERNAL_VNDK_LIB_LIST)
-$(LOCAL_BUILT_MODULE): PRIVATE_LATEST_VNDK_LIB_LIST := $(LATEST_VNDK_LIB_LIST)
-$(LOCAL_BUILT_MODULE):
-	@echo "Generate: $@"
-	@mkdir -p $(dir $@)
-	@rm -f $@
-	$(hide) echo "#!/bin/bash" > $@
-ifeq (REL,$(PLATFORM_VERSION_CODENAME))
-	$(hide) echo "echo Updating VNDK library list is NOT allowed in API locked branches." >> $@; \
-	        echo "exit 1" >> $@
-else
-	$(hide) echo "if [ -z \"\$${ANDROID_BUILD_TOP}\" ]; then" >> $@; \
-	        echo "  echo Run lunch or choosecombo first" >> $@; \
-	        echo "  exit 1" >> $@; \
-	        echo "fi" >> $@; \
-	        echo "cd \$${ANDROID_BUILD_TOP}" >> $@
-ifeq ($(NEED_AIDL_NDK_PLATFORM_BACKEND),true)
-	$(hide) echo "sed /ndk_platform.so/d $(PRIVATE_INTERNAL_VNDK_LIB_LIST) > $(PRIVATE_LATEST_VNDK_LIB_LIST)" >> $@
-else
-	$(hide) echo "cp $(PRIVATE_INTERNAL_VNDK_LIB_LIST) $(PRIVATE_LATEST_VNDK_LIB_LIST)" >> $@
-endif
-	$(hide) echo "echo $(PRIVATE_LATEST_VNDK_LIB_LIST) updated." >> $@
-endif
-	@chmod a+x $@
-
 #####################################################################
 # ABI reference dumps.
 
@@ -111,6 +37,9 @@
 endef
 
 # Subsets of LSDUMP_PATHS.
+.PHONY: findlsdumps_APEX
+findlsdumps_APEX: $(LSDUMP_PATHS_FILE) $(call filter-abi-dump-paths,APEX,$(LSDUMP_PATHS))
+
 .PHONY: findlsdumps_LLNDK
 findlsdumps_LLNDK: $(LSDUMP_PATHS_FILE) $(call filter-abi-dump-paths,LLNDK,$(LSDUMP_PATHS))
 
@@ -125,7 +54,7 @@
 
 #####################################################################
 # Check that all ABI reference dumps have corresponding
-# NDK/VNDK/PLATFORM libraries.
+# APEX/LLNDK/PLATFORM libraries.
 
 # $(1): The directory containing ABI dumps.
 # Return a list of ABI dump paths ending with .so.lsdump.
@@ -137,52 +66,47 @@
 
 # $(1): A list of tags.
 # $(2): A list of tag:path.
-# Return the file names of the ABI dumps that match the tags.
+# Return the file names of the ABI dumps that match the tags, and replace the
+# file name extensions with .so.lsdump.
 define filter-abi-dump-names
-$(notdir $(call filter-abi-dump-paths,$(1),$(2)))
+$(patsubst %.so.llndk.lsdump,%.so.lsdump, \
+  $(patsubst %.so.apex.lsdump,%.so.lsdump, \
+    $(notdir $(call filter-abi-dump-paths,$(1),$(2)))))
 endef
 
-
+VNDK_ABI_DUMP_DIR := prebuilts/abi-dumps/vndk/$(RELEASE_BOARD_API_LEVEL)
 ifeq (REL,$(PLATFORM_VERSION_CODENAME))
-    NDK_ABI_DUMP_DIR := prebuilts/abi-dumps/ndk/$(PLATFORM_SDK_VERSION)
     PLATFORM_ABI_DUMP_DIR := prebuilts/abi-dumps/platform/$(PLATFORM_SDK_VERSION)
 else
-    NDK_ABI_DUMP_DIR := prebuilts/abi-dumps/ndk/current
     PLATFORM_ABI_DUMP_DIR := prebuilts/abi-dumps/platform/current
 endif
-NDK_ABI_DUMPS := $(call find-abi-dump-paths,$(NDK_ABI_DUMP_DIR))
+VNDK_ABI_DUMPS := $(call find-abi-dump-paths,$(VNDK_ABI_DUMP_DIR))
 PLATFORM_ABI_DUMPS := $(call find-abi-dump-paths,$(PLATFORM_ABI_DUMP_DIR))
 
 # Check for superfluous lsdump files. Since LSDUMP_PATHS only covers the
 # libraries that can be built from source in the current build, and prebuilts of
 # Mainline modules may be in use, we also allow the libs in STUB_LIBRARIES for
-# NDK and platform ABIs.
+# platform ABIs.
+# In addition, libRS is allowed because it's disabled for RISC-V.
 
-$(check-vndk-abi-dump-list-timestamp): PRIVATE_LSDUMP_PATHS := $(LSDUMP_PATHS)
-$(check-vndk-abi-dump-list-timestamp): PRIVATE_STUB_LIBRARIES := $(STUB_LIBRARIES)
-$(check-vndk-abi-dump-list-timestamp):
+$(check-abi-dump-list-timestamp): PRIVATE_LSDUMP_PATHS := $(LSDUMP_PATHS)
+$(check-abi-dump-list-timestamp): PRIVATE_STUB_LIBRARIES := $(STUB_LIBRARIES)
+$(check-abi-dump-list-timestamp):
 	$(eval added_vndk_abi_dumps := $(strip $(sort $(filter-out \
-	  $(call filter-abi-dump-names,LLNDK VNDK-SP VNDK-core,$(PRIVATE_LSDUMP_PATHS)), \
+	  $(call filter-abi-dump-names,LLNDK,$(PRIVATE_LSDUMP_PATHS)) libRS.so.lsdump, \
 	  $(notdir $(VNDK_ABI_DUMPS))))))
 	$(if $(added_vndk_abi_dumps), \
 	  echo -e "Found unexpected ABI reference dump files under $(VNDK_ABI_DUMP_DIR). It is caused by mismatch between Android.bp and the dump files. Run \`find \$${ANDROID_BUILD_TOP}/$(VNDK_ABI_DUMP_DIR) '(' -name $(subst $(space), -or -name ,$(added_vndk_abi_dumps)) ')' -delete\` to delete the dump files.")
 
-	$(eval added_ndk_abi_dumps := $(strip $(sort $(filter-out \
-	  $(call filter-abi-dump-names,NDK,$(PRIVATE_LSDUMP_PATHS)) \
-	  $(addsuffix .lsdump,$(PRIVATE_STUB_LIBRARIES)), \
-	  $(notdir $(NDK_ABI_DUMPS))))))
-	$(if $(added_ndk_abi_dumps), \
-	  echo -e "Found unexpected ABI reference dump files under $(NDK_ABI_DUMP_DIR). It is caused by mismatch between Android.bp and the dump files. Run \`find \$${ANDROID_BUILD_TOP}/$(NDK_ABI_DUMP_DIR) '(' -name $(subst $(space), -or -name ,$(added_ndk_abi_dumps)) ')' -delete\` to delete the dump files.")
-
 	# TODO(b/314010764): Remove LLNDK tag after PLATFORM_SDK_VERSION is upgraded to 35.
 	$(eval added_platform_abi_dumps := $(strip $(sort $(filter-out \
-	  $(call filter-abi-dump-names,LLNDK PLATFORM,$(PRIVATE_LSDUMP_PATHS)) \
-	  $(addsuffix .lsdump,$(PRIVATE_STUB_LIBRARIES)), \
+	  $(call filter-abi-dump-names,APEX LLNDK PLATFORM,$(PRIVATE_LSDUMP_PATHS)) \
+	  $(addsuffix .lsdump,$(PRIVATE_STUB_LIBRARIES)) libRS.so.lsdump, \
 	  $(notdir $(PLATFORM_ABI_DUMPS))))))
 	$(if $(added_platform_abi_dumps), \
 	  echo -e "Found unexpected ABI reference dump files under $(PLATFORM_ABI_DUMP_DIR). It is caused by mismatch between Android.bp and the dump files. Run \`find \$${ANDROID_BUILD_TOP}/$(PLATFORM_ABI_DUMP_DIR) '(' -name $(subst $(space), -or -name ,$(added_platform_abi_dumps)) ')' -delete\` to delete the dump files.")
 
-	$(if $(added_vndk_abi_dumps)$(added_ndk_abi_dumps)$(added_platform_abi_dumps),exit 1)
+	$(if $(added_vndk_abi_dumps)$(added_platform_abi_dumps),exit 1)
 	$(hide) mkdir -p $(dir $@)
 	$(hide) touch $@
 
@@ -190,27 +114,6 @@
 # VNDK package and snapshot.
 
 include $(CLEAR_VARS)
-LOCAL_MODULE := vndk_package
-LOCAL_LICENSE_KINDS := SPDX-license-identifier-Apache-2.0
-LOCAL_LICENSE_CONDITIONS := notice
-LOCAL_NOTICE_FILE := build/soong/licenses/LICENSE
-# Filter LLNDK libs moved to APEX to avoid pulling them into /system/LIB
-LOCAL_REQUIRED_MODULES := llndk_in_system
-
-ifneq ($(TARGET_SKIP_CURRENT_VNDK),true)
-LOCAL_REQUIRED_MODULES += \
-    vndkcorevariant.libraries.txt \
-    $(addsuffix .vendor,$(VNDK_CORE_LIBRARIES)) \
-    $(addsuffix .vendor,$(VNDK_SAMEPROCESS_LIBRARIES)) \
-    $(VNDK_USING_CORE_VARIANT_LIBRARIES)
-
-LOCAL_ADDITIONAL_DEPENDENCIES += $(call module-built-files,\
-    $(addsuffix .vendor,$(VNDK_CORE_LIBRARIES) $(VNDK_SAMEPROCESS_LIBRARIES)))
-
-endif
-include $(BUILD_PHONY_PACKAGE)
-
-include $(CLEAR_VARS)
 
 LOCAL_MODULE := vndk_apex_snapshot_package
 LOCAL_LICENSE_KINDS := SPDX-license-identifier-Apache-2.0
diff --git a/target/product/gsi_release.mk b/target/product/gsi_release.mk
index 2e37366..884b419 100644
--- a/target/product/gsi_release.mk
+++ b/target/product/gsi_release.mk
@@ -44,9 +44,6 @@
 # Enable dynamic partition size
 PRODUCT_USE_DYNAMIC_PARTITION_SIZE := true
 
-# Disable the build-time debugfs restrictions on GSI builds
-PRODUCT_SET_DEBUGFS_RESTRICTIONS := false
-
 # GSI specific tasks on boot
 PRODUCT_PACKAGES += \
     gsi_skip_mount.cfg \
diff --git a/target/product/media_system.mk b/target/product/media_system.mk
index 38ba219..503c9b3 100644
--- a/target/product/media_system.mk
+++ b/target/product/media_system.mk
@@ -59,10 +59,6 @@
 PRODUCT_COPY_FILES += $(call add-to-product-copy-files-if-exists,\
     frameworks/base/config/compiled-classes-phone:system/etc/compiled-classes)
 
-# Enable dirty image object binning to reduce dirty pages in the image.
-PRODUCT_COPY_FILES += $(call add-to-product-copy-files-if-exists,\
-    frameworks/base/dirty-image-objects-phone:system/etc/dirty-image-objects)
-
 # On userdebug builds, collect more tombstones by default.
 ifneq (,$(filter userdebug eng,$(TARGET_BUILD_VARIANT)))
 PRODUCT_VENDOR_PROPERTIES += \
diff --git a/target/product/module_arm64.mk b/target/product/module_arm64.mk
index 2e8c8a7..634a03d 100644
--- a/target/product/module_arm64.mk
+++ b/target/product/module_arm64.mk
@@ -19,3 +19,5 @@
 
 PRODUCT_NAME := module_arm64
 PRODUCT_DEVICE := module_arm64
+
+PRODUCT_NO_BIONIC_PAGE_SIZE_MACRO := true
diff --git a/target/product/module_arm64only.mk b/target/product/module_arm64only.mk
index c0769bf..822ac24 100644
--- a/target/product/module_arm64only.mk
+++ b/target/product/module_arm64only.mk
@@ -19,3 +19,5 @@
 
 PRODUCT_NAME := module_arm64only
 PRODUCT_DEVICE := module_arm64only
+
+PRODUCT_NO_BIONIC_PAGE_SIZE_MACRO := true
diff --git a/target/product/module_x86_64.mk b/target/product/module_x86_64.mk
index 20f443a..9bd0264 100644
--- a/target/product/module_x86_64.mk
+++ b/target/product/module_x86_64.mk
@@ -19,3 +19,5 @@
 
 PRODUCT_NAME := module_x86_64
 PRODUCT_DEVICE := module_x86_64
+
+PRODUCT_NO_BIONIC_PAGE_SIZE_MACRO := true
diff --git a/target/product/module_x86_64only.mk b/target/product/module_x86_64only.mk
index b0d72bf..056fb90 100644
--- a/target/product/module_x86_64only.mk
+++ b/target/product/module_x86_64only.mk
@@ -19,3 +19,5 @@
 
 PRODUCT_NAME := module_x86_64only
 PRODUCT_DEVICE := module_x86_64only
+
+PRODUCT_NO_BIONIC_PAGE_SIZE_MACRO := true
diff --git a/target/product/virtual_ab_ota/android_t_baseline.mk b/target/product/virtual_ab_ota/android_t_baseline.mk
index af0f7a9..418aaa4 100644
--- a/target/product/virtual_ab_ota/android_t_baseline.mk
+++ b/target/product/virtual_ab_ota/android_t_baseline.mk
@@ -20,5 +20,3 @@
 #
 # All U+ launching devices should instead use vabc_features.mk.
 $(call inherit-product, $(SRC_TARGET_DIR)/product/virtual_ab_ota/vabc_features.mk)
-
-PRODUCT_VIRTUAL_AB_COW_VERSION ?= 2
diff --git a/target/product/virtual_ab_ota/compression.mk b/target/product/virtual_ab_ota/compression.mk
index dc1ee3e..c964860 100644
--- a/target/product/virtual_ab_ota/compression.mk
+++ b/target/product/virtual_ab_ota/compression.mk
@@ -28,5 +28,4 @@
 PRODUCT_VIRTUAL_AB_COMPRESSION := true
 PRODUCT_PACKAGES += \
     snapuserd.vendor_ramdisk \
-    snapuserd \
-    snapuserd.recovery
+    snapuserd
diff --git a/target/product/virtual_ab_ota/compression_retrofit.mk b/target/product/virtual_ab_ota/compression_retrofit.mk
index 6c29cba..118d3f2 100644
--- a/target/product/virtual_ab_ota/compression_retrofit.mk
+++ b/target/product/virtual_ab_ota/compression_retrofit.mk
@@ -24,5 +24,4 @@
 # as well.
 PRODUCT_PACKAGES += \
     snapuserd.ramdisk \
-    snapuserd \
-    snapuserd.recovery
+    snapuserd
diff --git a/target/product/virtual_ab_ota/vabc_features.mk b/target/product/virtual_ab_ota/vabc_features.mk
index 874eb9c..3f484e4 100644
--- a/target/product/virtual_ab_ota/vabc_features.mk
+++ b/target/product/virtual_ab_ota/vabc_features.mk
@@ -38,6 +38,9 @@
 # Enabling this property, will improve OTA install time
 # but will use an additional CPU core
 # PRODUCT_VENDOR_PROPERTIES += ro.virtual_ab.compression.threads=true
+ifndef PRODUCT_VIRTUAL_AB_COMPRESSION_FACTOR
+    PRODUCT_VIRTUAL_AB_COMPRESSION_FACTOR := 65536
+endif
 
 PRODUCT_VIRTUAL_AB_COMPRESSION := true
 PRODUCT_VIRTUAL_AB_COMPRESSION_METHOD ?= none
diff --git a/teams/Android.bp b/teams/Android.bp
index 8f83e71..a02a573 100644
--- a/teams/Android.bp
+++ b/teams/Android.bp
@@ -732,7 +732,7 @@
 }
 
 team {
-    name: "trendy_team_deprecated_systemui_gfx",
+    name: "trendy_team_ailabs",
 
     // go/trendy/manage/engineers/6673470538285056
     trendy_team_id: "6673470538285056",
diff --git a/tests/Android.bp b/tests/Android.bp
new file mode 100644
index 0000000..b2ff583
--- /dev/null
+++ b/tests/Android.bp
@@ -0,0 +1,40 @@
+// Copyright 2024 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package {
+    default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+python_test_host {
+    name: "run_tool_with_logging_test",
+    main: "run_tool_with_logging_test.py",
+    pkg_path: "testdata",
+    srcs: [
+        "run_tool_with_logging_test.py",
+    ],
+    test_options: {
+        unit_test: true,
+    },
+    data: [
+        ":envsetup_minimum.zip",
+    ],
+    test_suites: [
+        "general-tests",
+    ],
+    version: {
+        py3: {
+            embedded_launcher: true,
+        },
+    },
+}
diff --git a/tests/run_tool_with_logging_test.py b/tests/run_tool_with_logging_test.py
new file mode 100644
index 0000000..1eb78f1
--- /dev/null
+++ b/tests/run_tool_with_logging_test.py
@@ -0,0 +1,336 @@
+# Copyright 2024 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import dataclasses
+from importlib import resources
+import logging
+import os
+from pathlib import Path
+import re
+import signal
+import stat
+import subprocess
+import tempfile
+import textwrap
+import time
+import unittest
+import zipfile
+import sys
+
+EXIT_RETURN_CODE = 0
+INTERRUPTED_RETURN_CODE = 130
+
+
+class RunToolWithLoggingTest(unittest.TestCase):
+
+  @classmethod
+  def setUpClass(cls):
+    super().setUpClass()
+    # Configure to print logging to stdout.
+    logging.basicConfig(filename=None, level=logging.DEBUG)
+    console = logging.StreamHandler(sys.stdout)
+    logging.getLogger('').addHandler(console)
+
+  def setUp(self):
+    super().setUp()
+    self.working_dir = tempfile.TemporaryDirectory()
+    # Run all the tests from working_dir which is our temp Android build top.
+    os.chdir(self.working_dir.name)
+    # Extract envsetup.zip which contains the envsetup.sh and other dependent
+    # scripts required to set up the build environments.
+    with resources.files("testdata").joinpath("envsetup.zip").open('rb') as p:
+      with zipfile.ZipFile(p, "r") as zip_f:
+        zip_f.extractall()
+
+  def tearDown(self):
+    self.working_dir.cleanup()
+    super().tearDown()
+
+  def test_does_not_log_when_logging_disabled(self):
+    test_tool = TestScript.create(self.working_dir)
+    test_logger = TestScript.create(self.working_dir)
+
+    self._run_script_and_wait(f"""
+      ANDROID_ENABLE_TOOL_LOGGING=false
+      ANDROID_TOOL_LOGGER="{test_logger.executable}"
+      run_tool_with_logging "FAKE_TOOL" {test_tool.executable} arg1 arg2
+    """)
+
+    test_tool.assert_called_once_with_args("arg1 arg2")
+    test_logger.assert_not_called()
+
+  def test_does_not_log_when_logger_var_unset(self):
+    test_tool = TestScript.create(self.working_dir)
+    test_logger = TestScript.create(self.working_dir)
+
+    self._run_script_and_wait(f"""
+      unset ANDROID_ENABLE_TOOL_LOGGING
+      ANDROID_TOOL_LOGGER="{test_logger.executable}"
+      run_tool_with_logging "FAKE_TOOL" {test_tool.executable} arg1 arg2
+    """)
+
+    test_tool.assert_called_once_with_args("arg1 arg2")
+    test_logger.assert_not_called()
+
+  def test_does_not_log_when_logger_var_empty(self):
+    test_tool = TestScript.create(self.working_dir)
+
+    self._run_script_and_wait(f"""
+      ANDROID_ENABLE_TOOL_LOGGING=true
+      ANDROID_TOOL_LOGGER=""
+      run_tool_with_logging "FAKE_TOOL" {test_tool.executable} arg1 arg2
+    """)
+
+    test_tool.assert_called_once_with_args("arg1 arg2")
+
+  def test_does_not_log_with_logger_unset(self):
+    test_tool = TestScript.create(self.working_dir)
+
+    self._run_script_and_wait(f"""
+      ANDROID_ENABLE_TOOL_LOGGING=true
+      unset ANDROID_TOOL_LOGGER
+      run_tool_with_logging "FAKE_TOOL" {test_tool.executable} arg1 arg2
+    """)
+
+    test_tool.assert_called_once_with_args("arg1 arg2")
+
+  def test_log_success_with_logger_enabled(self):
+    test_tool = TestScript.create(self.working_dir)
+    test_logger = TestScript.create(self.working_dir)
+
+    self._run_script_and_wait(f"""
+      ANDROID_ENABLE_TOOL_LOGGING=true
+      ANDROID_TOOL_LOGGER="{test_logger.executable}"
+      run_tool_with_logging "FAKE_TOOL" {test_tool.executable} arg1 arg2
+    """)
+
+    test_tool.assert_called_once_with_args("arg1 arg2")
+    expected_logger_args = (
+        "--tool_tag FAKE_TOOL --start_timestamp \d+\.\d+ --end_timestamp"
+        ' \d+\.\d+ --tool_args "arg1 arg2" --exit_code 0'
+    )
+    test_logger.assert_called_once_with_args(expected_logger_args)
+
+  def test_run_tool_output_is_same_with_and_without_logging(self):
+    test_tool = TestScript.create(self.working_dir, "echo 'tool called'")
+    test_logger = TestScript.create(self.working_dir)
+
+    run_tool_with_logging_stdout, run_tool_with_logging_stderr = (
+        self._run_script_and_wait(f"""
+      ANDROID_ENABLE_TOOL_LOGGING=true
+      ANDROID_TOOL_LOGGER="{test_logger.executable}"
+      run_tool_with_logging "FAKE_TOOL" {test_tool.executable} arg1 arg2
+    """)
+    )
+
+    run_tool_without_logging_stdout, run_tool_without_logging_stderr = (
+        self._run_script_and_wait(f"""
+      ANDROID_ENABLE_TOOL_LOGGING=true
+      ANDROID_TOOL_LOGGER="{test_logger.executable}"
+      {test_tool.executable} arg1 arg2
+    """)
+    )
+
+    self.assertEqual(
+        run_tool_with_logging_stdout, run_tool_without_logging_stdout
+    )
+    self.assertEqual(
+        run_tool_with_logging_stderr, run_tool_without_logging_stderr
+    )
+
+  def test_logger_output_is_suppressed(self):
+    test_tool = TestScript.create(self.working_dir)
+    test_logger = TestScript.create(self.working_dir, "echo 'logger called'")
+
+    run_tool_with_logging_output, _ = self._run_script_and_wait(f"""
+      ANDROID_ENABLE_TOOL_LOGGING=true
+      ANDROID_TOOL_LOGGER="{test_logger.executable}"
+      run_tool_with_logging "FAKE_TOOL" {test_tool.executable} arg1 arg2
+    """)
+
+    self.assertNotIn("logger called", run_tool_with_logging_output)
+
+  def test_logger_error_is_suppressed(self):
+    test_tool = TestScript.create(self.working_dir)
+    test_logger = TestScript.create(
+        self.working_dir, "echo 'logger failed' > /dev/stderr; exit 1"
+    )
+
+    _, err = self._run_script_and_wait(f"""
+      ANDROID_ENABLE_TOOL_LOGGING=true
+      ANDROID_TOOL_LOGGER="{test_logger.executable}"
+      run_tool_with_logging "FAKE_TOOL" {test_tool.executable} arg1 arg2
+    """)
+
+    self.assertNotIn("logger failed", err)
+
+  def test_log_success_when_tool_interrupted(self):
+    test_tool = TestScript.create(self.working_dir, script_body="sleep 100")
+    test_logger = TestScript.create(self.working_dir)
+
+    process = self._run_script_in_build_env(f"""
+      ANDROID_ENABLE_TOOL_LOGGING=true
+      ANDROID_TOOL_LOGGER="{test_logger.executable}"
+      run_tool_with_logging "FAKE_TOOL" {test_tool.executable} arg1 arg2
+    """)
+
+    pgid = os.getpgid(process.pid)
+    # Give some time for the subprocess to start.
+    time.sleep(1)
+    # Kill the subprocess and any processes created in the same group.
+    os.killpg(pgid, signal.SIGINT)
+
+    returncode, _, _ = self._wait_for_process(process)
+    self.assertEqual(returncode, INTERRUPTED_RETURN_CODE)
+
+    expected_logger_args = (
+        "--tool_tag FAKE_TOOL --start_timestamp \d+\.\d+ --end_timestamp"
+        ' \d+\.\d+ --tool_args "arg1 arg2" --exit_code 130'
+    )
+    test_logger.assert_called_once_with_args(expected_logger_args)
+
+  def test_logger_can_be_toggled_on(self):
+    test_tool = TestScript.create(self.working_dir)
+    test_logger = TestScript.create(self.working_dir)
+
+    self._run_script_and_wait(f"""
+      ANDROID_ENABLE_TOOL_LOGGING=false
+      ANDROID_TOOL_LOGGER="{test_logger.executable}"
+      ANDROID_ENABLE_TOOL_LOGGING=true
+      run_tool_with_logging "FAKE_TOOL" {test_tool.executable} arg1 arg2
+    """)
+
+    test_logger.assert_called_with_times(1)
+
+  def test_logger_can_be_toggled_off(self):
+    test_tool = TestScript.create(self.working_dir)
+    test_logger = TestScript.create(self.working_dir)
+
+    self._run_script_and_wait(f"""
+      ANDROID_ENABLE_TOOL_LOGGING=true
+      ANDROID_TOOL_LOGGER="{test_logger.executable}"
+      ANDROID_ENABLE_TOOL_LOGGING=false
+      run_tool_with_logging "FAKE_TOOL" {test_tool.executable} arg1 arg2
+    """)
+
+    test_logger.assert_not_called()
+
+  def _create_build_env_script(self) -> str:
+    return f"""
+      source {Path(self.working_dir.name).joinpath("build/make/envsetup.sh")}
+    """
+
+  def _run_script_and_wait(self, test_script: str) -> tuple[str, str]:
+    process = self._run_script_in_build_env(test_script)
+    returncode, out, err = self._wait_for_process(process)
+    logging.debug("script stdout: %s", out)
+    logging.debug("script stderr: %s", err)
+    self.assertEqual(returncode, EXIT_RETURN_CODE)
+    return out, err
+
+  def _run_script_in_build_env(self, test_script: str) -> subprocess.Popen:
+    setup_build_env_script = self._create_build_env_script()
+    return subprocess.Popen(
+        setup_build_env_script + test_script,
+        shell=True,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        text=True,
+        start_new_session=True,
+        executable='/bin/bash'
+        )
+
+  def _wait_for_process(
+      self, process: subprocess.Popen
+  ) -> tuple[int, str, str]:
+    pgid = os.getpgid(process.pid)
+    out, err = process.communicate()
+    # Wait for all processes in the same group to complete since the logger runs
+    # as a separate detached process.
+    self._wait_for_process_group(pgid)
+    return (process.returncode, out, err)
+
+  def _wait_for_process_group(self, pgid: int, timeout: int = 5):
+    """Waits for all subprocesses within the process group to complete."""
+    start_time = time.time()
+    while True:
+      if time.time() - start_time > timeout:
+        raise TimeoutError(
+            f"Process group did not complete after {timeout} seconds"
+        )
+      for pid in os.listdir("/proc"):
+        if pid.isdigit():
+          try:
+            if os.getpgid(int(pid)) == pgid:
+              time.sleep(0.1)
+              break
+          except (FileNotFoundError, PermissionError, ProcessLookupError):
+            pass
+      else:
+        # All processes have completed.
+        break
+
+
+@dataclasses.dataclass
+class TestScript:
+  executable: Path
+  output_file: Path
+
+  def create(temp_dir: Path, script_body: str = ""):
+    with tempfile.NamedTemporaryFile(dir=temp_dir.name, delete=False) as f:
+      output_file = f.name
+
+    with tempfile.NamedTemporaryFile(dir=temp_dir.name, delete=False) as f:
+      executable = f.name
+      executable_contents = textwrap.dedent(f"""
+      #!/bin/bash
+
+      echo "${{@}}" >> {output_file}
+      {script_body}
+      """)
+      f.write(executable_contents.encode("utf-8"))
+
+    os.chmod(f.name, os.stat(f.name).st_mode | stat.S_IEXEC)
+
+    return TestScript(executable, output_file)
+
+  def assert_called_with_times(self, expected_call_times: int):
+    lines = self._read_contents_from_output_file()
+    assert len(lines) == expected_call_times, (
+        f"Expect to call {expected_call_times} times, but actually called"
+        f" {len(lines)} times."
+    )
+
+  def assert_called_with_args(self, expected_args: str):
+    lines = self._read_contents_from_output_file()
+    assert len(lines) > 0
+    assert re.search(expected_args, lines[0]), (
+        f"Expect to call with args {expected_args}, but actually called with"
+        f" args {lines[0]}."
+    )
+
+  def assert_not_called(self):
+    self.assert_called_with_times(0)
+
+  def assert_called_once_with_args(self, expected_args: str):
+    self.assert_called_with_times(1)
+    self.assert_called_with_args(expected_args)
+
+  def _read_contents_from_output_file(self) -> list[str]:
+    with open(self.output_file, "r") as f:
+      return f.readlines()
+
+
+if __name__ == "__main__":
+  unittest.main()
diff --git a/tools/aconfig/Cargo.toml b/tools/aconfig/Cargo.toml
index 6bd0d06..bf5e1a9 100644
--- a/tools/aconfig/Cargo.toml
+++ b/tools/aconfig/Cargo.toml
@@ -2,6 +2,7 @@
 
 members = [
     "aconfig",
+    "aconfig_device_paths",
     "aconfig_protos",
     "aconfig_storage_file",
     "aconfig_storage_read_api",
diff --git a/tools/aconfig/TEST_MAPPING b/tools/aconfig/TEST_MAPPING
index 638b92a..b7ff8ef 100644
--- a/tools/aconfig/TEST_MAPPING
+++ b/tools/aconfig/TEST_MAPPING
@@ -84,6 +84,10 @@
       "name": "aconfig_storage_write_api.test.rust"
     },
     {
+      // aconfig_storage write api cpp integration tests
+      "name": "aconfig_storage_write_api.test.cpp"
+    },
+    {
       // aconfig_storage read api rust integration tests
       "name": "aconfig_storage_read_api.test.rust"
     },
@@ -93,9 +97,5 @@
     }
   ],
   "postsubmit": [
-    {
-      // aconfig_storage write api cpp integration tests
-      "name": "aconfig_storage_write_api.test.cpp"
-    }
   ]
 }
diff --git a/tools/aconfig/aconfig/src/storage/flag_table.rs b/tools/aconfig/aconfig/src/storage/flag_table.rs
index b339821..a971211 100644
--- a/tools/aconfig/aconfig/src/storage/flag_table.rs
+++ b/tools/aconfig/aconfig/src/storage/flag_table.rs
@@ -48,7 +48,7 @@
         package_id: u32,
         flag_name: &str,
         flag_type: StoredFlagType,
-        flag_id: u16,
+        flag_index: u16,
         num_buckets: u32,
     ) -> Self {
         let bucket_index = FlagTableNode::find_bucket_index(package_id, flag_name, num_buckets);
@@ -56,7 +56,7 @@
             package_id,
             flag_name: flag_name.to_string(),
             flag_type,
-            flag_id,
+            flag_index,
             next_offset: None,
         };
         Self { node, bucket_index }
diff --git a/tools/aconfig/aconfig/src/storage/flag_value.rs b/tools/aconfig/aconfig/src/storage/flag_value.rs
index a37ad9f..c15ba54 100644
--- a/tools/aconfig/aconfig/src/storage/flag_value.rs
+++ b/tools/aconfig/aconfig/src/storage/flag_value.rs
@@ -41,14 +41,14 @@
     };
 
     for pkg in packages.iter() {
-        let start_offset = pkg.boolean_offset as usize;
+        let start_index = pkg.boolean_start_index as usize;
         let flag_ids = assign_flag_ids(pkg.package_name, pkg.boolean_flags.iter().copied())?;
         for pf in pkg.boolean_flags.iter() {
             let fid = flag_ids
                 .get(pf.name())
                 .ok_or(anyhow!(format!("missing flag id for {}", pf.name())))?;
 
-            list.booleans[start_offset + (*fid as usize)] = pf.state() == ProtoFlagState::ENABLED;
+            list.booleans[start_index + (*fid as usize)] = pf.state() == ProtoFlagState::ENABLED;
         }
     }
 
diff --git a/tools/aconfig/aconfig/src/storage/mod.rs b/tools/aconfig/aconfig/src/storage/mod.rs
index 30517de..855ed02 100644
--- a/tools/aconfig/aconfig/src/storage/mod.rs
+++ b/tools/aconfig/aconfig/src/storage/mod.rs
@@ -33,9 +33,9 @@
     pub package_id: u32,
     pub flag_names: HashSet<&'a str>,
     pub boolean_flags: Vec<&'a ProtoParsedFlag>,
-    // offset of the first boolean flag in this flag package with respect to the start of
-    // boolean flag value array in the flag value file
-    pub boolean_offset: u32,
+    // The index of the first boolean flag in this aconfig package among all boolean
+    // flags in this container.
+    pub boolean_start_index: u32,
 }
 
 impl<'a> FlagPackage<'a> {
@@ -45,7 +45,7 @@
             package_id,
             flag_names: HashSet::new(),
             boolean_flags: vec![],
-            boolean_offset: 0,
+            boolean_start_index: 0,
         }
     }
 
@@ -73,12 +73,11 @@
         }
     }
 
-    // calculate package flag value start offset, in flag value file, each boolean
-    // is stored as a single byte
-    let mut boolean_offset = 0;
+    // calculate the boolean flag start index for each package
+    let mut boolean_start_index = 0;
     for p in packages.iter_mut() {
-        p.boolean_offset = boolean_offset;
-        boolean_offset += p.boolean_flags.len() as u32;
+        p.boolean_start_index = boolean_start_index;
+        boolean_start_index += p.boolean_flags.len() as u32;
     }
 
     packages
@@ -184,7 +183,7 @@
         assert!(packages[0].flag_names.contains("enabled_rw"));
         assert!(packages[0].flag_names.contains("disabled_rw"));
         assert!(packages[0].flag_names.contains("enabled_ro"));
-        assert_eq!(packages[0].boolean_offset, 0);
+        assert_eq!(packages[0].boolean_start_index, 0);
 
         assert_eq!(packages[1].package_name, "com.android.aconfig.storage.test_2");
         assert_eq!(packages[1].package_id, 1);
@@ -192,13 +191,13 @@
         assert!(packages[1].flag_names.contains("enabled_ro"));
         assert!(packages[1].flag_names.contains("disabled_ro"));
         assert!(packages[1].flag_names.contains("enabled_fixed_ro"));
-        assert_eq!(packages[1].boolean_offset, 3);
+        assert_eq!(packages[1].boolean_start_index, 3);
 
         assert_eq!(packages[2].package_name, "com.android.aconfig.storage.test_4");
         assert_eq!(packages[2].package_id, 2);
         assert_eq!(packages[2].flag_names.len(), 2);
         assert!(packages[2].flag_names.contains("enabled_ro"));
         assert!(packages[2].flag_names.contains("enabled_fixed_ro"));
-        assert_eq!(packages[2].boolean_offset, 6);
+        assert_eq!(packages[2].boolean_start_index, 6);
     }
 }
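
The rename from boolean_offset to boolean_start_index above reflects that packages are laid out back to back in a single container-wide boolean array: each package starts at the running total of the preceding packages' boolean flag counts. The sketch below is a minimal, standalone illustration of that layout pass; `PackageLayout` and its values are illustrative stand-ins for `FlagPackage`, chosen so the output matches the 0/3/6 expectations in the unit test above.

```rust
// Illustrative sketch: packages occupy consecutive runs of the container-wide
// boolean flag array, so each start index is the cumulative count of boolean
// flags in the packages placed before it.
struct PackageLayout {
    name: &'static str,
    boolean_flag_count: u32,
    boolean_start_index: u32,
}

fn assign_start_indices(packages: &mut [PackageLayout]) {
    let mut next_index = 0;
    for p in packages.iter_mut() {
        p.boolean_start_index = next_index;
        next_index += p.boolean_flag_count;
    }
}

fn main() {
    let mut packages = [
        PackageLayout { name: "test_1", boolean_flag_count: 3, boolean_start_index: 0 },
        PackageLayout { name: "test_2", boolean_flag_count: 3, boolean_start_index: 0 },
        PackageLayout { name: "test_4", boolean_flag_count: 2, boolean_start_index: 0 },
    ];
    assign_start_indices(&mut packages);
    for p in &packages {
        // Prints 0, 3 and 6, matching the boolean_start_index assertions in
        // the unit test above.
        println!("{} starts at {}", p.name, p.boolean_start_index);
    }
}
```
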
diff --git a/tools/aconfig/aconfig/src/storage/package_table.rs b/tools/aconfig/aconfig/src/storage/package_table.rs
index 0a3df77..c53602f 100644
--- a/tools/aconfig/aconfig/src/storage/package_table.rs
+++ b/tools/aconfig/aconfig/src/storage/package_table.rs
@@ -48,7 +48,7 @@
         let node = PackageTableNode {
             package_name: String::from(package.package_name),
             package_id: package.package_id,
-            boolean_offset: package.boolean_offset,
+            boolean_start_index: package.boolean_start_index,
             next_offset: None,
         };
         let bucket_index = PackageTableNode::find_bucket_index(package.package_name, num_buckets);
diff --git a/tools/aconfig/aconfig_device_paths/Android.bp b/tools/aconfig/aconfig_device_paths/Android.bp
new file mode 100644
index 0000000..21aa9a9
--- /dev/null
+++ b/tools/aconfig/aconfig_device_paths/Android.bp
@@ -0,0 +1,38 @@
+// Copyright (C) 2024 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package {
+    default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+rust_defaults {
+    name: "libaconfig_device_paths.defaults",
+    edition: "2021",
+    clippy_lints: "android",
+    lints: "android",
+    srcs: ["src/lib.rs"],
+    rustlibs: [
+        "libaconfig_protos",
+        "libanyhow",
+        "libprotobuf",
+        "libregex",
+    ],
+}
+
+rust_library {
+    name: "libaconfig_device_paths",
+    crate_name: "aconfig_device_paths",
+    host_supported: true,
+    defaults: ["libaconfig_device_paths.defaults"],
+}
diff --git a/tools/aconfig/aconfig_device_paths/Cargo.toml b/tools/aconfig/aconfig_device_paths/Cargo.toml
new file mode 100644
index 0000000..dbe9b3a
--- /dev/null
+++ b/tools/aconfig/aconfig_device_paths/Cargo.toml
@@ -0,0 +1,9 @@
+[package]
+name = "aconfig_device_paths"
+version = "0.1.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+anyhow = "1.0.82"
diff --git a/tools/aconfig/aconfig_device_paths/partition_aconfig_flags_paths.txt b/tools/aconfig/aconfig_device_paths/partition_aconfig_flags_paths.txt
new file mode 100644
index 0000000..3d2deb2
--- /dev/null
+++ b/tools/aconfig/aconfig_device_paths/partition_aconfig_flags_paths.txt
@@ -0,0 +1,6 @@
+[
+    "/system/etc/aconfig_flags.pb",
+    "/system_ext/etc/aconfig_flags.pb",
+    "/product/etc/aconfig_flags.pb",
+    "/vendor/etc/aconfig_flags.pb",
+]
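
This file is not parsed at runtime; it holds a Rust array literal that the library below splices in at compile time via `include!`. The standalone sketch that follows shows, conceptually, what that expression expands to; the literal is inlined here instead of being read from a file so the example compiles on its own.

```rust
use std::path::PathBuf;

fn main() {
    // What include!("../partition_aconfig_flags_paths.txt") expands to,
    // conceptually: the file's contents are spliced in as a Rust expression,
    // so the array literal type-checks as a fixed-size [&str; 4]. It is
    // inlined here purely so this sketch builds without the external file.
    let expanded: [&str; 4] = [
        "/system/etc/aconfig_flags.pb",
        "/system_ext/etc/aconfig_flags.pb",
        "/product/etc/aconfig_flags.pb",
        "/vendor/etc/aconfig_flags.pb",
    ];
    let paths: Vec<PathBuf> = expanded.map(|s| PathBuf::from(s.to_string())).to_vec();
    for p in &paths {
        println!("{}", p.display());
    }
}
```
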
diff --git a/tools/aconfig/aconfig_device_paths/src/lib.rs b/tools/aconfig/aconfig_device_paths/src/lib.rs
new file mode 100644
index 0000000..7bb62f4
--- /dev/null
+++ b/tools/aconfig/aconfig_device_paths/src/lib.rs
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Library for finding all aconfig on-device protobuf file paths.
+
+use anyhow::Result;
+use std::path::PathBuf;
+
+use std::fs;
+
+/// Determine all paths that contain an aconfig protobuf file.
+pub fn parsed_flags_proto_paths() -> Result<Vec<PathBuf>> {
+    let mut result: Vec<PathBuf> = include!("../partition_aconfig_flags_paths.txt")
+        .map(|s| PathBuf::from(s.to_string()))
+        .to_vec();
+    for dir in fs::read_dir("/apex")? {
+        let dir = dir?;
+
+        // Only scan the currently active version of each mainline module; skip the @version dirs.
+        if dir.file_name().as_encoded_bytes().iter().any(|&b| b == b'@') {
+            continue;
+        }
+
+        let mut path = PathBuf::from("/apex");
+        path.push(dir.path());
+        path.push("etc");
+        path.push("aconfig_flags.pb");
+        if path.exists() {
+            result.push(path);
+        }
+    }
+
+    Ok(result)
+}
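
A possible caller of `parsed_flags_proto_paths()` could look like the sketch below. It assumes the crate is available as `aconfig_device_paths` together with `anyhow`, and that it runs on an Android device or emulator where the listed partitions and `/apex` are mounted; it stops at reading raw bytes, since decoding the `parsed_flags` protobuf (for example via `libaconfig_protos`) is left to the caller.

```rust
// Minimal consumer sketch for aconfig_device_paths (device-side assumptions:
// the partition paths and /apex exist and are readable).
use aconfig_device_paths::parsed_flags_proto_paths;
use std::fs;

fn main() -> anyhow::Result<()> {
    for path in parsed_flags_proto_paths()? {
        // The library only discovers paths; decoding the parsed_flags
        // protobuf payload is left to the caller, so this sketch just
        // reports the raw size of each file.
        let bytes = fs::read(&path)?;
        println!("{}: {} bytes", path.display(), bytes.len());
    }
    Ok(())
}
```
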
diff --git a/tools/aconfig/aconfig_storage_file/Android.bp b/tools/aconfig/aconfig_storage_file/Android.bp
index b590312..d60ba92 100644
--- a/tools/aconfig/aconfig_storage_file/Android.bp
+++ b/tools/aconfig/aconfig_storage_file/Android.bp
@@ -56,7 +56,7 @@
     min_sdk_version: "29",
 }
 
-cc_library_static {
+cc_library {
     name: "libaconfig_storage_protos_cc",
     proto: {
         export_proto_headers: true,
diff --git a/tools/aconfig/aconfig_storage_file/protos/aconfig_storage_metadata.proto b/tools/aconfig/aconfig_storage_file/protos/aconfig_storage_metadata.proto
index c6728bd..7de43ca 100644
--- a/tools/aconfig/aconfig_storage_file/protos/aconfig_storage_metadata.proto
+++ b/tools/aconfig/aconfig_storage_file/protos/aconfig_storage_metadata.proto
@@ -26,7 +26,9 @@
   optional string package_map = 3;
   optional string flag_map = 4;
   optional string flag_val = 5;
-  optional int64 timestamp = 6;
+  optional string flag_info = 6;
+  optional string local_overrides = 7;
+  optional int64 timestamp = 8;
 }
 
 message storage_files {
diff --git a/tools/aconfig/aconfig_storage_file/src/flag_info.rs b/tools/aconfig/aconfig_storage_file/src/flag_info.rs
index 3fff263..dc2a8d6 100644
--- a/tools/aconfig/aconfig_storage_file/src/flag_info.rs
+++ b/tools/aconfig/aconfig_storage_file/src/flag_info.rs
@@ -91,9 +91,9 @@
 /// bit field for flag info
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub enum FlagInfoBit {
-    IsSticky = 0,
-    IsReadWrite = 1,
-    HasOverride = 2,
+    IsSticky = 1 << 0,
+    IsReadWrite = 1 << 1,
+    HasOverride = 1 << 2,
 }
 
 /// Flag info node struct
@@ -108,9 +108,9 @@
         writeln!(
             f,
             "sticky: {}, readwrite: {}, override: {}",
-            self.attributes & (FlagInfoBit::IsSticky as u8),
-            self.attributes & (FlagInfoBit::IsReadWrite as u8),
-            self.attributes & (FlagInfoBit::HasOverride as u8),
+            self.attributes & (FlagInfoBit::IsSticky as u8) != 0,
+            self.attributes & (FlagInfoBit::IsReadWrite as u8) != 0,
+            self.attributes & (FlagInfoBit::HasOverride as u8) != 0,
         )?;
         Ok(())
     }
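
The new discriminants turn `FlagInfoBit` into real bit masks, which the updated `Display` implementation relies on: with the old sequential values, `IsSticky as u8` was 0, so `attributes & (FlagInfoBit::IsSticky as u8)` could never be non-zero. The following is a small self-contained sketch of the mask checks; the enum is re-declared locally with the same discriminants so the snippet compiles on its own.

```rust
// Local copy of FlagInfoBit with the same discriminants, for a standalone example.
enum FlagInfoBit {
    IsSticky = 1 << 0,
    IsReadWrite = 1 << 1,
    HasOverride = 1 << 2,
}

// An attribute byte is a bitwise OR of FlagInfoBit masks; a bit is set iff the
// masked value is non-zero, as in the Display implementation above.
fn has(attributes: u8, bit: FlagInfoBit) -> bool {
    attributes & (bit as u8) != 0
}

fn main() {
    // A read-write flag that carries an override but is not sticky.
    let attributes = (FlagInfoBit::IsReadWrite as u8) | (FlagInfoBit::HasOverride as u8);
    println!("sticky: {}", has(attributes, FlagInfoBit::IsSticky));       // false
    println!("readwrite: {}", has(attributes, FlagInfoBit::IsReadWrite)); // true
    println!("override: {}", has(attributes, FlagInfoBit::HasOverride));  // true
}
```
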
diff --git a/tools/aconfig/aconfig_storage_file/src/flag_table.rs b/tools/aconfig/aconfig_storage_file/src/flag_table.rs
index f41f4ce..64b90ea 100644
--- a/tools/aconfig/aconfig_storage_file/src/flag_table.rs
+++ b/tools/aconfig/aconfig_storage_file/src/flag_table.rs
@@ -100,7 +100,8 @@
     pub package_id: u32,
     pub flag_name: String,
     pub flag_type: StoredFlagType,
-    pub flag_id: u16,
+    // within-package flag index for this flag type
+    pub flag_index: u16,
     pub next_offset: Option<u32>,
 }
 
@@ -109,8 +110,8 @@
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         writeln!(
             f,
-            "Package Id: {}, Flag: {}, Type: {:?}, Offset: {}, Next: {:?}",
-            self.package_id, self.flag_name, self.flag_type, self.flag_id, self.next_offset
+            "Package Id: {}, Flag: {}, Type: {:?}, Index: {}, Next: {:?}",
+            self.package_id, self.flag_name, self.flag_type, self.flag_index, self.next_offset
         )?;
         Ok(())
     }
@@ -125,7 +126,7 @@
         result.extend_from_slice(&(name_bytes.len() as u32).to_le_bytes());
         result.extend_from_slice(name_bytes);
         result.extend_from_slice(&(self.flag_type as u16).to_le_bytes());
-        result.extend_from_slice(&self.flag_id.to_le_bytes());
+        result.extend_from_slice(&self.flag_index.to_le_bytes());
         result.extend_from_slice(&self.next_offset.unwrap_or(0).to_le_bytes());
         result
     }
@@ -137,7 +138,7 @@
             package_id: read_u32_from_bytes(bytes, &mut head)?,
             flag_name: read_str_from_bytes(bytes, &mut head)?,
             flag_type: StoredFlagType::try_from(read_u16_from_bytes(bytes, &mut head)?)?,
-            flag_id: read_u16_from_bytes(bytes, &mut head)?,
+            flag_index: read_u16_from_bytes(bytes, &mut head)?,
             next_offset: match read_u32_from_bytes(bytes, &mut head)? {
                 0 => None,
                 val => Some(val),
diff --git a/tools/aconfig/aconfig_storage_file/src/lib.rs b/tools/aconfig/aconfig_storage_file/src/lib.rs
index d14bab6..070a3cf 100644
--- a/tools/aconfig/aconfig_storage_file/src/lib.rs
+++ b/tools/aconfig/aconfig_storage_file/src/lib.rs
@@ -46,12 +46,14 @@
 use std::hash::{Hash, Hasher};
 use std::io::Read;
 
-pub use crate::flag_info::{FlagInfoHeader, FlagInfoList, FlagInfoNode};
+pub use crate::flag_info::{FlagInfoBit, FlagInfoHeader, FlagInfoList, FlagInfoNode};
 pub use crate::flag_table::{FlagTable, FlagTableHeader, FlagTableNode};
 pub use crate::flag_value::{FlagValueHeader, FlagValueList};
 pub use crate::package_table::{PackageTable, PackageTableHeader, PackageTableNode};
 
-use crate::AconfigStorageError::{BytesParseFail, HashTableSizeLimit, InvalidStoredFlagType};
+use crate::AconfigStorageError::{
+    BytesParseFail, HashTableSizeLimit, InvalidFlagValueType, InvalidStoredFlagType,
+};
 
 /// Storage file version
 pub const FILE_VERSION: u32 = 1;
@@ -103,6 +105,7 @@
 }
 
 /// Flag type enum as stored by storage file
+/// ONLY APPEND, NEVER REMOVE FOR BACKWARD COMPATIBILITY. THE MAX IS U16.
 #[derive(Clone, Copy, Debug, PartialEq, Eq)]
 pub enum StoredFlagType {
     ReadWriteBoolean = 0,
@@ -123,6 +126,36 @@
     }
 }
 
+/// Flag value type enum; one FlagValueType maps to many StoredFlagTypes
+/// ONLY APPEND, NEVER REMOVE FOR BACKWARD COMPATIBILITY. THE MAX IS U16
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum FlagValueType {
+    Boolean = 0,
+}
+
+impl TryFrom<StoredFlagType> for FlagValueType {
+    type Error = AconfigStorageError;
+
+    fn try_from(value: StoredFlagType) -> Result<Self, Self::Error> {
+        match value {
+            StoredFlagType::ReadWriteBoolean => Ok(Self::Boolean),
+            StoredFlagType::ReadOnlyBoolean => Ok(Self::Boolean),
+            StoredFlagType::FixedReadOnlyBoolean => Ok(Self::Boolean),
+        }
+    }
+}
+
+impl TryFrom<u16> for FlagValueType {
+    type Error = AconfigStorageError;
+
+    fn try_from(value: u16) -> Result<Self, Self::Error> {
+        match value {
+            x if x == Self::Boolean as u16 => Ok(Self::Boolean),
+            _ => Err(InvalidFlagValueType(anyhow!("Invalid flag value type"))),
+        }
+    }
+}
+
 /// Storage query api error
 #[non_exhaustive]
 #[derive(thiserror::Error, Debug)]
@@ -162,6 +195,9 @@
 
     #[error("invalid stored flag type")]
     InvalidStoredFlagType(#[source] anyhow::Error),
+
+    #[error("invalid flag value type")]
+    InvalidFlagValueType(#[source] anyhow::Error),
 }
 
 /// Get the right hash table size given number of entries in the table. Use a
@@ -254,13 +290,13 @@
 
     let mut package_info = vec![("", 0); package_table.header.num_packages as usize];
     for node in package_table.nodes.iter() {
-        package_info[node.package_id as usize] = (&node.package_name, node.boolean_offset);
+        package_info[node.package_id as usize] = (&node.package_name, node.boolean_start_index);
     }
 
     let mut flags = Vec::new();
     for node in flag_table.nodes.iter() {
         let (package_name, package_offset) = package_info[node.package_id as usize];
-        let flag_offset = package_offset + node.flag_id as u32;
+        let flag_offset = package_offset + node.flag_index as u32;
         let flag_value = flag_value_list.booleans[flag_offset as usize];
         flags.push((
             String::from(package_name),
diff --git a/tools/aconfig/aconfig_storage_file/src/package_table.rs b/tools/aconfig/aconfig_storage_file/src/package_table.rs
index 36b0493..b734972 100644
--- a/tools/aconfig/aconfig_storage_file/src/package_table.rs
+++ b/tools/aconfig/aconfig_storage_file/src/package_table.rs
@@ -96,9 +96,9 @@
 pub struct PackageTableNode {
     pub package_name: String,
     pub package_id: u32,
-    // offset of the first boolean flag in this flag package with respect to the start of
-    // boolean flag value array in the flag value file
-    pub boolean_offset: u32,
+    // The index of the first boolean flag in this aconfig package among all boolean
+    // flags in this container.
+    pub boolean_start_index: u32,
     pub next_offset: Option<u32>,
 }
 
@@ -107,8 +107,8 @@
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         writeln!(
             f,
-            "Package: {}, Id: {}, Offset: {}, Next: {:?}",
-            self.package_name, self.package_id, self.boolean_offset, self.next_offset
+            "Package: {}, Id: {}, Boolean flag start index: {}, Next: {:?}",
+            self.package_name, self.package_id, self.boolean_start_index, self.next_offset
         )?;
         Ok(())
     }
@@ -122,7 +122,7 @@
         result.extend_from_slice(&(name_bytes.len() as u32).to_le_bytes());
         result.extend_from_slice(name_bytes);
         result.extend_from_slice(&self.package_id.to_le_bytes());
-        result.extend_from_slice(&self.boolean_offset.to_le_bytes());
+        result.extend_from_slice(&self.boolean_start_index.to_le_bytes());
         result.extend_from_slice(&self.next_offset.unwrap_or(0).to_le_bytes());
         result
     }
@@ -133,7 +133,7 @@
         let node = Self {
             package_name: read_str_from_bytes(bytes, &mut head)?,
             package_id: read_u32_from_bytes(bytes, &mut head)?,
-            boolean_offset: read_u32_from_bytes(bytes, &mut head)?,
+            boolean_start_index: read_u32_from_bytes(bytes, &mut head)?,
             next_offset: match read_u32_from_bytes(bytes, &mut head)? {
                 0 => None,
                 val => Some(val),
diff --git a/tools/aconfig/aconfig_storage_file/src/test_utils.rs b/tools/aconfig/aconfig_storage_file/src/test_utils.rs
index c0f647a..608563c 100644
--- a/tools/aconfig/aconfig_storage_file/src/test_utils.rs
+++ b/tools/aconfig/aconfig_storage_file/src/test_utils.rs
@@ -38,19 +38,19 @@
     let first_node = PackageTableNode {
         package_name: String::from("com.android.aconfig.storage.test_2"),
         package_id: 1,
-        boolean_offset: 3,
+        boolean_start_index: 3,
         next_offset: None,
     };
     let second_node = PackageTableNode {
         package_name: String::from("com.android.aconfig.storage.test_1"),
         package_id: 0,
-        boolean_offset: 0,
+        boolean_start_index: 0,
         next_offset: Some(159),
     };
     let third_node = PackageTableNode {
         package_name: String::from("com.android.aconfig.storage.test_4"),
         package_id: 2,
-        boolean_offset: 6,
+        boolean_start_index: 6,
         next_offset: None,
     };
     let nodes = vec![first_node, second_node, third_node];
@@ -63,14 +63,14 @@
         package_id: u32,
         flag_name: &str,
         flag_type: u16,
-        flag_id: u16,
+        flag_index: u16,
         next_offset: Option<u32>,
     ) -> Self {
         Self {
             package_id,
             flag_name: flag_name.to_string(),
             flag_type: StoredFlagType::try_from(flag_type).unwrap(),
-            flag_id,
+            flag_index,
             next_offset,
         }
     }
diff --git a/tools/aconfig/aconfig_storage_read_api/Android.bp b/tools/aconfig/aconfig_storage_read_api/Android.bp
index b252e9d..a0980b6 100644
--- a/tools/aconfig/aconfig_storage_read_api/Android.bp
+++ b/tools/aconfig/aconfig_storage_read_api/Android.bp
@@ -38,6 +38,7 @@
         "tests/package.map",
         "tests/flag.map",
         "tests/flag.val",
+        "tests/flag.info",
     ],
 }
 
@@ -68,7 +69,7 @@
 }
 
 // flag read api cc interface
-cc_library_static {
+cc_library {
     name: "libaconfig_storage_read_api_cc",
     srcs: ["aconfig_storage_read_api.cpp"],
     generated_headers: [
@@ -82,5 +83,10 @@
         "libaconfig_storage_protos_cc",
         "libprotobuf-cpp-lite",
         "libbase",
+        "liblog",
+    ],
+    apex_available: [
+        "//apex_available:platform",
+        "//apex_available:anyapex",
     ],
 }
diff --git a/tools/aconfig/aconfig_storage_read_api/aconfig_storage_read_api.cpp b/tools/aconfig/aconfig_storage_read_api/aconfig_storage_read_api.cpp
index 2213831..ff2f38e 100644
--- a/tools/aconfig/aconfig_storage_read_api/aconfig_storage_read_api.cpp
+++ b/tools/aconfig/aconfig_storage_read_api/aconfig_storage_read_api.cpp
@@ -54,6 +54,8 @@
           return entry.flag_map();
         case StorageFileType::flag_val:
           return entry.flag_val();
+        case StorageFileType::flag_info:
+          return entry.flag_info();
         default:
           return Error() << "Invalid file type " << file_type;
       }
@@ -104,6 +106,19 @@
 
 } // namespace private internal api
 
+/// Map from StoredFlagType to FlagValueType
+android::base::Result<FlagValueType> map_to_flag_value_type(
+    StoredFlagType stored_type) {
+  switch (stored_type) {
+    case StoredFlagType::ReadWriteBoolean:
+    case StoredFlagType::ReadOnlyBoolean:
+    case StoredFlagType::FixedReadOnlyBoolean:
+      return FlagValueType::Boolean;
+    default:
+      return Error() << "Unsupported stored flag type";
+  }
+}
+
 /// Get mapped storage file
 Result<MappedStorageFile> get_mapped_file(
     std::string const& container,
@@ -124,49 +139,50 @@
   }
 }
 
-/// Get package offset
-Result<PackageOffset> get_package_offset(
+/// Get package context
+Result<PackageReadContext> get_package_read_context(
     MappedStorageFile const& file,
     std::string const& package) {
   auto content = rust::Slice<const uint8_t>(
       static_cast<uint8_t*>(file.file_ptr), file.file_size);
-  auto offset_cxx = get_package_offset_cxx(content, rust::Str(package.c_str()));
-  if (offset_cxx.query_success) {
-    auto offset = PackageOffset();
-    offset.package_exists = offset_cxx.package_exists;
-    offset.package_id = offset_cxx.package_id;
-    offset.boolean_offset = offset_cxx.boolean_offset;
-    return offset;
+  auto context_cxx = get_package_read_context_cxx(content, rust::Str(package.c_str()));
+  if (context_cxx.query_success) {
+    auto context = PackageReadContext();
+    context.package_exists = context_cxx.package_exists;
+    context.package_id = context_cxx.package_id;
+    context.boolean_start_index = context_cxx.boolean_start_index;
+    return context;
   } else {
-    return Error() << offset_cxx.error_message;
+    return Error() << context_cxx.error_message;
   }
 }
 
-/// Get flag offset
-Result<FlagOffset> get_flag_offset(
+/// Get flag read context
+Result<FlagReadContext> get_flag_read_context(
     MappedStorageFile const& file,
     uint32_t package_id,
     std::string const& flag_name){
   auto content = rust::Slice<const uint8_t>(
       static_cast<uint8_t*>(file.file_ptr), file.file_size);
-  auto offset_cxx = get_flag_offset_cxx(content, package_id, rust::Str(flag_name.c_str()));
-  if (offset_cxx.query_success) {
-    auto offset = FlagOffset();
-    offset.flag_exists = offset_cxx.flag_exists;
-    offset.flag_offset = offset_cxx.flag_offset;
-    return offset;
+  auto context_cxx = get_flag_read_context_cxx(content, package_id, rust::Str(flag_name.c_str()));
+  if (context_cxx.query_success) {
+    auto context = FlagReadContext();
+    context.flag_exists = context_cxx.flag_exists;
+    context.flag_type = static_cast<StoredFlagType>(context_cxx.flag_type);
+    context.flag_index = context_cxx.flag_index;
+    return context;
   } else {
-   return Error() << offset_cxx.error_message;
+   return Error() << context_cxx.error_message;
   }
 }
 
 /// Get boolean flag value
 Result<bool> get_boolean_flag_value(
     MappedStorageFile const& file,
-    uint32_t offset) {
+    uint32_t index) {
   auto content = rust::Slice<const uint8_t>(
       static_cast<uint8_t*>(file.file_ptr), file.file_size);
-  auto value_cxx = get_boolean_flag_value_cxx(content, offset);
+  auto value_cxx = get_boolean_flag_value_cxx(content, index);
   if (value_cxx.query_success) {
     return value_cxx.flag_value;
   } else {
@@ -174,4 +190,19 @@
   }
 }
 
+/// Get boolean flag attribute
+Result<uint8_t> get_flag_attribute(
+    MappedStorageFile const& file,
+    FlagValueType value_type,
+    uint32_t index) {
+  auto content = rust::Slice<const uint8_t>(
+      static_cast<uint8_t*>(file.file_ptr), file.file_size);
+  auto info_cxx = get_flag_attribute_cxx(
+      content, static_cast<uint16_t>(value_type), index);
+  if (info_cxx.query_success) {
+    return info_cxx.flag_attribute;
+  } else {
+    return Error() << info_cxx.error_message;
+  }
+}
 } // namespace aconfig_storage
diff --git a/tools/aconfig/aconfig_storage_read_api/include/aconfig_storage/aconfig_storage_read_api.hpp b/tools/aconfig/aconfig_storage_read_api/include/aconfig_storage/aconfig_storage_read_api.hpp
index aa90f47..7c63ef2 100644
--- a/tools/aconfig/aconfig_storage_read_api/include/aconfig_storage/aconfig_storage_read_api.hpp
+++ b/tools/aconfig/aconfig_storage_read_api/include/aconfig_storage/aconfig_storage_read_api.hpp
@@ -6,11 +6,35 @@
 
 namespace aconfig_storage {
 
-/// Storage file type enum
+/// Storage file type enum, to be consistent with the one defined in
+/// aconfig_storage_file/src/lib.rs
 enum StorageFileType {
   package_map,
   flag_map,
-  flag_val
+  flag_val,
+  flag_info
+};
+
+/// Flag type enum, to be consistent with the one defined in
+/// aconfig_storage_file/src/lib.rs
+enum StoredFlagType {
+  ReadWriteBoolean = 0,
+  ReadOnlyBoolean = 1,
+  FixedReadOnlyBoolean = 2,
+};
+
+/// Flag value type enum, to be consistent with the one defined in
+/// aconfig_storage_file/src/lib.rs
+enum FlagValueType {
+  Boolean = 0,
+};
+
+/// Flag info enum, to be consistent with the one defined in
+/// aconfig_storage_file/src/flag_info.rs
+enum FlagInfoBit {
+  IsSticky = 1<<0,
+  IsReadWrite = 1<<1,
+  HasOverride = 1<<2,
 };
 
 /// Mapped storage file
@@ -19,17 +43,18 @@
   size_t file_size;
 };
 
-/// Package offset query result
-struct PackageOffset {
+/// Package read context query result
+struct PackageReadContext {
   bool package_exists;
   uint32_t package_id;
-  uint32_t boolean_offset;
+  uint32_t boolean_start_index;
 };
 
-/// Flag offset query result
-struct FlagOffset {
+/// Flag read context query result
+struct FlagReadContext {
   bool flag_exists;
-  uint16_t flag_offset;
+  StoredFlagType flag_type;
+  uint16_t flag_index;
 };
 
 /// DO NOT USE APIS IN THE FOLLOWING NAMESPACE DIRECTLY
@@ -42,6 +67,12 @@
 
 } // namespace private_internal_api
 
+/// Map from StoredFlagType to FlagValueType
+/// \input stored_type: stored flag type in the storage file
+/// \returns the flag value type enum
+android::base::Result<FlagValueType> map_to_flag_value_type(
+    StoredFlagType stored_type);
+
 /// Get mapped storage file
 /// \input container: stoarge container name
 /// \input file_type: storage file type enum
@@ -56,30 +87,39 @@
 android::base::Result<uint32_t> get_storage_file_version(
     std::string const& file_path);
 
-/// Get package offset
+/// Get package read context
 /// \input file: mapped storage file
 /// \input package: the flag package name
-/// \returns a package offset
-android::base::Result<PackageOffset> get_package_offset(
+/// \returns a package read context
+android::base::Result<PackageReadContext> get_package_read_context(
     MappedStorageFile const& file,
     std::string const& package);
 
-/// Get flag offset
+/// Get flag read context
 /// \input file: mapped storage file
 /// \input package_id: the flag package id obtained from package offset query
 /// \input flag_name: flag name
-/// \returns the flag offset
-android::base::Result<FlagOffset> get_flag_offset(
+/// \returns the flag read context
+android::base::Result<FlagReadContext> get_flag_read_context(
     MappedStorageFile const& file,
     uint32_t package_id,
     std::string const& flag_name);
 
 /// Get boolean flag value
 /// \input file: mapped storage file
-/// \input offset: the boolean flag value byte offset in the file
+/// \input index: the boolean flag index in the file
 /// \returns the boolean flag value
 android::base::Result<bool> get_boolean_flag_value(
     MappedStorageFile const& file,
-    uint32_t offset);
+    uint32_t index);
 
+/// Get boolean flag attribute
+/// \input file: mapped storage file
+/// \input value_type: flag value type
+/// \input index: the boolean flag index in the file
+/// \returns the boolean flag attribute
+android::base::Result<uint8_t> get_flag_attribute(
+    MappedStorageFile const& file,
+    FlagValueType value_type,
+    uint32_t index);
 } // namespace aconfig_storage
diff --git a/tools/aconfig/aconfig_storage_read_api/src/flag_info_query.rs b/tools/aconfig/aconfig_storage_read_api/src/flag_info_query.rs
new file mode 100644
index 0000000..e593418
--- /dev/null
+++ b/tools/aconfig/aconfig_storage_read_api/src/flag_info_query.rs
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! flag info query module defines the flag info file read from mapped bytes
+
+use crate::{AconfigStorageError, FILE_VERSION};
+use aconfig_storage_file::{flag_info::FlagInfoHeader, read_u8_from_bytes, FlagValueType};
+use anyhow::anyhow;
+
+/// Get flag attribute bitfield
+pub fn find_flag_attribute(
+    buf: &[u8],
+    flag_type: FlagValueType,
+    flag_index: u32,
+) -> Result<u8, AconfigStorageError> {
+    let interpreted_header = FlagInfoHeader::from_bytes(buf)?;
+    if interpreted_header.version > crate::FILE_VERSION {
+        return Err(AconfigStorageError::HigherStorageFileVersion(anyhow!(
+            "Cannot read storage file with a higher version of {} with lib version {}",
+            interpreted_header.version,
+            FILE_VERSION
+        )));
+    }
+
+    // get byte offset to the flag info
+    let mut head = match flag_type {
+        FlagValueType::Boolean => (interpreted_header.boolean_flag_offset + flag_index) as usize,
+    };
+
+    if head >= interpreted_header.file_size as usize {
+        return Err(AconfigStorageError::InvalidStorageFileOffset(anyhow!(
+            "Flag info offset goes beyond the end of the file."
+        )));
+    }
+
+    let val = read_u8_from_bytes(buf, &mut head)?;
+    Ok(val)
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use aconfig_storage_file::{test_utils::create_test_flag_info_list, FlagInfoBit};
+
+    #[test]
+    // this test point locks down query if flag is sticky
+    fn test_is_flag_sticky() {
+        let flag_info_list = create_test_flag_info_list().into_bytes();
+        for offset in 0..8 {
+            let attribute =
+                find_flag_attribute(&flag_info_list[..], FlagValueType::Boolean, offset).unwrap();
+            assert_eq!((attribute & FlagInfoBit::IsSticky as u8) != 0u8, false);
+        }
+    }
+
+    #[test]
+    // this test point locks down query if flag is readwrite
+    fn test_is_flag_readwrite() {
+        let flag_info_list = create_test_flag_info_list().into_bytes();
+        let baseline: Vec<bool> = vec![true, false, true, false, false, false, false, false];
+        for offset in 0..8 {
+            let attribute =
+                find_flag_attribute(&flag_info_list[..], FlagValueType::Boolean, offset).unwrap();
+            assert_eq!(
+                (attribute & FlagInfoBit::IsReadWrite as u8) != 0u8,
+                baseline[offset as usize]
+            );
+        }
+    }
+
+    #[test]
+    // this test point locks down query if flag has override
+    fn test_flag_has_override() {
+        let flag_info_list = create_test_flag_info_list().into_bytes();
+        for offset in 0..8 {
+            let attribute =
+                find_flag_attribute(&flag_info_list[..], FlagValueType::Boolean, offset).unwrap();
+            assert_eq!((attribute & FlagInfoBit::HasOverride as u8) != 0u8, false);
+        }
+    }
+
+    #[test]
+    // this test point locks down query beyond the end of boolean section
+    fn test_boolean_out_of_range() {
+        let flag_info_list = create_test_flag_info_list().into_bytes();
+        let error =
+            find_flag_attribute(&flag_info_list[..], FlagValueType::Boolean, 8).unwrap_err();
+        assert_eq!(
+            format!("{:?}", error),
+            "InvalidStorageFileOffset(Flag info offset goes beyond the end of the file.)"
+        );
+    }
+
+    #[test]
+    // this test point locks down query error when file has a higher version
+    fn test_higher_version_storage_file() {
+        let mut info_list = create_test_flag_info_list();
+        info_list.header.version = crate::FILE_VERSION + 1;
+        let flag_info = info_list.into_bytes();
+        let error = find_flag_attribute(&flag_info[..], FlagValueType::Boolean, 4).unwrap_err();
+        assert_eq!(
+            format!("{:?}", error),
+            format!(
+                "HigherStorageFileVersion(Cannot read storage file with a higher version of {} with lib version {})",
+                crate::FILE_VERSION + 1,
+                crate::FILE_VERSION
+            )
+        );
+    }
+}
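
A minimal usage sketch (illustrative, not taken from the diff above): the attribute byte returned by find_flag_attribute is meant to be decoded with the FlagInfoBit masks from aconfig_storage_file, mirroring what the tests do.

use aconfig_storage_file::FlagInfoBit;

/// Hypothetical helper: true when the read/write bit is set in an attribute
/// byte returned by find_flag_attribute.
fn is_read_write(attribute: u8) -> bool {
    attribute & (FlagInfoBit::IsReadWrite as u8) != 0
}
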
diff --git a/tools/aconfig/aconfig_storage_read_api/src/flag_table_query.rs b/tools/aconfig/aconfig_storage_read_api/src/flag_table_query.rs
index a251b41..55fdcb7 100644
--- a/tools/aconfig/aconfig_storage_read_api/src/flag_table_query.rs
+++ b/tools/aconfig/aconfig_storage_read_api/src/flag_table_query.rs
@@ -18,18 +18,23 @@
 
 use crate::{AconfigStorageError, FILE_VERSION};
 use aconfig_storage_file::{
-    flag_table::FlagTableHeader, flag_table::FlagTableNode, read_u32_from_bytes,
+    flag_table::FlagTableHeader, flag_table::FlagTableNode, read_u32_from_bytes, StoredFlagType,
 };
 use anyhow::anyhow;
 
-pub type FlagOffset = u16;
+/// Flag table query return
+#[derive(PartialEq, Debug)]
+pub struct FlagReadContext {
+    pub flag_type: StoredFlagType,
+    pub flag_index: u16,
+}
 
-/// Query flag within package offset
-pub fn find_flag_offset(
+/// Query flag read context: flag type and within-package flag index
+pub fn find_flag_read_context(
     buf: &[u8],
     package_id: u32,
     flag: &str,
-) -> Result<Option<FlagOffset>, AconfigStorageError> {
+) -> Result<Option<FlagReadContext>, AconfigStorageError> {
     let interpreted_header = FlagTableHeader::from_bytes(buf)?;
     if interpreted_header.version > crate::FILE_VERSION {
         return Err(AconfigStorageError::HigherStorageFileVersion(anyhow!(
@@ -53,7 +58,10 @@
     loop {
         let interpreted_node = FlagTableNode::from_bytes(&buf[flag_node_offset..])?;
         if interpreted_node.package_id == package_id && interpreted_node.flag_name == flag {
-            return Ok(Some(interpreted_node.flag_id));
+            return Ok(Some(FlagReadContext {
+                flag_type: interpreted_node.flag_type,
+                flag_index: interpreted_node.flag_index,
+            }));
         }
         match interpreted_node.next_offset {
             Some(offset) => flag_node_offset = offset as usize,
@@ -72,19 +80,20 @@
     fn test_flag_query() {
         let flag_table = create_test_flag_table().into_bytes();
         let baseline = vec![
-            (0, "enabled_ro", 1u16),
-            (0, "enabled_rw", 2u16),
-            (1, "disabled_ro", 0u16),
-            (2, "enabled_ro", 1u16),
-            (1, "enabled_fixed_ro", 1u16),
-            (1, "enabled_ro", 2u16),
-            (2, "enabled_fixed_ro", 0u16),
-            (0, "disabled_rw", 0u16),
+            (0, "enabled_ro", StoredFlagType::ReadOnlyBoolean, 1u16),
+            (0, "enabled_rw", StoredFlagType::ReadWriteBoolean, 2u16),
+            (1, "disabled_ro", StoredFlagType::ReadOnlyBoolean, 0u16),
+            (2, "enabled_ro", StoredFlagType::ReadOnlyBoolean, 1u16),
+            (1, "enabled_fixed_ro", StoredFlagType::FixedReadOnlyBoolean, 1u16),
+            (1, "enabled_ro", StoredFlagType::ReadOnlyBoolean, 2u16),
+            (2, "enabled_fixed_ro", StoredFlagType::FixedReadOnlyBoolean, 0u16),
+            (0, "disabled_rw", StoredFlagType::ReadWriteBoolean, 0u16),
         ];
-        for (package_id, flag_name, expected_offset) in baseline.into_iter() {
-            let flag_offset =
-                find_flag_offset(&flag_table[..], package_id, flag_name).unwrap().unwrap();
-            assert_eq!(flag_offset, expected_offset);
+        for (package_id, flag_name, flag_type, flag_index) in baseline.into_iter() {
+            let flag_context =
+                find_flag_read_context(&flag_table[..], package_id, flag_name).unwrap().unwrap();
+            assert_eq!(flag_context.flag_type, flag_type);
+            assert_eq!(flag_context.flag_index, flag_index);
         }
     }
 
@@ -92,10 +101,10 @@
     // this test point locks down table query of a non exist flag
     fn test_not_existed_flag_query() {
         let flag_table = create_test_flag_table().into_bytes();
-        let flag_offset = find_flag_offset(&flag_table[..], 1, "disabled_fixed_ro").unwrap();
-        assert_eq!(flag_offset, None);
-        let flag_offset = find_flag_offset(&flag_table[..], 2, "disabled_rw").unwrap();
-        assert_eq!(flag_offset, None);
+        let flag_context = find_flag_read_context(&flag_table[..], 1, "disabled_fixed_ro").unwrap();
+        assert_eq!(flag_context, None);
+        let flag_context = find_flag_read_context(&flag_table[..], 2, "disabled_rw").unwrap();
+        assert_eq!(flag_context, None);
     }
 
     #[test]
@@ -104,7 +113,7 @@
         let mut table = create_test_flag_table();
         table.header.version = crate::FILE_VERSION + 1;
         let flag_table = table.into_bytes();
-        let error = find_flag_offset(&flag_table[..], 0, "enabled_ro").unwrap_err();
+        let error = find_flag_read_context(&flag_table[..], 0, "enabled_ro").unwrap_err();
         assert_eq!(
             format!("{:?}", error),
             format!(
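
A FlagReadContext only locates a flag within its package; to read the flag's value or attribute, the caller still has to add the owning package's boolean_start_index. A minimal sketch of that combination, using the types re-exported by this crate (the helper name is illustrative):

use aconfig_storage_read_api::{FlagReadContext, PackageReadContext};

/// Hypothetical helper: compute the global boolean index used by the flag
/// value and flag info files from the two read contexts.
fn global_boolean_index(package_ctx: &PackageReadContext, flag_ctx: &FlagReadContext) -> u32 {
    package_ctx.boolean_start_index + u32::from(flag_ctx.flag_index)
}
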
diff --git a/tools/aconfig/aconfig_storage_read_api/src/flag_value_query.rs b/tools/aconfig/aconfig_storage_read_api/src/flag_value_query.rs
index 964cd69..9d32a16 100644
--- a/tools/aconfig/aconfig_storage_read_api/src/flag_value_query.rs
+++ b/tools/aconfig/aconfig_storage_read_api/src/flag_value_query.rs
@@ -21,7 +21,7 @@
 use anyhow::anyhow;
 
 /// Query flag value
-pub fn find_boolean_flag_value(buf: &[u8], flag_offset: u32) -> Result<bool, AconfigStorageError> {
+pub fn find_boolean_flag_value(buf: &[u8], flag_index: u32) -> Result<bool, AconfigStorageError> {
     let interpreted_header = FlagValueHeader::from_bytes(buf)?;
     if interpreted_header.version > crate::FILE_VERSION {
         return Err(AconfigStorageError::HigherStorageFileVersion(anyhow!(
@@ -31,10 +31,8 @@
         )));
     }
 
-    let mut head = (interpreted_header.boolean_value_offset + flag_offset) as usize;
-
-    // TODO: right now, there is only boolean flags, with more flag value types added
-    // later, the end of boolean flag value section should be updated (b/322826265).
+    // Find the byte offset to the flag value; each boolean flag costs one byte to store
+    let mut head = (interpreted_header.boolean_value_offset + flag_index) as usize;
     if head >= interpreted_header.file_size as usize {
         return Err(AconfigStorageError::InvalidStorageFileOffset(anyhow!(
             "Flag value offset goes beyond the end of the file."
@@ -48,26 +46,13 @@
 #[cfg(test)]
 mod tests {
     use super::*;
-    use aconfig_storage_file::{FlagValueList, StorageFileType};
-
-    pub fn create_test_flag_value_list() -> FlagValueList {
-        let header = FlagValueHeader {
-            version: crate::FILE_VERSION,
-            container: String::from("system"),
-            file_type: StorageFileType::FlagVal as u8,
-            file_size: 35,
-            num_flags: 8,
-            boolean_value_offset: 27,
-        };
-        let booleans: Vec<bool> = vec![false, true, false, false, true, true, false, true];
-        FlagValueList { header, booleans }
-    }
+    use aconfig_storage_file::test_utils::create_test_flag_value_list;
 
     #[test]
     // this test point locks down flag value query
     fn test_flag_value_query() {
         let flag_value_list = create_test_flag_value_list().into_bytes();
-        let baseline: Vec<bool> = vec![false, true, false, false, true, true, false, true];
+        let baseline: Vec<bool> = vec![false, true, true, false, true, true, true, true];
         for (offset, expected_value) in baseline.into_iter().enumerate() {
             let flag_value = find_boolean_flag_value(&flag_value_list[..], offset as u32).unwrap();
             assert_eq!(flag_value, expected_value);
diff --git a/tools/aconfig/aconfig_storage_read_api/src/lib.rs b/tools/aconfig/aconfig_storage_read_api/src/lib.rs
index da64cb7..bc09112 100644
--- a/tools/aconfig/aconfig_storage_read_api/src/lib.rs
+++ b/tools/aconfig/aconfig_storage_read_api/src/lib.rs
@@ -17,14 +17,16 @@
 //! `aconfig_storage_read_api` is a crate that defines read apis to read flags from storage
 //! files. It provides four apis to interface with storage files:
 //!
-//! 1, function to get package flag value start offset
-//! pub fn get_package_offset(container: &str, package: &str) -> `Result<Option<PackageOffset>>>`
+//! 1, function to get package read context
+//! pub fn get_package_read_context(container: &str, package: &str)
+//! -> `Result<Option<PackageReadContext>>`
 //!
-//! 2, function to get flag offset within a specific package
-//! pub fn get_flag_offset(container: &str, package_id: u32, flag: &str) -> `Result<Option<u16>>>`
+//! 2, function to get flag read context
+//! pub fn get_flag_read_context(container: &str, package_id: u32, flag: &str)
+//! -> `Result<Option<FlagReadContext>>`
 //!
-//! 3, function to get the actual flag value given the global offset (combined package and
-//! flag offset).
+//! 3, function to get the actual flag value given the global index (combined package and
+//! flag index).
 //! pub fn get_boolean_flag_value(container: &str, offset: u32) -> `Result<bool>`
 //!
 //! 4, function to get storage file version without mmapping the file.
@@ -34,6 +36,7 @@
 //! apis. DO NOT DIRECTLY USE THESE APIS IN YOUR SOURCE CODE. For auto generated flag apis
 //! please refer to the g3doc go/android-flags
 
+pub mod flag_info_query;
 pub mod flag_table_query;
 pub mod flag_value_query;
 pub mod mapped_file;
@@ -42,14 +45,15 @@
 #[cfg(test)]
 mod test_utils;
 
-pub use aconfig_storage_file::{AconfigStorageError, StorageFileType};
-pub use flag_table_query::FlagOffset;
-pub use package_table_query::PackageOffset;
+pub use aconfig_storage_file::{AconfigStorageError, FlagValueType, StorageFileType};
+pub use flag_table_query::FlagReadContext;
+pub use package_table_query::PackageReadContext;
 
 use aconfig_storage_file::{read_u32_from_bytes, FILE_VERSION};
-use flag_table_query::find_flag_offset;
+use flag_info_query::find_flag_attribute;
+use flag_table_query::find_flag_read_context;
 use flag_value_query::find_boolean_flag_value;
-use package_table_query::find_package_offset;
+use package_table_query::find_package_read_context;
 
 use anyhow::anyhow;
 use memmap2::Mmap;
@@ -77,50 +81,50 @@
     unsafe { crate::mapped_file::get_mapped_file(STORAGE_LOCATION_FILE, container, file_type) }
 }
 
-/// Get package start offset for flags.
+/// Get package read context for a specific package.
 ///
 /// \input file: mapped package file
 /// \input package: package name
 ///
 /// \return
-/// If a package is found, it returns Ok(Some(PackageOffset))
+/// If a package is found, it returns Ok(Some(PackageReadContext))
 /// If a package is not found, it returns Ok(None)
 /// If errors out, it returns an Err(errmsg)
-pub fn get_package_offset(
+pub fn get_package_read_context(
     file: &Mmap,
     package: &str,
-) -> Result<Option<PackageOffset>, AconfigStorageError> {
-    find_package_offset(file, package)
+) -> Result<Option<PackageReadContext>, AconfigStorageError> {
+    find_package_read_context(file, package)
 }
 
-/// Get flag offset within a package given.
+/// Get flag read context for a specific flag.
 ///
 /// \input file: mapped flag file
 /// \input package_id: package id obtained from package mapping file
 /// \input flag: flag name
 ///
 /// \return
-/// If a flag is found, it returns Ok(Some(u16))
+/// If a flag is found, it returns Ok(Some(FlagReadContext))
 /// If a flag is not found, it returns Ok(None)
 /// If errors out, it returns an Err(errmsg)
-pub fn get_flag_offset(
+pub fn get_flag_read_context(
     file: &Mmap,
     package_id: u32,
     flag: &str,
-) -> Result<Option<FlagOffset>, AconfigStorageError> {
-    find_flag_offset(file, package_id, flag)
+) -> Result<Option<FlagReadContext>, AconfigStorageError> {
+    find_flag_read_context(file, package_id, flag)
 }
 
 /// Get the boolean flag value.
 ///
 /// \input file: mapped flag file
-/// \input offset: flag value offset
+/// \input index: boolean flag index
 ///
 /// \return
 /// If the provide offset is valid, it returns the boolean flag value, otherwise it
 /// returns the error message.
-pub fn get_boolean_flag_value(file: &Mmap, offset: u32) -> Result<bool, AconfigStorageError> {
-    find_boolean_flag_value(file, offset)
+pub fn get_boolean_flag_value(file: &Mmap, index: u32) -> Result<bool, AconfigStorageError> {
+    find_boolean_flag_value(file, index)
 }
 
 /// Get storage file version number
@@ -145,6 +149,23 @@
     read_u32_from_bytes(&buffer, &mut head)
 }
 
+/// Get the flag attribute.
+///
+/// \input file: mapped flag info file
+/// \input flag_type: flag value type
+/// \input flag_index: flag index
+///
+/// \return
+/// If the provided flag index is valid, it returns the flag attribute bitfield, otherwise it
+/// returns the error message.
+pub fn get_flag_attribute(
+    file: &Mmap,
+    flag_type: FlagValueType,
+    flag_index: u32,
+) -> Result<u8, AconfigStorageError> {
+    find_flag_attribute(file, flag_type, flag_index)
+}
+
 // *************************************** //
 // CC INTERLOP
 // *************************************** //
@@ -160,20 +181,21 @@
     }
 
     // Package table query return for cc interlop
-    pub struct PackageOffsetQueryCXX {
+    pub struct PackageReadContextQueryCXX {
         pub query_success: bool,
         pub error_message: String,
         pub package_exists: bool,
         pub package_id: u32,
-        pub boolean_offset: u32,
+        pub boolean_start_index: u32,
     }
 
     // Flag table query return for cc interlop
-    pub struct FlagOffsetQueryCXX {
+    pub struct FlagReadContextQueryCXX {
         pub query_success: bool,
         pub error_message: String,
         pub flag_exists: bool,
-        pub flag_offset: u16,
+        pub flag_type: u16,
+        pub flag_index: u16,
     }
 
     // Flag value query return for cc interlop
@@ -183,21 +205,43 @@
         pub flag_value: bool,
     }
 
+    // Flag info query return for cc interlop
+    pub struct FlagAttributeQueryCXX {
+        pub query_success: bool,
+        pub error_message: String,
+        pub flag_attribute: u8,
+    }
+
     // Rust export to c++
     extern "Rust" {
         pub fn get_storage_file_version_cxx(file_path: &str) -> VersionNumberQueryCXX;
 
-        pub fn get_package_offset_cxx(file: &[u8], package: &str) -> PackageOffsetQueryCXX;
+        pub fn get_package_read_context_cxx(
+            file: &[u8],
+            package: &str,
+        ) -> PackageReadContextQueryCXX;
 
-        pub fn get_flag_offset_cxx(file: &[u8], package_id: u32, flag: &str) -> FlagOffsetQueryCXX;
+        pub fn get_flag_read_context_cxx(
+            file: &[u8],
+            package_id: u32,
+            flag: &str,
+        ) -> FlagReadContextQueryCXX;
 
         pub fn get_boolean_flag_value_cxx(file: &[u8], offset: u32) -> BooleanFlagValueQueryCXX;
+
+        pub fn get_flag_attribute_cxx(
+            file: &[u8],
+            flag_type: u16,
+            flag_index: u32,
+        ) -> FlagAttributeQueryCXX;
     }
 }
 
 /// Implement the package offset interlop return type, create from actual package offset api return type
-impl ffi::PackageOffsetQueryCXX {
-    pub(crate) fn new(offset_result: Result<Option<PackageOffset>, AconfigStorageError>) -> Self {
+impl ffi::PackageReadContextQueryCXX {
+    pub(crate) fn new(
+        offset_result: Result<Option<PackageReadContext>, AconfigStorageError>,
+    ) -> Self {
         match offset_result {
             Ok(offset_opt) => match offset_opt {
                 Some(offset) => Self {
@@ -205,14 +249,14 @@
                     error_message: String::from(""),
                     package_exists: true,
                     package_id: offset.package_id,
-                    boolean_offset: offset.boolean_offset,
+                    boolean_start_index: offset.boolean_start_index,
                 },
                 None => Self {
                     query_success: true,
                     error_message: String::from(""),
                     package_exists: false,
                     package_id: 0,
-                    boolean_offset: 0,
+                    boolean_start_index: 0,
                 },
             },
             Err(errmsg) => Self {
@@ -220,35 +264,38 @@
                 error_message: format!("{:?}", errmsg),
                 package_exists: false,
                 package_id: 0,
-                boolean_offset: 0,
+                boolean_start_index: 0,
             },
         }
     }
 }
 
 /// Implement the flag offset interlop return type, create from actual flag offset api return type
-impl ffi::FlagOffsetQueryCXX {
-    pub(crate) fn new(offset_result: Result<Option<FlagOffset>, AconfigStorageError>) -> Self {
+impl ffi::FlagReadContextQueryCXX {
+    pub(crate) fn new(offset_result: Result<Option<FlagReadContext>, AconfigStorageError>) -> Self {
         match offset_result {
             Ok(offset_opt) => match offset_opt {
                 Some(offset) => Self {
                     query_success: true,
                     error_message: String::from(""),
                     flag_exists: true,
-                    flag_offset: offset,
+                    flag_type: offset.flag_type as u16,
+                    flag_index: offset.flag_index,
                 },
                 None => Self {
                     query_success: true,
                     error_message: String::from(""),
                     flag_exists: false,
-                    flag_offset: 0,
+                    flag_type: 0u16,
+                    flag_index: 0u16,
                 },
             },
             Err(errmsg) => Self {
                 query_success: false,
                 error_message: format!("{:?}", errmsg),
                 flag_exists: false,
-                flag_offset: 0,
+                flag_type: 0u16,
+                flag_index: 0u16,
             },
         }
     }
@@ -270,6 +317,22 @@
     }
 }
 
+/// Implement the flag info interlop return type, create from actual flag info api return type
+impl ffi::FlagAttributeQueryCXX {
+    pub(crate) fn new(info_result: Result<u8, AconfigStorageError>) -> Self {
+        match info_result {
+            Ok(info) => {
+                Self { query_success: true, error_message: String::from(""), flag_attribute: info }
+            }
+            Err(errmsg) => Self {
+                query_success: false,
+                error_message: format!("{:?}", errmsg),
+                flag_attribute: 0u8,
+            },
+        }
+    }
+}
+
 /// Implement the storage version number interlop return type, create from actual version number
 /// api return type
 impl ffi::VersionNumberQueryCXX {
@@ -289,14 +352,18 @@
     }
 }
 
-/// Get package start offset cc interlop
-pub fn get_package_offset_cxx(file: &[u8], package: &str) -> ffi::PackageOffsetQueryCXX {
-    ffi::PackageOffsetQueryCXX::new(find_package_offset(file, package))
+/// Get package read context cc interlop
+pub fn get_package_read_context_cxx(file: &[u8], package: &str) -> ffi::PackageReadContextQueryCXX {
+    ffi::PackageReadContextQueryCXX::new(find_package_read_context(file, package))
 }
 
-/// Get flag start offset cc interlop
-pub fn get_flag_offset_cxx(file: &[u8], package_id: u32, flag: &str) -> ffi::FlagOffsetQueryCXX {
-    ffi::FlagOffsetQueryCXX::new(find_flag_offset(file, package_id, flag))
+/// Get flag read context cc interlop
+pub fn get_flag_read_context_cxx(
+    file: &[u8],
+    package_id: u32,
+    flag: &str,
+) -> ffi::FlagReadContextQueryCXX {
+    ffi::FlagReadContextQueryCXX::new(find_flag_read_context(file, package_id, flag))
 }
 
 /// Get boolean flag value cc interlop
@@ -304,6 +371,20 @@
     ffi::BooleanFlagValueQueryCXX::new(find_boolean_flag_value(file, offset))
 }
 
+/// Get flag attribute cc interlop
+pub fn get_flag_attribute_cxx(
+    file: &[u8],
+    flag_type: u16,
+    flag_index: u32,
+) -> ffi::FlagAttributeQueryCXX {
+    match FlagValueType::try_from(flag_type) {
+        Ok(value_type) => {
+            ffi::FlagAttributeQueryCXX::new(find_flag_attribute(file, value_type, flag_index))
+        }
+        Err(errmsg) => ffi::FlagAttributeQueryCXX::new(Err(errmsg)),
+    }
+}
+
 /// Get storage version number cc interlop
 pub fn get_storage_file_version_cxx(file_path: &str) -> ffi::VersionNumberQueryCXX {
     ffi::VersionNumberQueryCXX::new(get_storage_file_version(file_path))
@@ -315,12 +396,14 @@
     use crate::mapped_file::get_mapped_file;
     use crate::test_utils::copy_to_temp_file;
     use aconfig_storage_file::protos::storage_record_pb::write_proto_to_temp_file;
+    use aconfig_storage_file::{FlagInfoBit, StoredFlagType};
     use tempfile::NamedTempFile;
 
-    fn create_test_storage_files() -> [NamedTempFile; 4] {
+    fn create_test_storage_files() -> [NamedTempFile; 5] {
         let package_map = copy_to_temp_file("./tests/package.map").unwrap();
         let flag_map = copy_to_temp_file("./tests/flag.map").unwrap();
         let flag_val = copy_to_temp_file("./tests/flag.val").unwrap();
+        let flag_info = copy_to_temp_file("./tests/flag.info").unwrap();
 
         let text_proto = format!(
             r#"
@@ -330,77 +413,80 @@
     package_map: "{}"
     flag_map: "{}"
     flag_val: "{}"
+    flag_info: "{}"
     timestamp: 12345
 }}
 "#,
             package_map.path().display(),
             flag_map.path().display(),
-            flag_val.path().display()
+            flag_val.path().display(),
+            flag_info.path().display()
         );
         let pb_file = write_proto_to_temp_file(&text_proto).unwrap();
-        [package_map, flag_map, flag_val, pb_file]
+        [package_map, flag_map, flag_val, flag_info, pb_file]
     }
 
     #[test]
-    // this test point locks down flag package offset query
-    fn test_package_offset_query() {
-        let [_package_map, _flag_map, _flag_val, pb_file] = create_test_storage_files();
+    // this test point locks down flag package read context query
+    fn test_package_context_query() {
+        let [_package_map, _flag_map, _flag_val, _flag_info, pb_file] = create_test_storage_files();
         let pb_file_path = pb_file.path().display().to_string();
         let package_mapped_file = unsafe {
             get_mapped_file(&pb_file_path, "mockup", StorageFileType::PackageMap).unwrap()
         };
 
-        let package_offset =
-            get_package_offset(&package_mapped_file, "com.android.aconfig.storage.test_1")
+        let package_context =
+            get_package_read_context(&package_mapped_file, "com.android.aconfig.storage.test_1")
                 .unwrap()
                 .unwrap();
-        let expected_package_offset = PackageOffset { package_id: 0, boolean_offset: 0 };
-        assert_eq!(package_offset, expected_package_offset);
+        let expected_package_context = PackageReadContext { package_id: 0, boolean_start_index: 0 };
+        assert_eq!(package_context, expected_package_context);
 
-        let package_offset =
-            get_package_offset(&package_mapped_file, "com.android.aconfig.storage.test_2")
+        let package_context =
+            get_package_read_context(&package_mapped_file, "com.android.aconfig.storage.test_2")
                 .unwrap()
                 .unwrap();
-        let expected_package_offset = PackageOffset { package_id: 1, boolean_offset: 3 };
-        assert_eq!(package_offset, expected_package_offset);
+        let expected_package_context = PackageReadContext { package_id: 1, boolean_start_index: 3 };
+        assert_eq!(package_context, expected_package_context);
 
-        let package_offset =
-            get_package_offset(&package_mapped_file, "com.android.aconfig.storage.test_4")
+        let package_context =
+            get_package_read_context(&package_mapped_file, "com.android.aconfig.storage.test_4")
                 .unwrap()
                 .unwrap();
-        let expected_package_offset = PackageOffset { package_id: 2, boolean_offset: 6 };
-        assert_eq!(package_offset, expected_package_offset);
+        let expected_package_context = PackageReadContext { package_id: 2, boolean_start_index: 6 };
+        assert_eq!(package_context, expected_package_context);
     }
 
     #[test]
-    // this test point locks down flag offset query
-    fn test_flag_offset_query() {
-        let [_package_map, _flag_map, _flag_val, pb_file] = create_test_storage_files();
+    // this test point locks down flag read context query
+    fn test_flag_context_query() {
+        let [_package_map, _flag_map, _flag_val, _flag_info, pb_file] = create_test_storage_files();
         let pb_file_path = pb_file.path().display().to_string();
         let flag_mapped_file =
             unsafe { get_mapped_file(&pb_file_path, "mockup", StorageFileType::FlagMap).unwrap() };
 
         let baseline = vec![
-            (0, "enabled_ro", 1u16),
-            (0, "enabled_rw", 2u16),
-            (1, "disabled_ro", 0u16),
-            (2, "enabled_ro", 1u16),
-            (1, "enabled_fixed_ro", 1u16),
-            (1, "enabled_ro", 2u16),
-            (2, "enabled_fixed_ro", 0u16),
-            (0, "disabled_rw", 0u16),
+            (0, "enabled_ro", StoredFlagType::ReadOnlyBoolean, 1u16),
+            (0, "enabled_rw", StoredFlagType::ReadWriteBoolean, 2u16),
+            (1, "disabled_ro", StoredFlagType::ReadOnlyBoolean, 0u16),
+            (2, "enabled_ro", StoredFlagType::ReadOnlyBoolean, 1u16),
+            (1, "enabled_fixed_ro", StoredFlagType::FixedReadOnlyBoolean, 1u16),
+            (1, "enabled_ro", StoredFlagType::ReadOnlyBoolean, 2u16),
+            (2, "enabled_fixed_ro", StoredFlagType::FixedReadOnlyBoolean, 0u16),
+            (0, "disabled_rw", StoredFlagType::ReadWriteBoolean, 0u16),
         ];
-        for (package_id, flag_name, expected_offset) in baseline.into_iter() {
-            let flag_offset =
-                get_flag_offset(&flag_mapped_file, package_id, flag_name).unwrap().unwrap();
-            assert_eq!(flag_offset, expected_offset);
+        for (package_id, flag_name, flag_type, flag_index) in baseline.into_iter() {
+            let flag_context =
+                get_flag_read_context(&flag_mapped_file, package_id, flag_name).unwrap().unwrap();
+            assert_eq!(flag_context.flag_type, flag_type);
+            assert_eq!(flag_context.flag_index, flag_index);
         }
     }
 
     #[test]
-    // this test point locks down flag offset query
+    // this test point locks down flag value query
     fn test_flag_value_query() {
-        let [_package_map, _flag_map, _flag_val, pb_file] = create_test_storage_files();
+        let [_package_map, _flag_map, _flag_val, _flag_info, pb_file] = create_test_storage_files();
         let pb_file_path = pb_file.path().display().to_string();
         let flag_value_file =
             unsafe { get_mapped_file(&pb_file_path, "mockup", StorageFileType::FlagVal).unwrap() };
@@ -412,10 +498,28 @@
     }
 
     #[test]
+    // this test point locks down flag info query
+    fn test_flag_info_query() {
+        let [_package_map, _flag_map, _flag_val, _flag_info, pb_file] = create_test_storage_files();
+        let pb_file_path = pb_file.path().display().to_string();
+        let flag_info_file =
+            unsafe { get_mapped_file(&pb_file_path, "mockup", StorageFileType::FlagInfo).unwrap() };
+        let is_rw: Vec<bool> = vec![true, false, true, false, false, false, false, false];
+        for (offset, expected_value) in is_rw.into_iter().enumerate() {
+            let attribute =
+                get_flag_attribute(&flag_info_file, FlagValueType::Boolean, offset as u32).unwrap();
+            assert!((attribute & FlagInfoBit::IsSticky as u8) == 0u8);
+            assert_eq!((attribute & FlagInfoBit::IsReadWrite as u8) != 0u8, expected_value);
+            assert!((attribute & FlagInfoBit::HasOverride as u8) == 0u8);
+        }
+    }
+
+    #[test]
     // this test point locks down flag storage file version number query api
     fn test_storage_version_query() {
         assert_eq!(get_storage_file_version("./tests/package.map").unwrap(), 1);
         assert_eq!(get_storage_file_version("./tests/flag.map").unwrap(), 1);
         assert_eq!(get_storage_file_version("./tests/flag.val").unwrap(), 1);
+        assert_eq!(get_storage_file_version("./tests/flag.info").unwrap(), 1);
     }
 }
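
A minimal end-to-end sketch of the public read APIs exercised above (illustrative only; the pb path and the "mockup" container name are placeholders borrowed from the test setup):

use aconfig_storage_file::{FlagValueType, StorageFileType};
use aconfig_storage_read_api::{
    get_boolean_flag_value, get_flag_attribute, get_flag_read_context, get_package_read_context,
    mapped_file::get_mapped_file, AconfigStorageError,
};

fn read_one_flag(pb_path: &str, package: &str, flag: &str) -> Result<(bool, u8), AconfigStorageError> {
    // SAFETY: assumes no concurrent writer mutates the mapped storage files,
    // matching the safety argument made in the tests above.
    let package_map = unsafe { get_mapped_file(pb_path, "mockup", StorageFileType::PackageMap)? };
    let flag_map = unsafe { get_mapped_file(pb_path, "mockup", StorageFileType::FlagMap)? };
    let flag_val = unsafe { get_mapped_file(pb_path, "mockup", StorageFileType::FlagVal)? };
    let flag_info = unsafe { get_mapped_file(pb_path, "mockup", StorageFileType::FlagInfo)? };

    let package_ctx =
        get_package_read_context(&package_map, package)?.expect("package not found");
    let flag_ctx =
        get_flag_read_context(&flag_map, package_ctx.package_id, flag)?.expect("flag not found");

    // Global boolean index: package start index plus within-package flag index.
    let index = package_ctx.boolean_start_index + u32::from(flag_ctx.flag_index);
    let value = get_boolean_flag_value(&flag_val, index)?;
    let attribute = get_flag_attribute(&flag_info, FlagValueType::Boolean, index)?;
    Ok((value, attribute))
}
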
diff --git a/tools/aconfig/aconfig_storage_read_api/src/mapped_file.rs b/tools/aconfig/aconfig_storage_read_api/src/mapped_file.rs
index 51354db..3786443 100644
--- a/tools/aconfig/aconfig_storage_read_api/src/mapped_file.rs
+++ b/tools/aconfig/aconfig_storage_read_api/src/mapped_file.rs
@@ -29,7 +29,7 @@
 };
 
 /// Find where storage files are stored for a particular container
-fn find_container_storage_location(
+pub fn find_container_storage_location(
     location_pb_file: &str,
     container: &str,
 ) -> Result<ProtoStorageFileInfo, AconfigStorageError> {
@@ -91,9 +91,7 @@
         StorageFileType::PackageMap => unsafe { map_file(files_location.package_map()) },
         StorageFileType::FlagMap => unsafe { map_file(files_location.flag_map()) },
         StorageFileType::FlagVal => unsafe { map_file(files_location.flag_val()) },
-        StorageFileType::FlagInfo => {
-            Err(MapFileFail(anyhow!("TODO: add support for flag info file")))
-        }
+        StorageFileType::FlagInfo => unsafe { map_file(files_location.flag_info()) },
     }
 }
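
The diff above also makes find_container_storage_location public. A minimal sketch of calling it directly (the path argument is a placeholder, and the accessor usage mirrors what get_mapped_file does above):

use aconfig_storage_read_api::mapped_file::find_container_storage_location;
use aconfig_storage_read_api::AconfigStorageError;

/// Hypothetical helper: print where a container's flag info file lives.
fn print_flag_info_path(location_pb_file: &str, container: &str) -> Result<(), AconfigStorageError> {
    let files_location = find_container_storage_location(location_pb_file, container)?;
    // flag_info() is the same proto accessor used when mapping the file above.
    println!("flag info file: {}", files_location.flag_info());
    Ok(())
}
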
 
diff --git a/tools/aconfig/aconfig_storage_read_api/src/package_table_query.rs b/tools/aconfig/aconfig_storage_read_api/src/package_table_query.rs
index d83844e..2cb854b 100644
--- a/tools/aconfig/aconfig_storage_read_api/src/package_table_query.rs
+++ b/tools/aconfig/aconfig_storage_read_api/src/package_table_query.rs
@@ -24,16 +24,16 @@
 
 /// Package table query return
 #[derive(PartialEq, Debug)]
-pub struct PackageOffset {
+pub struct PackageReadContext {
     pub package_id: u32,
-    pub boolean_offset: u32,
+    pub boolean_start_index: u32,
 }
 
-/// Query package id and start offset
-pub fn find_package_offset(
+/// Query package read context: package id and start index
+pub fn find_package_read_context(
     buf: &[u8],
     package: &str,
-) -> Result<Option<PackageOffset>, AconfigStorageError> {
+) -> Result<Option<PackageReadContext>, AconfigStorageError> {
     let interpreted_header = PackageTableHeader::from_bytes(buf)?;
     if interpreted_header.version > FILE_VERSION {
         return Err(AconfigStorageError::HigherStorageFileVersion(anyhow!(
@@ -57,9 +57,9 @@
     loop {
         let interpreted_node = PackageTableNode::from_bytes(&buf[package_node_offset..])?;
         if interpreted_node.package_name == package {
-            return Ok(Some(PackageOffset {
+            return Ok(Some(PackageReadContext {
                 package_id: interpreted_node.package_id,
-                boolean_offset: interpreted_node.boolean_offset,
+                boolean_start_index: interpreted_node.boolean_start_index,
             }));
         }
         match interpreted_node.next_offset {
@@ -78,24 +78,24 @@
     // this test point locks down table query
     fn test_package_query() {
         let package_table = create_test_package_table().into_bytes();
-        let package_offset =
-            find_package_offset(&package_table[..], "com.android.aconfig.storage.test_1")
+        let package_context =
+            find_package_read_context(&package_table[..], "com.android.aconfig.storage.test_1")
                 .unwrap()
                 .unwrap();
-        let expected_package_offset = PackageOffset { package_id: 0, boolean_offset: 0 };
-        assert_eq!(package_offset, expected_package_offset);
-        let package_offset =
-            find_package_offset(&package_table[..], "com.android.aconfig.storage.test_2")
+        let expected_package_context = PackageReadContext { package_id: 0, boolean_start_index: 0 };
+        assert_eq!(package_context, expected_package_context);
+        let package_context =
+            find_package_read_context(&package_table[..], "com.android.aconfig.storage.test_2")
                 .unwrap()
                 .unwrap();
-        let expected_package_offset = PackageOffset { package_id: 1, boolean_offset: 3 };
-        assert_eq!(package_offset, expected_package_offset);
-        let package_offset =
-            find_package_offset(&package_table[..], "com.android.aconfig.storage.test_4")
+        let expected_package_context = PackageReadContext { package_id: 1, boolean_start_index: 3 };
+        assert_eq!(package_context, expected_package_context);
+        let package_context =
+            find_package_read_context(&package_table[..], "com.android.aconfig.storage.test_4")
                 .unwrap()
                 .unwrap();
-        let expected_package_offset = PackageOffset { package_id: 2, boolean_offset: 6 };
-        assert_eq!(package_offset, expected_package_offset);
+        let expected_package_context = PackageReadContext { package_id: 2, boolean_start_index: 6 };
+        assert_eq!(package_context, expected_package_context);
     }
 
     #[test]
@@ -103,13 +103,15 @@
     fn test_not_existed_package_query() {
         // this will land at an empty bucket
         let package_table = create_test_package_table().into_bytes();
-        let package_offset =
-            find_package_offset(&package_table[..], "com.android.aconfig.storage.test_3").unwrap();
-        assert_eq!(package_offset, None);
+        let package_context =
+            find_package_read_context(&package_table[..], "com.android.aconfig.storage.test_3")
+                .unwrap();
+        assert_eq!(package_context, None);
         // this will land at the end of a linked list
-        let package_offset =
-            find_package_offset(&package_table[..], "com.android.aconfig.storage.test_5").unwrap();
-        assert_eq!(package_offset, None);
+        let package_context =
+            find_package_read_context(&package_table[..], "com.android.aconfig.storage.test_5")
+                .unwrap();
+        assert_eq!(package_context, None);
     }
 
     #[test]
@@ -118,8 +120,9 @@
         let mut table = create_test_package_table();
         table.header.version = crate::FILE_VERSION + 1;
         let package_table = table.into_bytes();
-        let error = find_package_offset(&package_table[..], "com.android.aconfig.storage.test_1")
-            .unwrap_err();
+        let error =
+            find_package_read_context(&package_table[..], "com.android.aconfig.storage.test_1")
+                .unwrap_err();
         assert_eq!(
             format!("{:?}", error),
             format!(
diff --git a/tools/aconfig/aconfig_storage_read_api/tests/Android.bp b/tools/aconfig/aconfig_storage_read_api/tests/Android.bp
index d9cf238..6b05ca6 100644
--- a/tools/aconfig/aconfig_storage_read_api/tests/Android.bp
+++ b/tools/aconfig/aconfig_storage_read_api/tests/Android.bp
@@ -14,6 +14,7 @@
         "package.map",
         "flag.map",
         "flag.val",
+        "flag.info",
     ],
     test_suites: ["general-tests"],
 }
@@ -35,6 +36,7 @@
         "package.map",
         "flag.map",
         "flag.val",
+        "flag.info",
     ],
     test_suites: [
         "device-tests",
diff --git a/tools/aconfig/aconfig_storage_read_api/tests/flag.info b/tools/aconfig/aconfig_storage_read_api/tests/flag.info
new file mode 100644
index 0000000..820d839
--- /dev/null
+++ b/tools/aconfig/aconfig_storage_read_api/tests/flag.info
Binary files differ
diff --git a/tools/aconfig/aconfig_storage_read_api/tests/storage_read_api_test.cpp b/tools/aconfig/aconfig_storage_read_api/tests/storage_read_api_test.cpp
index 539474b..10f71a5 100644
--- a/tools/aconfig/aconfig_storage_read_api/tests/storage_read_api_test.cpp
+++ b/tools/aconfig/aconfig_storage_read_api/tests/storage_read_api_test.cpp
@@ -47,7 +47,8 @@
 
   Result<std::string> write_storage_location_pb_file(std::string const& package_map,
                                                      std::string const& flag_map,
-                                                     std::string const& flag_val) {
+                                                     std::string const& flag_val,
+                                                     std::string const& flag_info) {
     auto temp_file = std::tmpnam(nullptr);
     auto proto = storage_files();
     auto* info = proto.add_files();
@@ -56,6 +57,7 @@
     info->set_package_map(package_map);
     info->set_flag_map(flag_map);
     info->set_flag_val(flag_val);
+    info->set_flag_info(flag_info);
     info->set_timestamp(12345);
 
     auto content = std::string();
@@ -71,20 +73,23 @@
     package_map = *copy_to_temp_file(test_dir + "/package.map");
     flag_map = *copy_to_temp_file(test_dir + "/flag.map");
     flag_val = *copy_to_temp_file(test_dir + "/flag.val");
+    flag_info = *copy_to_temp_file(test_dir + "/flag.info");
     storage_record_pb = *write_storage_location_pb_file(
-        package_map, flag_map, flag_val);
+        package_map, flag_map, flag_val, flag_info);
   }
 
   void TearDown() override {
     std::remove(package_map.c_str());
     std::remove(flag_map.c_str());
     std::remove(flag_val.c_str());
+    std::remove(flag_info.c_str());
     std::remove(storage_record_pb.c_str());
   }
 
   std::string package_map;
   std::string flag_map;
   std::string flag_val;
+  std::string flag_info;
   std::string storage_record_pb;
 };
 
@@ -99,6 +104,9 @@
   version = api::get_storage_file_version(flag_val);
   ASSERT_TRUE(version.ok());
   ASSERT_EQ(*version, 1);
+  version = api::get_storage_file_version(flag_info);
+  ASSERT_TRUE(version.ok());
+  ASSERT_EQ(*version, 1);
 }
 
 /// Negative test to lock down the error when mapping none exist storage files
@@ -110,83 +118,84 @@
             "Unable to find storage files for container vendor");
 }
 
-/// Test to lock down storage package offset query api
-TEST_F(AconfigStorageTest, test_package_offset_query) {
+/// Test to lock down storage package context query api
+TEST_F(AconfigStorageTest, test_package_context_query) {
   auto mapped_file = private_api::get_mapped_file_impl(
       storage_record_pb, "mockup", api::StorageFileType::package_map);
   ASSERT_TRUE(mapped_file.ok());
 
-  auto offset = api::get_package_offset(
+  auto context = api::get_package_read_context(
       *mapped_file, "com.android.aconfig.storage.test_1");
-  ASSERT_TRUE(offset.ok());
-  ASSERT_TRUE(offset->package_exists);
-  ASSERT_EQ(offset->package_id, 0);
-  ASSERT_EQ(offset->boolean_offset, 0);
+  ASSERT_TRUE(context.ok());
+  ASSERT_TRUE(context->package_exists);
+  ASSERT_EQ(context->package_id, 0);
+  ASSERT_EQ(context->boolean_start_index, 0);
 
-  offset = api::get_package_offset(
+  context = api::get_package_read_context(
       *mapped_file, "com.android.aconfig.storage.test_2");
-  ASSERT_TRUE(offset.ok());
-  ASSERT_TRUE(offset->package_exists);
-  ASSERT_EQ(offset->package_id, 1);
-  ASSERT_EQ(offset->boolean_offset, 3);
+  ASSERT_TRUE(context.ok());
+  ASSERT_TRUE(context->package_exists);
+  ASSERT_EQ(context->package_id, 1);
+  ASSERT_EQ(context->boolean_start_index, 3);
 
-  offset = api::get_package_offset(
+  context = api::get_package_read_context(
       *mapped_file, "com.android.aconfig.storage.test_4");
-  ASSERT_TRUE(offset.ok());
-  ASSERT_TRUE(offset->package_exists);
-  ASSERT_EQ(offset->package_id, 2);
-  ASSERT_EQ(offset->boolean_offset, 6);
+  ASSERT_TRUE(context.ok());
+  ASSERT_TRUE(context->package_exists);
+  ASSERT_EQ(context->package_id, 2);
+  ASSERT_EQ(context->boolean_start_index, 6);
 }
 
 /// Test to lock down when querying none exist package
-TEST_F(AconfigStorageTest, test_none_existent_package_offset_query) {
+TEST_F(AconfigStorageTest, test_none_existent_package_context_query) {
   auto mapped_file = private_api::get_mapped_file_impl(
       storage_record_pb, "mockup", api::StorageFileType::package_map);
   ASSERT_TRUE(mapped_file.ok());
 
-  auto offset = api::get_package_offset(
+  auto context = api::get_package_read_context(
       *mapped_file, "com.android.aconfig.storage.test_3");
-  ASSERT_TRUE(offset.ok());
-  ASSERT_FALSE(offset->package_exists);
+  ASSERT_TRUE(context.ok());
+  ASSERT_FALSE(context->package_exists);
 }
 
-/// Test to lock down storage flag offset query api
-TEST_F(AconfigStorageTest, test_flag_offset_query) {
+/// Test to lock down storage flag context query api
+TEST_F(AconfigStorageTest, test_flag_context_query) {
   auto mapped_file = private_api::get_mapped_file_impl(
       storage_record_pb, "mockup", api::StorageFileType::flag_map);
   ASSERT_TRUE(mapped_file.ok());
 
-  auto baseline = std::vector<std::tuple<int, std::string, int>>{
-    {0, "enabled_ro", 1},
-    {0, "enabled_rw", 2},
-    {1, "disabled_ro", 0},
-    {2, "enabled_ro", 1},
-    {1, "enabled_fixed_ro", 1},
-    {1, "enabled_ro", 2},
-    {2, "enabled_fixed_ro", 0},
-    {0, "disabled_rw", 0},
+  auto baseline = std::vector<std::tuple<int, std::string, api::StoredFlagType, int>>{
+    {0, "enabled_ro", api::StoredFlagType::ReadOnlyBoolean, 1},
+    {0, "enabled_rw", api::StoredFlagType::ReadWriteBoolean, 2},
+    {1, "disabled_ro", api::StoredFlagType::ReadOnlyBoolean, 0},
+    {2, "enabled_ro", api::StoredFlagType::ReadOnlyBoolean, 1},
+    {1, "enabled_fixed_ro", api::StoredFlagType::FixedReadOnlyBoolean, 1},
+    {1, "enabled_ro", api::StoredFlagType::ReadOnlyBoolean, 2},
+    {2, "enabled_fixed_ro", api::StoredFlagType::FixedReadOnlyBoolean, 0},
+    {0, "disabled_rw", api::StoredFlagType::ReadWriteBoolean, 0},
   };
-  for (auto const&[package_id, flag_name, expected_offset] : baseline) {
-    auto offset = api::get_flag_offset(*mapped_file, package_id, flag_name);
-    ASSERT_TRUE(offset.ok());
-    ASSERT_TRUE(offset->flag_exists);
-    ASSERT_EQ(offset->flag_offset, expected_offset);
+  for (auto const&[package_id, flag_name, flag_type, flag_index] : baseline) {
+    auto context = api::get_flag_read_context(*mapped_file, package_id, flag_name);
+    ASSERT_TRUE(context.ok());
+    ASSERT_TRUE(context->flag_exists);
+    ASSERT_EQ(context->flag_type, flag_type);
+    ASSERT_EQ(context->flag_index, flag_index);
   }
 }
 
 /// Test to lock down when querying none exist flag
-TEST_F(AconfigStorageTest, test_none_existent_flag_offset_query) {
+TEST_F(AconfigStorageTest, test_none_existent_flag_context_query) {
   auto mapped_file = private_api::get_mapped_file_impl(
       storage_record_pb, "mockup", api::StorageFileType::flag_map);
   ASSERT_TRUE(mapped_file.ok());
 
-  auto offset = api::get_flag_offset(*mapped_file, 0, "none_exist");
-  ASSERT_TRUE(offset.ok());
-  ASSERT_FALSE(offset->flag_exists);
+  auto context = api::get_flag_read_context(*mapped_file, 0, "none_exist");
+  ASSERT_TRUE(context.ok());
+  ASSERT_FALSE(context->flag_exists);
 
-  offset = api::get_flag_offset(*mapped_file, 3, "enabled_ro");
-  ASSERT_TRUE(offset.ok());
-  ASSERT_FALSE(offset->flag_exists);
+  context = api::get_flag_read_context(*mapped_file, 3, "enabled_ro");
+  ASSERT_TRUE(context.ok());
+  ASSERT_FALSE(context->flag_exists);
 }
 
 /// Test to lock down storage flag value query api
@@ -197,10 +206,10 @@
 
   auto expected_value = std::vector<bool>{
     false, true, true, false, true, true, true, true};
-  for (int offset = 0; offset < 8; ++offset) {
-    auto value = api::get_boolean_flag_value(*mapped_file, offset);
+  for (int index = 0; index < 8; ++index) {
+    auto value = api::get_boolean_flag_value(*mapped_file, index);
     ASSERT_TRUE(value.ok());
-    ASSERT_EQ(*value, expected_value[offset]);
+    ASSERT_EQ(*value, expected_value[index]);
   }
 }
 
@@ -215,3 +224,33 @@
   ASSERT_EQ(value.error().message(),
             std::string("InvalidStorageFileOffset(Flag value offset goes beyond the end of the file.)"));
 }
+
+/// Test to lock down storage flag info query api
+TEST_F(AconfigStorageTest, test_boolean_flag_info_query) {
+  auto mapped_file = private_api::get_mapped_file_impl(
+      storage_record_pb, "mockup", api::StorageFileType::flag_info);
+  ASSERT_TRUE(mapped_file.ok());
+
+  auto expected_value = std::vector<bool>{
+    true, false, true, false, false, false, false, false};
+  for (int index = 0; index < 8; ++index) {
+    auto attribute = api::get_flag_attribute(*mapped_file, api::FlagValueType::Boolean, index);
+    ASSERT_TRUE(attribute.ok());
+    ASSERT_EQ(*attribute & static_cast<uint8_t>(api::FlagInfoBit::IsSticky), 0);
+    ASSERT_EQ((*attribute & static_cast<uint8_t>(api::FlagInfoBit::IsReadWrite)) != 0,
+              expected_value[index]);
+    ASSERT_EQ(*attribute & static_cast<uint8_t>(api::FlagInfoBit::HasOverride), 0);
+  }
+}
+
+/// Negative test to lock down the error when querying flag info out of range
+TEST_F(AconfigStorageTest, test_invalid_boolean_flag_info_query) {
+  auto mapped_file = private_api::get_mapped_file_impl(
+      storage_record_pb, "mockup", api::StorageFileType::flag_info);
+  ASSERT_TRUE(mapped_file.ok());
+
+  auto attribute = api::get_flag_attribute(*mapped_file, api::FlagValueType::Boolean, 8);
+  ASSERT_FALSE(attribute.ok());
+  ASSERT_EQ(attribute.error().message(),
+            std::string("InvalidStorageFileOffset(Flag info offset goes beyond the end of the file.)"));
+}
diff --git a/tools/aconfig/aconfig_storage_read_api/tests/storage_read_api_test.rs b/tools/aconfig/aconfig_storage_read_api/tests/storage_read_api_test.rs
index 7687d0f..212f734 100644
--- a/tools/aconfig/aconfig_storage_read_api/tests/storage_read_api_test.rs
+++ b/tools/aconfig/aconfig_storage_read_api/tests/storage_read_api_test.rs
@@ -1,10 +1,11 @@
 #[cfg(not(feature = "cargo"))]
 mod aconfig_storage_rust_test {
     use aconfig_storage_file::protos::storage_record_pb::write_proto_to_temp_file;
-    use aconfig_storage_file::StorageFileType;
+    use aconfig_storage_file::{FlagInfoBit, FlagValueType, StorageFileType, StoredFlagType};
     use aconfig_storage_read_api::{
-        get_boolean_flag_value, get_flag_offset, get_package_offset, get_storage_file_version,
-        mapped_file::get_mapped_file, PackageOffset,
+        get_boolean_flag_value, get_flag_attribute, get_flag_read_context,
+        get_package_read_context, get_storage_file_version, mapped_file::get_mapped_file,
+        PackageReadContext,
     };
     use std::fs;
     use tempfile::NamedTempFile;
@@ -15,10 +16,11 @@
         file
     }
 
-    fn create_test_storage_files() -> [NamedTempFile; 4] {
+    fn create_test_storage_files() -> [NamedTempFile; 5] {
         let package_map = copy_to_temp_file("./package.map");
         let flag_map = copy_to_temp_file("./flag.map");
         let flag_val = copy_to_temp_file("./flag.val");
+        let flag_info = copy_to_temp_file("./flag.info");
 
         let text_proto = format!(
             r#"
@@ -28,20 +30,22 @@
     package_map: "{}"
     flag_map: "{}"
     flag_val: "{}"
+    flag_info: "{}"
     timestamp: 12345
 }}
 "#,
             package_map.path().display(),
             flag_map.path().display(),
-            flag_val.path().display()
+            flag_val.path().display(),
+            flag_info.path().display()
         );
         let pb_file = write_proto_to_temp_file(&text_proto).unwrap();
-        [package_map, flag_map, flag_val, pb_file]
+        [package_map, flag_map, flag_val, flag_info, pb_file]
     }
 
     #[test]
     fn test_unavailable_stoarge() {
-        let [_package_map, _flag_map, _flag_val, pb_file] = create_test_storage_files();
+        let [_package_map, _flag_map, _flag_val, _flag_info, pb_file] = create_test_storage_files();
         let pb_file_path = pb_file.path().display().to_string();
         // SAFETY:
         // The safety here is ensured as the test process will not write to temp storage file
@@ -55,8 +59,8 @@
     }
 
     #[test]
-    fn test_package_offset_query() {
-        let [_package_map, _flag_map, _flag_val, pb_file] = create_test_storage_files();
+    fn test_package_context_query() {
+        let [_package_map, _flag_map, _flag_val, _flag_info, pb_file] = create_test_storage_files();
         let pb_file_path = pb_file.path().display().to_string();
         // SAFETY:
         // The safety here is ensured as the test process will not write to temp storage file
@@ -64,31 +68,31 @@
             get_mapped_file(&pb_file_path, "mockup", StorageFileType::PackageMap).unwrap()
         };
 
-        let package_offset =
-            get_package_offset(&package_mapped_file, "com.android.aconfig.storage.test_1")
+        let package_context =
+            get_package_read_context(&package_mapped_file, "com.android.aconfig.storage.test_1")
                 .unwrap()
                 .unwrap();
-        let expected_package_offset = PackageOffset { package_id: 0, boolean_offset: 0 };
-        assert_eq!(package_offset, expected_package_offset);
+        let expected_package_context = PackageReadContext { package_id: 0, boolean_start_index: 0 };
+        assert_eq!(package_context, expected_package_context);
 
-        let package_offset =
-            get_package_offset(&package_mapped_file, "com.android.aconfig.storage.test_2")
+        let package_context =
+            get_package_read_context(&package_mapped_file, "com.android.aconfig.storage.test_2")
                 .unwrap()
                 .unwrap();
-        let expected_package_offset = PackageOffset { package_id: 1, boolean_offset: 3 };
-        assert_eq!(package_offset, expected_package_offset);
+        let expected_package_context = PackageReadContext { package_id: 1, boolean_start_index: 3 };
+        assert_eq!(package_context, expected_package_context);
 
-        let package_offset =
-            get_package_offset(&package_mapped_file, "com.android.aconfig.storage.test_4")
+        let package_context =
+            get_package_read_context(&package_mapped_file, "com.android.aconfig.storage.test_4")
                 .unwrap()
                 .unwrap();
-        let expected_package_offset = PackageOffset { package_id: 2, boolean_offset: 6 };
-        assert_eq!(package_offset, expected_package_offset);
+        let expected_package_context = PackageReadContext { package_id: 2, boolean_start_index: 6 };
+        assert_eq!(package_context, expected_package_context);
     }
 
     #[test]
-    fn test_none_exist_package_offset_query() {
-        let [_package_map, _flag_map, _flag_val, pb_file] = create_test_storage_files();
+    fn test_none_exist_package_context_query() {
+        let [_package_map, _flag_map, _flag_val, _flag_info, pb_file] = create_test_storage_files();
         let pb_file_path = pb_file.path().display().to_string();
         // SAFETY:
         // The safety here is ensured as the test process will not write to temp storage file
@@ -96,14 +100,15 @@
             get_mapped_file(&pb_file_path, "mockup", StorageFileType::PackageMap).unwrap()
         };
 
-        let package_offset_option =
-            get_package_offset(&package_mapped_file, "com.android.aconfig.storage.test_3").unwrap();
-        assert_eq!(package_offset_option, None);
+        let package_context_option =
+            get_package_read_context(&package_mapped_file, "com.android.aconfig.storage.test_3")
+                .unwrap();
+        assert_eq!(package_context_option, None);
     }
 
     #[test]
-    fn test_flag_offset_query() {
-        let [_package_map, _flag_map, _flag_val, pb_file] = create_test_storage_files();
+    fn test_flag_context_query() {
+        let [_package_map, _flag_map, _flag_val, _flag_info, pb_file] = create_test_storage_files();
         let pb_file_path = pb_file.path().display().to_string();
         // SAFETY:
         // The safety here is ensured as the test process will not write to temp storage file
@@ -111,40 +116,43 @@
             unsafe { get_mapped_file(&pb_file_path, "mockup", StorageFileType::FlagMap).unwrap() };
 
         let baseline = vec![
-            (0, "enabled_ro", 1u16),
-            (0, "enabled_rw", 2u16),
-            (1, "disabled_ro", 0u16),
-            (2, "enabled_ro", 1u16),
-            (1, "enabled_fixed_ro", 1u16),
-            (1, "enabled_ro", 2u16),
-            (2, "enabled_fixed_ro", 0u16),
-            (0, "disabled_rw", 0u16),
+            (0, "enabled_ro", StoredFlagType::ReadOnlyBoolean, 1u16),
+            (0, "enabled_rw", StoredFlagType::ReadWriteBoolean, 2u16),
+            (1, "disabled_ro", StoredFlagType::ReadOnlyBoolean, 0u16),
+            (2, "enabled_ro", StoredFlagType::ReadOnlyBoolean, 1u16),
+            (1, "enabled_fixed_ro", StoredFlagType::FixedReadOnlyBoolean, 1u16),
+            (1, "enabled_ro", StoredFlagType::ReadOnlyBoolean, 2u16),
+            (2, "enabled_fixed_ro", StoredFlagType::FixedReadOnlyBoolean, 0u16),
+            (0, "disabled_rw", StoredFlagType::ReadWriteBoolean, 0u16),
         ];
-        for (package_id, flag_name, expected_offset) in baseline.into_iter() {
-            let flag_offset =
-                get_flag_offset(&flag_mapped_file, package_id, flag_name).unwrap().unwrap();
-            assert_eq!(flag_offset, expected_offset);
+        for (package_id, flag_name, flag_type, flag_index) in baseline.into_iter() {
+            let flag_context =
+                get_flag_read_context(&flag_mapped_file, package_id, flag_name).unwrap().unwrap();
+            assert_eq!(flag_context.flag_type, flag_type);
+            assert_eq!(flag_context.flag_index, flag_index);
         }
     }
 
     #[test]
-    fn test_none_exist_flag_offset_query() {
-        let [_package_map, _flag_map, _flag_val, pb_file] = create_test_storage_files();
+    fn test_none_exist_flag_context_query() {
+        let [_package_map, _flag_map, _flag_val, _flag_info, pb_file] = create_test_storage_files();
         let pb_file_path = pb_file.path().display().to_string();
         // SAFETY:
         // The safety here is ensured as the test process will not write to temp storage file
         let flag_mapped_file =
             unsafe { get_mapped_file(&pb_file_path, "mockup", StorageFileType::FlagMap).unwrap() };
-        let flag_offset_option = get_flag_offset(&flag_mapped_file, 0, "none_exist").unwrap();
-        assert_eq!(flag_offset_option, None);
+        let flag_context_option =
+            get_flag_read_context(&flag_mapped_file, 0, "none_exist").unwrap();
+        assert_eq!(flag_context_option, None);
 
-        let flag_offset_option = get_flag_offset(&flag_mapped_file, 3, "enabled_ro").unwrap();
-        assert_eq!(flag_offset_option, None);
+        let flag_context_option =
+            get_flag_read_context(&flag_mapped_file, 3, "enabled_ro").unwrap();
+        assert_eq!(flag_context_option, None);
     }
 
     #[test]
     fn test_boolean_flag_value_query() {
-        let [_package_map, _flag_map, _flag_val, pb_file] = create_test_storage_files();
+        let [_package_map, _flag_map, _flag_val, _flag_info, pb_file] = create_test_storage_files();
         let pb_file_path = pb_file.path().display().to_string();
         // SAFETY:
         // The safety here is ensured as the test process will not write to temp storage file
@@ -159,7 +167,7 @@
 
     #[test]
     fn test_invalid_boolean_flag_value_query() {
-        let [_package_map, _flag_map, _flag_val, pb_file] = create_test_storage_files();
+        let [_package_map, _flag_map, _flag_val, _flag_info, pb_file] = create_test_storage_files();
         let pb_file_path = pb_file.path().display().to_string();
         // SAFETY:
         // The safety here is ensured as the test process will not write to temp storage file
@@ -173,9 +181,43 @@
     }
 
     #[test]
+    fn test_flag_info_query() {
+        let [_package_map, _flag_map, _flag_val, _flag_info, pb_file] = create_test_storage_files();
+        let pb_file_path = pb_file.path().display().to_string();
+        // SAFETY:
+        // The safety here is ensured as the test process will not write to temp storage file
+        let flag_info_file =
+            unsafe { get_mapped_file(&pb_file_path, "mockup", StorageFileType::FlagInfo).unwrap() };
+        let is_rw: Vec<bool> = vec![true, false, true, false, false, false, false, false];
+        for (offset, expected_value) in is_rw.into_iter().enumerate() {
+            let attribute =
+                get_flag_attribute(&flag_info_file, FlagValueType::Boolean, offset as u32).unwrap();
+            assert!((attribute & FlagInfoBit::IsSticky as u8) == 0u8);
+            assert_eq!((attribute & FlagInfoBit::IsReadWrite as u8) != 0u8, expected_value);
+            assert!((attribute & FlagInfoBit::HasOverride as u8) == 0u8);
+        }
+    }
+
+    #[test]
+    fn test_invalid_boolean_flag_info_query() {
+        let [_package_map, _flag_map, _flag_val, _flag_info, pb_file] = create_test_storage_files();
+        let pb_file_path = pb_file.path().display().to_string();
+        // SAFETY:
+        // The safety here is ensured as the test process will not write to temp storage file
+        let flag_info_file =
+            unsafe { get_mapped_file(&pb_file_path, "mockup", StorageFileType::FlagInfo).unwrap() };
+        let err = get_flag_attribute(&flag_info_file, FlagValueType::Boolean, 8u32).unwrap_err();
+        assert_eq!(
+            format!("{:?}", err),
+            "InvalidStorageFileOffset(Flag info offset goes beyond the end of the file.)"
+        );
+    }
+
+    #[test]
     fn test_storage_version_query() {
         assert_eq!(get_storage_file_version("./package.map").unwrap(), 1);
         assert_eq!(get_storage_file_version("./flag.map").unwrap(), 1);
         assert_eq!(get_storage_file_version("./flag.val").unwrap(), 1);
+        assert_eq!(get_storage_file_version("./flag.info").unwrap(), 1);
     }
 }
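
For reference, the read-side flow these tests exercise chains the four mapped files together: resolve the package read context from the package map, the flag read context from the flag map, then use boolean_start_index + flag_index to query both the flag value and the flag info attribute byte. Below is a minimal sketch assuming the same storage-records pb and "mockup" container as the test fixtures, and assuming get_boolean_flag_value takes the same (mapped file, index) arguments as the other query functions above.

use aconfig_storage_file::{FlagInfoBit, FlagValueType, StorageFileType};
use aconfig_storage_read_api::{
    get_boolean_flag_value, get_flag_attribute, get_flag_read_context,
    get_package_read_context, mapped_file::get_mapped_file,
};

fn read_one_flag(pb_path: &str) -> bool {
    // SAFETY: assumes no concurrent writer mutates the underlying storage files,
    // mirroring the safety comments in the tests above.
    let package_map =
        unsafe { get_mapped_file(pb_path, "mockup", StorageFileType::PackageMap).unwrap() };
    let flag_map =
        unsafe { get_mapped_file(pb_path, "mockup", StorageFileType::FlagMap).unwrap() };
    let flag_val =
        unsafe { get_mapped_file(pb_path, "mockup", StorageFileType::FlagVal).unwrap() };
    let flag_info =
        unsafe { get_mapped_file(pb_path, "mockup", StorageFileType::FlagInfo).unwrap() };

    // Package context gives the package id and where its booleans start.
    let pkg = get_package_read_context(&package_map, "com.android.aconfig.storage.test_1")
        .unwrap()
        .unwrap();
    // Flag context gives the stored type and the flag's index within the package.
    let flag = get_flag_read_context(&flag_map, pkg.package_id, "enabled_rw").unwrap().unwrap();
    let index = pkg.boolean_start_index + flag.flag_index as u32;

    // The attribute byte is a bit field; IsReadWrite reports whether the flag is updatable.
    let attribute = get_flag_attribute(&flag_info, FlagValueType::Boolean, index).unwrap();
    let _is_read_write = (attribute & FlagInfoBit::IsReadWrite as u8) != 0;

    // Assumed signature: (mapped flag value file, flag index) -> Result<bool>.
    get_boolean_flag_value(&flag_val, index).unwrap()
}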
diff --git a/tools/aconfig/aconfig_storage_write_api/Android.bp b/tools/aconfig/aconfig_storage_write_api/Android.bp
index 0f15b9c..4dbdbbf 100644
--- a/tools/aconfig/aconfig_storage_write_api/Android.bp
+++ b/tools/aconfig/aconfig_storage_write_api/Android.bp
@@ -14,6 +14,7 @@
         "libcxx",
         "libthiserror",
         "libaconfig_storage_file",
+        "libaconfig_storage_read_api",
     ],
 }
 
@@ -30,6 +31,7 @@
     defaults: ["aconfig_storage_write_api.defaults"],
     data: [
         "tests/flag.val",
+        "tests/flag.info",
     ],
     rustlibs: [
         "libaconfig_storage_read_api",
@@ -68,12 +70,13 @@
     srcs: ["aconfig_storage_write_api.cpp"],
     generated_headers: [
         "cxx-bridge-header",
-        "libcxx_aconfig_storage_write_api_bridge_header"
+        "libcxx_aconfig_storage_write_api_bridge_header",
     ],
     generated_sources: ["libcxx_aconfig_storage_write_api_bridge_code"],
     whole_static_libs: ["libaconfig_storage_write_api_cxx_bridge"],
     export_include_dirs: ["include"],
     static_libs: [
+        "libaconfig_storage_read_api_cc",
         "libaconfig_storage_protos_cc",
         "libprotobuf-cpp-lite",
         "libbase",
diff --git a/tools/aconfig/aconfig_storage_write_api/aconfig_storage_write_api.cpp b/tools/aconfig/aconfig_storage_write_api/aconfig_storage_write_api.cpp
index ea88f05..d57ca64 100644
--- a/tools/aconfig/aconfig_storage_write_api/aconfig_storage_write_api.cpp
+++ b/tools/aconfig/aconfig_storage_write_api/aconfig_storage_write_api.cpp
@@ -38,7 +38,8 @@
 /// Get storage file path
 static Result<std::string> find_storage_file(
     std::string const& pb_file,
-    std::string const& container) {
+    std::string const& container,
+    StorageFileType file_type) {
   auto records_pb = read_storage_records_pb(pb_file);
   if (!records_pb.ok()) {
     return Error() << "Unable to read storage records from " << pb_file
@@ -47,15 +48,49 @@
 
   for (auto& entry : records_pb->files()) {
     if (entry.container() == container) {
-        return entry.flag_val();
+      switch (file_type) {
+        case StorageFileType::package_map:
+          return entry.package_map();
+        case StorageFileType::flag_map:
+          return entry.flag_map();
+        case StorageFileType::flag_val:
+          return entry.flag_val();
+        case StorageFileType::flag_info:
+          return entry.flag_info();
+        default:
+          return Error() << "Invalid file type " << file_type;
+      }
     }
   }
 
-  return Error() << "Unable to find storage files for container " << container;;
+  return Error() << "Unable to find storage files for container " << container;
 }
 
+
+namespace private_internal_api {
+
+/// Get mutable mapped file implementation.
+Result<MutableMappedStorageFile> get_mutable_mapped_file_impl(
+    std::string const& pb_file,
+    std::string const& container,
+    StorageFileType file_type) {
+  if (file_type != StorageFileType::flag_val &&
+      file_type != StorageFileType::flag_info) {
+    return Error() << "Cannot create mutable mapped file for this file type";
+  }
+
+  auto file_result = find_storage_file(pb_file, container, file_type);
+  if (!file_result.ok()) {
+    return Error() << file_result.error();
+  }
+
+  return map_mutable_storage_file(*file_result);
+}
+
+} // namespace private_internal_api
+
 /// Map a storage file
-static Result<MappedFlagValueFile> map_storage_file(std::string const& file) {
+Result<MutableMappedStorageFile> map_mutable_storage_file(std::string const& file) {
   struct stat file_stat;
   if (stat(file.c_str(), &file_stat) < 0) {
     return ErrnoError() << "stat failed";
@@ -78,43 +113,24 @@
     return ErrnoError() << "mmap failed";
   }
 
-  auto mapped_file = MappedFlagValueFile();
+  auto mapped_file = MutableMappedStorageFile();
   mapped_file.file_ptr = map_result;
   mapped_file.file_size = file_size;
 
   return mapped_file;
 }
 
-namespace private_internal_api {
-
-/// Get mapped file implementation.
-Result<MappedFlagValueFile> get_mapped_flag_value_file_impl(
-    std::string const& pb_file,
-    std::string const& container) {
-  auto file_result = find_storage_file(pb_file, container);
-  if (!file_result.ok()) {
-    return Error() << file_result.error();
-  }
-  auto mapped_result = map_storage_file(*file_result);
-  if (!mapped_result.ok()) {
-    return Error() << "failed to map " << *file_result << ": "
-                   << mapped_result.error();
-  }
-  return *mapped_result;
-}
-
-} // namespace private internal api
-
-/// Get mapped writeable flag value file
-Result<MappedFlagValueFile> get_mapped_flag_value_file(
-    std::string const& container) {
-  return private_internal_api::get_mapped_flag_value_file_impl(
-      kPersistStorageRecordsPb, container);
+/// Get mutable mapped file
+Result<MutableMappedStorageFile> get_mutable_mapped_file(
+    std::string const& container,
+    StorageFileType file_type) {
+  return private_internal_api::get_mutable_mapped_file_impl(
+      kPersistStorageRecordsPb, container, file_type);
 }
 
 /// Set boolean flag value
 Result<void> set_boolean_flag_value(
-    const MappedFlagValueFile& file,
+    const MutableMappedStorageFile& file,
     uint32_t offset,
     bool value) {
   auto content = rust::Slice<uint8_t>(
@@ -126,6 +142,38 @@
   return {};
 }
 
+/// Set if flag is sticky
+Result<void> set_flag_is_sticky(
+    const MutableMappedStorageFile& file,
+    FlagValueType value_type,
+    uint32_t offset,
+    bool value) {
+  auto content = rust::Slice<uint8_t>(
+      static_cast<uint8_t*>(file.file_ptr), file.file_size);
+  auto update_cxx = update_flag_is_sticky_cxx(
+      content, static_cast<uint16_t>(value_type), offset, value);
+  if (!update_cxx.update_success) {
+    return Error() << std::string(update_cxx.error_message.c_str());
+  }
+  return {};
+}
+
+/// Set if flag has override
+Result<void> set_flag_has_override(
+    const MutableMappedStorageFile& file,
+    FlagValueType value_type,
+    uint32_t offset,
+    bool value) {
+  auto content = rust::Slice<uint8_t>(
+      static_cast<uint8_t*>(file.file_ptr), file.file_size);
+  auto update_cxx = update_flag_has_override_cxx(
+      content, static_cast<uint16_t>(value_type), offset, value);
+  if (!update_cxx.update_success) {
+    return Error() << std::string(update_cxx.error_message.c_str());
+  }
+  return {};
+}
+
 Result<void> create_flag_info(
     std::string const& package_map,
     std::string const& flag_map,
diff --git a/tools/aconfig/aconfig_storage_write_api/include/aconfig_storage/aconfig_storage_write_api.hpp b/tools/aconfig/aconfig_storage_write_api/include/aconfig_storage/aconfig_storage_write_api.hpp
index b652510..e9e4ebb 100644
--- a/tools/aconfig/aconfig_storage_write_api/include/aconfig_storage/aconfig_storage_write_api.hpp
+++ b/tools/aconfig/aconfig_storage_write_api/include/aconfig_storage/aconfig_storage_write_api.hpp
@@ -4,13 +4,14 @@
 #include <string>
 
 #include <android-base/result.h>
+#include <aconfig_storage/aconfig_storage_read_api.hpp>
 
 using namespace android::base;
 
 namespace aconfig_storage {
 
 /// Mapped flag value file
-struct MappedFlagValueFile{
+struct MutableMappedStorageFile {
   void* file_ptr;
   size_t file_size;
 };
@@ -18,19 +19,39 @@
 /// DO NOT USE APIS IN THE FOLLOWING NAMESPACE DIRECTLY
 namespace private_internal_api {
 
-Result<MappedFlagValueFile> get_mapped_flag_value_file_impl(
+Result<MutableMappedStorageFile> get_mutable_mapped_file_impl(
     std::string const& pb_file,
-    std::string const& container);
+    std::string const& container,
+    StorageFileType file_type);
 
 } // namespace private_internal_api
 
-/// Get mapped writeable flag value file
-Result<MappedFlagValueFile> get_mapped_flag_value_file(
-    std::string const& container);
+/// Map a storage file
+Result<MutableMappedStorageFile> map_mutable_storage_file(
+    std::string const& file);
+
+/// Get mapped writeable storage file
+Result<MutableMappedStorageFile> get_mutable_mapped_file(
+    std::string const& container,
+    StorageFileType file_type);
 
 /// Set boolean flag value
 Result<void> set_boolean_flag_value(
-    const MappedFlagValueFile& file,
+    const MutableMappedStorageFile& file,
+    uint32_t offset,
+    bool value);
+
+/// Set if flag is sticky
+Result<void> set_flag_is_sticky(
+    const MutableMappedStorageFile& file,
+    FlagValueType value_type,
+    uint32_t offset,
+    bool value);
+
+/// Set if flag has override
+Result<void> set_flag_has_override(
+    const MutableMappedStorageFile& file,
+    FlagValueType value_type,
     uint32_t offset,
     bool value);
 
diff --git a/tools/aconfig/aconfig_storage_write_api/src/flag_info_update.rs b/tools/aconfig/aconfig_storage_write_api/src/flag_info_update.rs
new file mode 100644
index 0000000..3f38705
--- /dev/null
+++ b/tools/aconfig/aconfig_storage_write_api/src/flag_info_update.rs
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! flag info update module defines how flag info attributes are written to the mapped bytes
+
+use aconfig_storage_file::{
+    read_u8_from_bytes, AconfigStorageError, FlagInfoBit, FlagInfoHeader, FlagValueType,
+    FILE_VERSION,
+};
+use anyhow::anyhow;
+
+fn get_flag_info_offset(
+    buf: &mut [u8],
+    flag_type: FlagValueType,
+    flag_index: u32,
+) -> Result<usize, AconfigStorageError> {
+    let interpreted_header = FlagInfoHeader::from_bytes(buf)?;
+    if interpreted_header.version > FILE_VERSION {
+        return Err(AconfigStorageError::HigherStorageFileVersion(anyhow!(
+            "Cannot write to storage file with a higher version of {} with lib version {}",
+            interpreted_header.version,
+            FILE_VERSION
+        )));
+    }
+
+    // get byte offset to the flag info
+    let head = match flag_type {
+        FlagValueType::Boolean => (interpreted_header.boolean_flag_offset + flag_index) as usize,
+    };
+
+    if head >= interpreted_header.file_size as usize {
+        return Err(AconfigStorageError::InvalidStorageFileOffset(anyhow!(
+            "Flag info offset goes beyond the end of the file."
+        )));
+    }
+
+    Ok(head)
+}
+
+fn get_flag_attribute_and_offset(
+    buf: &mut [u8],
+    flag_type: FlagValueType,
+    flag_index: u32,
+) -> Result<(u8, usize), AconfigStorageError> {
+    let head = get_flag_info_offset(buf, flag_type, flag_index)?;
+    let mut pos = head;
+    let attribute = read_u8_from_bytes(buf, &mut pos)?;
+    Ok((attribute, head))
+}
+
+/// Set if flag is sticky
+pub fn update_flag_is_sticky(
+    buf: &mut [u8],
+    flag_type: FlagValueType,
+    flag_index: u32,
+    value: bool,
+) -> Result<(), AconfigStorageError> {
+    let (attribute, head) = get_flag_attribute_and_offset(buf, flag_type, flag_index)?;
+    let is_sticky = (attribute & (FlagInfoBit::IsSticky as u8)) != 0;
+    if is_sticky != value {
+        buf[head] = (attribute ^ FlagInfoBit::IsSticky as u8).to_le_bytes()[0];
+    }
+    Ok(())
+}
+
+/// Set if flag has override
+pub fn update_flag_has_override(
+    buf: &mut [u8],
+    flag_type: FlagValueType,
+    flag_index: u32,
+    value: bool,
+) -> Result<(), AconfigStorageError> {
+    let (attribute, head) = get_flag_attribute_and_offset(buf, flag_type, flag_index)?;
+    let has_override = (attribute & (FlagInfoBit::HasOverride as u8)) != 0;
+    if has_override != value {
+        buf[head] = (attribute ^ FlagInfoBit::HasOverride as u8).to_le_bytes()[0];
+    }
+    Ok(())
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use aconfig_storage_file::test_utils::create_test_flag_info_list;
+    use aconfig_storage_read_api::flag_info_query::find_flag_attribute;
+
+    #[test]
+    // this test point locks down is sticky update
+    fn test_update_flag_is_sticky() {
+        let flag_info_list = create_test_flag_info_list();
+        let mut buf = flag_info_list.into_bytes();
+        for i in 0..flag_info_list.header.num_flags {
+            update_flag_is_sticky(&mut buf, FlagValueType::Boolean, i, true).unwrap();
+            let attribute = find_flag_attribute(&buf, FlagValueType::Boolean, i).unwrap();
+            assert!((attribute & (FlagInfoBit::IsSticky as u8)) != 0);
+            update_flag_is_sticky(&mut buf, FlagValueType::Boolean, i, false).unwrap();
+            let attribute = find_flag_attribute(&buf, FlagValueType::Boolean, i).unwrap();
+            assert!((attribute & (FlagInfoBit::IsSticky as u8)) == 0);
+        }
+    }
+
+    #[test]
+    // this test point locks down has override update
+    fn test_update_flag_has_override() {
+        let flag_info_list = create_test_flag_info_list();
+        let mut buf = flag_info_list.into_bytes();
+        for i in 0..flag_info_list.header.num_flags {
+            update_flag_has_override(&mut buf, FlagValueType::Boolean, i, true).unwrap();
+            let attribute = find_flag_attribute(&buf, FlagValueType::Boolean, i).unwrap();
+            assert!((attribute & (FlagInfoBit::HasOverride as u8)) != 0);
+            update_flag_has_override(&mut buf, FlagValueType::Boolean, i, false).unwrap();
+            let attribute = find_flag_attribute(&buf, FlagValueType::Boolean, i).unwrap();
+            assert!((attribute & (FlagInfoBit::HasOverride as u8)) == 0);
+        }
+    }
+}
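
The two setters above share one pattern: read the attribute byte at the computed offset, compare the requested state against the current bit, and XOR the bit in only when they differ, so repeated writes of the same value leave the byte untouched. A standalone sketch of that toggle follows; the concrete mask value is illustrative, the real bit positions come from FlagInfoBit.

// Flip `mask` in `attribute` only when the stored state differs from `value`,
// mirroring update_flag_is_sticky / update_flag_has_override above.
fn set_bit(attribute: u8, mask: u8, value: bool) -> u8 {
    let currently_set = (attribute & mask) != 0;
    if currently_set != value {
        attribute ^ mask
    } else {
        attribute
    }
}

fn main() {
    const IS_STICKY: u8 = 1 << 0; // illustrative mask, not the real FlagInfoBit value
    let byte = set_bit(0u8, IS_STICKY, true);
    assert_ne!(byte & IS_STICKY, 0);
    // Setting the same value again is a no-op.
    assert_eq!(set_bit(byte, IS_STICKY, true), byte);
    assert_eq!(set_bit(byte, IS_STICKY, false) & IS_STICKY, 0);
}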
diff --git a/tools/aconfig/aconfig_storage_write_api/src/flag_value_update.rs b/tools/aconfig/aconfig_storage_write_api/src/flag_value_update.rs
index 4cb7939..0938715 100644
--- a/tools/aconfig/aconfig_storage_write_api/src/flag_value_update.rs
+++ b/tools/aconfig/aconfig_storage_write_api/src/flag_value_update.rs
@@ -22,7 +22,7 @@
 /// Set flag value
 pub fn update_boolean_flag_value(
     buf: &mut [u8],
-    flag_offset: u32,
+    flag_index: u32,
     flag_value: bool,
 ) -> Result<(), AconfigStorageError> {
     let interpreted_header = FlagValueHeader::from_bytes(buf)?;
@@ -34,10 +34,8 @@
         )));
     }
 
-    let head = (interpreted_header.boolean_value_offset + flag_offset) as usize;
-
-    // TODO: right now, there is only boolean flags, with more flag value types added
-    // later, the end of boolean flag value section should be updated (b/322826265).
+    // get byte offset to the flag
+    let head = (interpreted_header.boolean_value_offset + flag_index) as usize;
     if head >= interpreted_header.file_size as usize {
         return Err(AconfigStorageError::InvalidStorageFileOffset(anyhow!(
             "Flag value offset goes beyond the end of the file."
@@ -51,20 +49,7 @@
 #[cfg(test)]
 mod tests {
     use super::*;
-    use aconfig_storage_file::{FlagValueList, StorageFileType};
-
-    pub fn create_test_flag_value_list() -> FlagValueList {
-        let header = FlagValueHeader {
-            version: FILE_VERSION,
-            container: String::from("system"),
-            file_type: StorageFileType::FlagVal as u8,
-            file_size: 35,
-            num_flags: 8,
-            boolean_value_offset: 27,
-        };
-        let booleans: Vec<bool> = vec![false; 8];
-        FlagValueList { header, booleans }
-    }
+    use aconfig_storage_file::test_utils::create_test_flag_value_list;
 
     #[test]
     // this test point locks down flag value update
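
The rename from flag_offset to flag_index in this file makes the contract explicit: callers pass a zero-based flag index, and the function derives the byte offset as boolean_value_offset + index, rejecting anything at or past file_size. A small sketch of that calculation, reusing the numbers from the test fixture that moved into aconfig_storage_file::test_utils (file size 35, boolean section at byte 27, 8 flags):

// Compute the byte position of a boolean flag value inside a flag value file,
// mirroring the bounds check in update_boolean_flag_value above.
fn boolean_value_byte_offset(
    boolean_value_offset: u32,
    file_size: u32,
    flag_index: u32,
) -> Option<usize> {
    let head = (boolean_value_offset + flag_index) as usize;
    if head >= file_size as usize {
        None // index would land past the end of the file
    } else {
        Some(head)
    }
}

fn main() {
    assert_eq!(boolean_value_byte_offset(27, 35, 7), Some(34)); // last of the 8 flags
    assert_eq!(boolean_value_byte_offset(27, 35, 8), None); // out of range, as in the negative tests
}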
diff --git a/tools/aconfig/aconfig_storage_write_api/src/lib.rs b/tools/aconfig/aconfig_storage_write_api/src/lib.rs
index 678bbd5..7148d06 100644
--- a/tools/aconfig/aconfig_storage_write_api/src/lib.rs
+++ b/tools/aconfig/aconfig_storage_write_api/src/lib.rs
@@ -17,6 +17,7 @@
 //! `aconfig_storage_write_api` is a crate that defines write apis to update flag value
 //! in storage file. It provides one api to interface with storage files.
 
+pub mod flag_info_update;
 pub mod flag_value_update;
 pub mod mapped_file;
 
@@ -24,8 +25,8 @@
 mod test_utils;
 
 use aconfig_storage_file::{
-    AconfigStorageError, FlagInfoHeader, FlagInfoList, FlagInfoNode, FlagTable, PackageTable,
-    StorageFileType, StoredFlagType, FILE_VERSION,
+    AconfigStorageError, FlagInfoHeader, FlagInfoList, FlagInfoNode, FlagTable, FlagValueType,
+    PackageTable, StorageFileType, StoredFlagType, FILE_VERSION,
 };
 
 use anyhow::anyhow;
@@ -36,10 +37,11 @@
 /// Storage file location pb file
 pub const STORAGE_LOCATION_FILE: &str = "/metadata/aconfig/persistent_storage_file_records.pb";
 
-/// Get mmaped flag value file given the container name
+/// Get read write mapped storage files.
 ///
 /// \input container: the flag package container
-/// \return a result of mapped file
+/// \input file_type: storage file type enum
+/// \return a result of read write mapped file
 ///
 ///
 /// # Safety
@@ -48,23 +50,64 @@
 /// file not thru this memory mapped file or there are concurrent writes to this
 /// memory mapped file. Ensure all writes to the underlying file are thru this memory
 /// mapped file and there are no concurrent writes.
-pub unsafe fn get_mapped_flag_value_file(container: &str) -> Result<MmapMut, AconfigStorageError> {
-    unsafe { crate::mapped_file::get_mapped_file(STORAGE_LOCATION_FILE, container) }
+pub unsafe fn get_mapped_storage_file(
+    container: &str,
+    file_type: StorageFileType,
+) -> Result<MmapMut, AconfigStorageError> {
+    unsafe { crate::mapped_file::get_mapped_file(STORAGE_LOCATION_FILE, container, file_type) }
 }
 
 /// Set boolean flag value thru mapped file and flush the change to file
 ///
 /// \input mapped_file: the mapped flag value file
-/// \input offset: flag value offset
+/// \input index: flag index
 /// \input value: updated flag value
 /// \return a result of ()
 ///
 pub fn set_boolean_flag_value(
     file: &mut MmapMut,
-    offset: u32,
+    index: u32,
     value: bool,
 ) -> Result<(), AconfigStorageError> {
-    crate::flag_value_update::update_boolean_flag_value(file, offset, value)?;
+    crate::flag_value_update::update_boolean_flag_value(file, index, value)?;
+    file.flush().map_err(|errmsg| {
+        AconfigStorageError::MapFlushFail(anyhow!("fail to flush storage file: {}", errmsg))
+    })
+}
+
+/// Set if flag is sticky thru mapped file and flush the change to file
+///
+/// \input mapped_file: the mapped flag info file
+/// \input index: flag index
+/// \input value: updated flag sticky value
+/// \return a result of ()
+///
+pub fn set_flag_is_sticky(
+    file: &mut MmapMut,
+    flag_type: FlagValueType,
+    index: u32,
+    value: bool,
+) -> Result<(), AconfigStorageError> {
+    crate::flag_info_update::update_flag_is_sticky(file, flag_type, index, value)?;
+    file.flush().map_err(|errmsg| {
+        AconfigStorageError::MapFlushFail(anyhow!("fail to flush storage file: {}", errmsg))
+    })
+}
+
+/// Set if flag has override thru mapped file and flush the change to file
+///
+/// \input mapped_file: the mapped flag info file
+/// \input index: flag index
+/// \input value: updated flag has override value
+/// \return a result of ()
+///
+pub fn set_flag_has_override(
+    file: &mut MmapMut,
+    flag_type: FlagValueType,
+    index: u32,
+    value: bool,
+) -> Result<(), AconfigStorageError> {
+    crate::flag_info_update::update_flag_has_override(file, flag_type, index, value)?;
     file.flush().map_err(|errmsg| {
         AconfigStorageError::MapFlushFail(anyhow!("fail to flush storage file: {}", errmsg))
     })
@@ -106,15 +149,15 @@
         )));
     }
 
-    let mut package_offsets = vec![0; package_table.header.num_packages as usize];
+    let mut package_start_index = vec![0; package_table.header.num_packages as usize];
     for node in package_table.nodes.iter() {
-        package_offsets[node.package_id as usize] = node.boolean_offset;
+        package_start_index[node.package_id as usize] = node.boolean_start_index;
     }
 
     let mut is_flag_rw = vec![false; flag_table.header.num_flags as usize];
     for node in flag_table.nodes.iter() {
-        let flag_offset = package_offsets[node.package_id as usize] + node.flag_id as u32;
-        is_flag_rw[flag_offset as usize] = node.flag_type == StoredFlagType::ReadWriteBoolean;
+        let flag_index = package_start_index[node.package_id as usize] + node.flag_index as u32;
+        is_flag_rw[flag_index as usize] = node.flag_type == StoredFlagType::ReadWriteBoolean;
     }
 
     let mut list = FlagInfoList {
@@ -163,6 +206,18 @@
         pub error_message: String,
     }
 
+    // Flag is sticky update return for cc interop
+    pub struct FlagIsStickyUpdateCXX {
+        pub update_success: bool,
+        pub error_message: String,
+    }
+
+    // Flag has override update return for cc interop
+    pub struct FlagHasOverrideUpdateCXX {
+        pub update_success: bool,
+        pub error_message: String,
+    }
+
     // Flag info file creation return for cc interlop
     pub struct FlagInfoCreationCXX {
         pub success: bool,
@@ -177,6 +232,20 @@
             value: bool,
         ) -> BooleanFlagValueUpdateCXX;
 
+        pub fn update_flag_is_sticky_cxx(
+            file: &mut [u8],
+            flag_type: u16,
+            offset: u32,
+            value: bool,
+        ) -> FlagIsStickyUpdateCXX;
+
+        pub fn update_flag_has_override_cxx(
+            file: &mut [u8],
+            flag_type: u16,
+            offset: u32,
+            value: bool,
+        ) -> FlagHasOverrideUpdateCXX;
+
         pub fn create_flag_info_cxx(
             package_map: &str,
             flag_map: &str,
@@ -201,6 +270,59 @@
     }
 }
 
+pub(crate) fn update_flag_is_sticky_cxx(
+    file: &mut [u8],
+    flag_type: u16,
+    offset: u32,
+    value: bool,
+) -> ffi::FlagIsStickyUpdateCXX {
+    match FlagValueType::try_from(flag_type) {
+        Ok(value_type) => {
+            match crate::flag_info_update::update_flag_is_sticky(file, value_type, offset, value) {
+                Ok(()) => ffi::FlagIsStickyUpdateCXX {
+                    update_success: true,
+                    error_message: String::from(""),
+                },
+                Err(errmsg) => ffi::FlagIsStickyUpdateCXX {
+                    update_success: false,
+                    error_message: format!("{:?}", errmsg),
+                },
+            }
+        }
+        Err(errmsg) => ffi::FlagIsStickyUpdateCXX {
+            update_success: false,
+            error_message: format!("{:?}", errmsg),
+        },
+    }
+}
+
+pub(crate) fn update_flag_has_override_cxx(
+    file: &mut [u8],
+    flag_type: u16,
+    offset: u32,
+    value: bool,
+) -> ffi::FlagHasOverrideUpdateCXX {
+    match FlagValueType::try_from(flag_type) {
+        Ok(value_type) => {
+            match crate::flag_info_update::update_flag_has_override(file, value_type, offset, value)
+            {
+                Ok(()) => ffi::FlagHasOverrideUpdateCXX {
+                    update_success: true,
+                    error_message: String::from(""),
+                },
+                Err(errmsg) => ffi::FlagHasOverrideUpdateCXX {
+                    update_success: false,
+                    error_message: format!("{:?}", errmsg),
+                },
+            }
+        }
+        Err(errmsg) => ffi::FlagHasOverrideUpdateCXX {
+            update_success: false,
+            error_message: format!("{:?}", errmsg),
+        },
+    }
+}
+
 /// Create flag info file cc interlop
 pub(crate) fn create_flag_info_cxx(
     package_map: &str,
@@ -224,6 +346,8 @@
         create_test_flag_info_list, create_test_flag_table, create_test_package_table,
         write_bytes_to_temp_file,
     };
+    use aconfig_storage_file::FlagInfoBit;
+    use aconfig_storage_read_api::flag_info_query::find_flag_attribute;
     use aconfig_storage_read_api::flag_value_query::find_boolean_flag_value;
     use std::fs::File;
     use std::io::Read;
@@ -248,6 +372,7 @@
     package_map: "some_package.map"
     flag_map: "some_flag.map"
     flag_val: "{}"
+    flag_info: "some_flag.info"
     timestamp: 12345
 }}
 "#,
@@ -260,7 +385,12 @@
         // The safety here is guaranteed as only this single threaded test process will
         // write to this file
         unsafe {
-            let mut file = crate::mapped_file::get_mapped_file(&record_pb_path, "system").unwrap();
+            let mut file = crate::mapped_file::get_mapped_file(
+                &record_pb_path,
+                "system",
+                StorageFileType::FlagVal,
+            )
+            .unwrap();
             for i in 0..8 {
                 set_boolean_flag_value(&mut file, i, true).unwrap();
                 let value = get_boolean_flag_value_at_offset(&flag_value_path, i);
@@ -273,6 +403,101 @@
         }
     }
 
+    fn get_flag_attribute_at_offset(file: &str, value_type: FlagValueType, offset: u32) -> u8 {
+        let mut f = File::open(&file).unwrap();
+        let mut bytes = Vec::new();
+        f.read_to_end(&mut bytes).unwrap();
+        find_flag_attribute(&bytes, value_type, offset).unwrap()
+    }
+
+    #[test]
+    fn test_set_flag_is_sticky() {
+        let flag_info_file = copy_to_temp_file("./tests/flag.info", false).unwrap();
+        let flag_info_path = flag_info_file.path().display().to_string();
+        let text_proto = format!(
+            r#"
+    files {{
+        version: 0
+        container: "system"
+        package_map: "some_package.map"
+        flag_map: "some_flag.map"
+        flag_val: "some_flag.val"
+        flag_info: "{}"
+        timestamp: 12345
+    }}
+    "#,
+            flag_info_path
+        );
+        let record_pb_file = write_proto_to_temp_file(&text_proto).unwrap();
+        let record_pb_path = record_pb_file.path().display().to_string();
+
+        // SAFETY:
+        // The safety here is guaranteed as only this single threaded test process will
+        // write to this file
+        unsafe {
+            let mut file = crate::mapped_file::get_mapped_file(
+                &record_pb_path,
+                "system",
+                StorageFileType::FlagInfo,
+            )
+            .unwrap();
+            for i in 0..8 {
+                set_flag_is_sticky(&mut file, FlagValueType::Boolean, i, true).unwrap();
+                let attribute =
+                    get_flag_attribute_at_offset(&flag_info_path, FlagValueType::Boolean, i);
+                assert!((attribute & (FlagInfoBit::IsSticky as u8)) != 0);
+                set_flag_is_sticky(&mut file, FlagValueType::Boolean, i, false).unwrap();
+                let attribute =
+                    get_flag_attribute_at_offset(&flag_info_path, FlagValueType::Boolean, i);
+                assert!((attribute & (FlagInfoBit::IsSticky as u8)) == 0);
+            }
+        }
+    }
+
+    #[test]
+    fn test_set_flag_has_override() {
+        let flag_info_file = copy_to_temp_file("./tests/flag.info", false).unwrap();
+        let flag_info_path = flag_info_file.path().display().to_string();
+        let text_proto = format!(
+            r#"
+    files {{
+        version: 0
+        container: "system"
+        package_map: "some_package.map"
+        flag_map: "some_flag.map"
+        flag_val: "some_flag.val"
+        flag_info: "{}"
+        timestamp: 12345
+    }}
+    "#,
+            flag_info_path
+        );
+        let record_pb_file = write_proto_to_temp_file(&text_proto).unwrap();
+        let record_pb_path = record_pb_file.path().display().to_string();
+
+        // SAFETY:
+        // The safety here is guaranteed as only this single threaded test process will
+        // write to this file
+        unsafe {
+            let mut file = crate::mapped_file::get_mapped_file(
+                &record_pb_path,
+                "system",
+                StorageFileType::FlagInfo,
+            )
+            .unwrap();
+            for i in 0..8 {
+                set_flag_has_override(&mut file, FlagValueType::Boolean, i, true).unwrap();
+                let attribute =
+                    get_flag_attribute_at_offset(&flag_info_path, FlagValueType::Boolean, i);
+                assert!((attribute & (FlagInfoBit::HasOverride as u8)) != 0);
+                set_flag_has_override(&mut file, FlagValueType::Boolean, i, false).unwrap();
+                let attribute =
+                    get_flag_attribute_at_offset(&flag_info_path, FlagValueType::Boolean, i);
+                assert!((attribute & (FlagInfoBit::HasOverride as u8)) == 0);
+            }
+        }
+    }
+
     fn create_empty_temp_file() -> Result<NamedTempFile, AconfigStorageError> {
         let file = NamedTempFile::new().map_err(|_| {
             AconfigStorageError::FileCreationFail(anyhow!("Failed to create temp file"))
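
Put together, the write path added in this file is: map the container's flag value or flag info file as read-write, call one of the setters, and let the setter flush the mmap back to disk. A minimal sketch of a caller marking a flag as sticky and overridden follows; on a device the pb records are resolved through STORAGE_LOCATION_FILE, and the container name and index here are placeholders.

use aconfig_storage_file::{AconfigStorageError, FlagValueType, StorageFileType};
use aconfig_storage_write_api::{
    get_mapped_storage_file, set_flag_has_override, set_flag_is_sticky,
};

fn mark_flag_overridden(container: &str, index: u32) -> Result<(), AconfigStorageError> {
    // SAFETY: assumes this process is the only writer of the mapped file, per the
    // safety contract documented on get_mapped_storage_file.
    let mut flag_info =
        unsafe { get_mapped_storage_file(container, StorageFileType::FlagInfo)? };
    set_flag_is_sticky(&mut flag_info, FlagValueType::Boolean, index, true)?;
    set_flag_has_override(&mut flag_info, FlagValueType::Boolean, index, true)?;
    Ok(())
}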
diff --git a/tools/aconfig/aconfig_storage_write_api/src/mapped_file.rs b/tools/aconfig/aconfig_storage_write_api/src/mapped_file.rs
index 4c98be4..ea9ac19 100644
--- a/tools/aconfig/aconfig_storage_write_api/src/mapped_file.rs
+++ b/tools/aconfig/aconfig_storage_write_api/src/mapped_file.rs
@@ -14,43 +14,41 @@
  * limitations under the License.
  */
 
-use std::fs::{self, File, OpenOptions};
-use std::io::{BufReader, Read};
-
 use anyhow::anyhow;
 use memmap2::MmapMut;
+use std::fs::{self, OpenOptions};
+use std::io::Read;
 
-use aconfig_storage_file::protos::{storage_record_pb::try_from_binary_proto, ProtoStorageFiles};
-use aconfig_storage_file::AconfigStorageError::{
-    self, FileReadFail, MapFileFail, ProtobufParseFail, StorageFileNotFound,
-};
+use aconfig_storage_file::AconfigStorageError::{self, FileReadFail, MapFileFail};
+use aconfig_storage_file::StorageFileType;
+use aconfig_storage_read_api::mapped_file::find_container_storage_location;
 
-/// Find where persistent storage value file is for a particular container
-fn find_persist_flag_value_file(
-    location_pb_file: &str,
-    container: &str,
-) -> Result<String, AconfigStorageError> {
-    let file = File::open(location_pb_file).map_err(|errmsg| {
-        FileReadFail(anyhow!("Failed to open file {}: {}", location_pb_file, errmsg))
-    })?;
-    let mut reader = BufReader::new(file);
-    let mut bytes = Vec::new();
-    reader.read_to_end(&mut bytes).map_err(|errmsg| {
-        FileReadFail(anyhow!("Failed to read file {}: {}", location_pb_file, errmsg))
-    })?;
-    let storage_locations: ProtoStorageFiles = try_from_binary_proto(&bytes).map_err(|errmsg| {
-        ProtobufParseFail(anyhow!(
-            "Failed to parse storage location pb file {}: {}",
-            location_pb_file,
-            errmsg
-        ))
-    })?;
-    for location_info in storage_locations.files.iter() {
-        if location_info.container() == container {
-            return Ok(location_info.flag_val().to_string());
-        }
+/// Get the mutable memory mapping of a storage file
+///
+/// # Safety
+///
+/// The memory mapped file may have undefined behavior if there are writes to this
+/// file not thru this memory mapped file or there are concurrent writes to this
+/// memory mapped file. Ensure all writes to the underlying file are thru this memory
+/// mapped file and there are no concurrent writes.
+unsafe fn map_file(file_path: &str) -> Result<MmapMut, AconfigStorageError> {
+    // make sure file has read write permission
+    let perms = fs::metadata(file_path).unwrap().permissions();
+    if perms.readonly() {
+        return Err(MapFileFail(anyhow!("fail to map non read write storage file {}", file_path)));
     }
-    Err(StorageFileNotFound(anyhow!("Persistent flag value file does not exist for {}", container)))
+
+    let file =
+        OpenOptions::new().read(true).write(true).open(file_path).map_err(|errmsg| {
+            FileReadFail(anyhow!("Failed to open file {}: {}", file_path, errmsg))
+        })?;
+
+    unsafe {
+        let mapped_file = MmapMut::map_mut(&file).map_err(|errmsg| {
+            MapFileFail(anyhow!("fail to map storage file {}: {}", file_path, errmsg))
+        })?;
+        Ok(mapped_file)
+    }
 }
 
 /// Get a mapped storage file given the container and file type
@@ -64,24 +62,16 @@
 pub unsafe fn get_mapped_file(
     location_pb_file: &str,
     container: &str,
+    file_type: StorageFileType,
 ) -> Result<MmapMut, AconfigStorageError> {
-    let file_path = find_persist_flag_value_file(location_pb_file, container)?;
-
-    // make sure file has read write permission
-    let perms = fs::metadata(&file_path).unwrap().permissions();
-    if perms.readonly() {
-        return Err(MapFileFail(anyhow!("fail to map non read write storage file {}", file_path)));
-    }
-
-    let file =
-        OpenOptions::new().read(true).write(true).open(&file_path).map_err(|errmsg| {
-            FileReadFail(anyhow!("Failed to open file {}: {}", file_path, errmsg))
-        })?;
-
-    unsafe {
-        MmapMut::map_mut(&file).map_err(|errmsg| {
-            MapFileFail(anyhow!("fail to map storage file {}: {}", file_path, errmsg))
-        })
+    let files_location = find_container_storage_location(location_pb_file, container)?;
+    match file_type {
+        StorageFileType::FlagVal => unsafe { map_file(files_location.flag_val()) },
+        StorageFileType::FlagInfo => unsafe { map_file(files_location.flag_info()) },
+        _ => Err(MapFileFail(anyhow!(
+            "Cannot map file type {:?} as writeable memory mapped files.",
+            file_type
+        ))),
     }
 }
 
@@ -92,41 +82,9 @@
     use aconfig_storage_file::protos::storage_record_pb::write_proto_to_temp_file;
 
     #[test]
-    fn test_find_persist_flag_value_file_location() {
-        let text_proto = r#"
-files {
-    version: 0
-    container: "system"
-    package_map: "/system/etc/package.map"
-    flag_map: "/system/etc/flag.map"
-    flag_val: "/metadata/aconfig/system.val"
-    timestamp: 12345
-}
-files {
-    version: 1
-    container: "product"
-    package_map: "/product/etc/package.map"
-    flag_map: "/product/etc/flag.map"
-    flag_val: "/metadata/aconfig/product.val"
-    timestamp: 54321
-}
-"#;
-        let file = write_proto_to_temp_file(&text_proto).unwrap();
-        let file_full_path = file.path().display().to_string();
-        let flag_value_file = find_persist_flag_value_file(&file_full_path, "system").unwrap();
-        assert_eq!(flag_value_file, "/metadata/aconfig/system.val");
-        let flag_value_file = find_persist_flag_value_file(&file_full_path, "product").unwrap();
-        assert_eq!(flag_value_file, "/metadata/aconfig/product.val");
-        let err = find_persist_flag_value_file(&file_full_path, "vendor").unwrap_err();
-        assert_eq!(
-            format!("{:?}", err),
-            "StorageFileNotFound(Persistent flag value file does not exist for vendor)"
-        );
-    }
-
-    #[test]
     fn test_mapped_file_contents() {
-        let mut rw_file = copy_to_temp_file("./tests/flag.val", false).unwrap();
+        let mut rw_val_file = copy_to_temp_file("./tests/flag.val", false).unwrap();
+        let mut rw_info_file = copy_to_temp_file("./tests/flag.info", false).unwrap();
         let text_proto = format!(
             r#"
 files {{
@@ -135,21 +93,37 @@
     package_map: "some_package.map"
     flag_map: "some_flag.map"
     flag_val: "{}"
+    flag_info: "{}"
     timestamp: 12345
 }}
 "#,
-            rw_file.path().display().to_string()
+            rw_val_file.path().display().to_string(),
+            rw_info_file.path().display().to_string()
         );
         let storage_record_file = write_proto_to_temp_file(&text_proto).unwrap();
         let storage_record_file_path = storage_record_file.path().display().to_string();
 
         let mut content = Vec::new();
-        rw_file.read_to_end(&mut content).unwrap();
+        rw_val_file.read_to_end(&mut content).unwrap();
 
         // SAFETY:
         // The safety here is guaranteed here as no writes happens to this temp file
         unsafe {
-            let mmaped_file = get_mapped_file(&storage_record_file_path, "system").unwrap();
+            let mmaped_file =
+                get_mapped_file(&storage_record_file_path, "system", StorageFileType::FlagVal)
+                    .unwrap();
+            assert_eq!(mmaped_file[..], content[..]);
+        }
+
+        let mut content = Vec::new();
+        rw_info_file.read_to_end(&mut content).unwrap();
+
+        // SAFETY:
+        // The safety here is guaranteed as no writes happen to this temp file
+        unsafe {
+            let mmaped_file =
+                get_mapped_file(&storage_record_file_path, "system", StorageFileType::FlagInfo)
+                    .unwrap();
             assert_eq!(mmaped_file[..], content[..]);
         }
     }
@@ -165,6 +139,7 @@
     package_map: "some_package.map"
     flag_map: "some_flag.map"
     flag_val: "{}"
+    flag_info: "some_flag.info"
     timestamp: 12345
 }}
 "#,
@@ -176,7 +151,9 @@
         // SAFETY:
         // The safety here is guaranteed here as no writes happens to this temp file
         unsafe {
-            let error = get_mapped_file(&storage_record_file_path, "system").unwrap_err();
+            let error =
+                get_mapped_file(&storage_record_file_path, "system", StorageFileType::FlagVal)
+                    .unwrap_err();
             assert_eq!(
                 format!("{:?}", error),
                 format!(
@@ -186,4 +163,49 @@
             );
         }
     }
+
+    #[test]
+    fn test_mapped_not_supported_file() {
+        let text_proto = format!(
+            r#"
+files {{
+    version: 0
+    container: "system"
+    package_map: "some_package.map"
+    flag_map: "some_flag.map"
+    flag_val: "some_flag.val"
+    flag_info: "some_flag.info"
+    timestamp: 12345
+}}
+"#,
+        );
+        let storage_record_file = write_proto_to_temp_file(&text_proto).unwrap();
+        let storage_record_file_path = storage_record_file.path().display().to_string();
+
+        // SAFETY:
+        // The safety here is guaranteed as no writes happen to this temp file
+        unsafe {
+            let error =
+                get_mapped_file(&storage_record_file_path, "system", StorageFileType::PackageMap)
+                    .unwrap_err();
+            assert_eq!(
+                format!("{:?}", error),
+                format!(
+                    "MapFileFail(Cannot map file type {:?} as writeable memory mapped files.)",
+                    StorageFileType::PackageMap
+                )
+            );
+
+            let error =
+                get_mapped_file(&storage_record_file_path, "system", StorageFileType::FlagMap)
+                    .unwrap_err();
+            assert_eq!(
+                format!("{:?}", error),
+                format!(
+                    "MapFileFail(Cannot map file type {:?} as writeable memory mapped files.)",
+                    StorageFileType::FlagMap
+                )
+            );
+        }
+    }
 }
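
Because map_file above refuses read-only files, the tests (and any host-side tooling working on copies of these storage files) first produce a writable copy, which is what copy_to_temp_file(..., false) does. A minimal sketch of that preparation step using only std; the helper name and paths are illustrative.

use std::fs;

// Copy a storage file and make sure the copy carries write permission so that
// get_mapped_file can map it mutably.
fn make_writable_copy(src: &str, dst: &str) -> std::io::Result<()> {
    fs::copy(src, dst)?;
    let mut perms = fs::metadata(dst)?.permissions();
    perms.set_readonly(false);
    fs::set_permissions(dst, perms)?;
    Ok(())
}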
diff --git a/tools/aconfig/aconfig_storage_write_api/tests/Android.bp b/tools/aconfig/aconfig_storage_write_api/tests/Android.bp
index 5b23dbc..85568e0 100644
--- a/tools/aconfig/aconfig_storage_write_api/tests/Android.bp
+++ b/tools/aconfig/aconfig_storage_write_api/tests/Android.bp
@@ -1,8 +1,7 @@
-
 rust_test {
     name: "aconfig_storage_write_api.test.rust",
     srcs: [
-        "storage_write_api_test.rs"
+        "storage_write_api_test.rs",
     ],
     rustlibs: [
         "libanyhow",
@@ -14,6 +13,7 @@
     ],
     data: [
         "flag.val",
+        "flag.info",
     ],
     test_suites: ["general-tests"],
 }
@@ -34,6 +34,7 @@
     ],
     data: [
         "flag.val",
+        "flag.info",
     ],
     test_suites: [
         "device-tests",
diff --git a/tools/aconfig/aconfig_storage_write_api/tests/flag.info b/tools/aconfig/aconfig_storage_write_api/tests/flag.info
new file mode 100644
index 0000000..820d839
--- /dev/null
+++ b/tools/aconfig/aconfig_storage_write_api/tests/flag.info
Binary files differ
diff --git a/tools/aconfig/aconfig_storage_write_api/tests/storage_write_api_test.cpp b/tools/aconfig/aconfig_storage_write_api/tests/storage_write_api_test.cpp
index 00b737c..6de3327 100644
--- a/tools/aconfig/aconfig_storage_write_api/tests/storage_write_api_test.cpp
+++ b/tools/aconfig/aconfig_storage_write_api/tests/storage_write_api_test.cpp
@@ -50,7 +50,8 @@
     return temp_file;
   }
 
-  Result<std::string> write_storage_location_pb_file(std::string const& flag_val) {
+  Result<std::string> write_storage_location_pb_file(std::string const& flag_val,
+                                                     std::string const& flag_info) {
     auto temp_file = std::tmpnam(nullptr);
     auto proto = storage_files();
     auto* info = proto.add_files();
@@ -59,6 +60,7 @@
     info->set_package_map("some_package.map");
     info->set_flag_map("some_flag.map");
     info->set_flag_val(flag_val);
+    info->set_flag_info(flag_info);
     info->set_timestamp(12345);
 
     auto content = std::string();
@@ -72,22 +74,25 @@
   void SetUp() override {
     auto const test_dir = android::base::GetExecutableDirectory();
     flag_val = *copy_to_rw_temp_file(test_dir + "/flag.val");
-    storage_record_pb = *write_storage_location_pb_file(flag_val);
+    flag_info = *copy_to_rw_temp_file(test_dir + "/flag.info");
+    storage_record_pb = *write_storage_location_pb_file(flag_val, flag_info);
   }
 
   void TearDown() override {
     std::remove(flag_val.c_str());
+    std::remove(flag_info.c_str());
     std::remove(storage_record_pb.c_str());
   }
 
   std::string flag_val;
+  std::string flag_info;
   std::string storage_record_pb;
 };
 
 /// Negative test to lock down the error when mapping none exist storage files
 TEST_F(AconfigStorageTest, test_none_exist_storage_file_mapping) {
-  auto mapped_file_result = private_api::get_mapped_flag_value_file_impl(
-      storage_record_pb, "vendor");
+  auto mapped_file_result = private_api::get_mutable_mapped_file_impl(
+      storage_record_pb, "vendor", api::StorageFileType::flag_val);
   ASSERT_FALSE(mapped_file_result.ok());
   ASSERT_EQ(mapped_file_result.error().message(),
             "Unable to find storage files for container vendor");
@@ -96,17 +101,34 @@
 /// Negative test to lock down the error when mapping a non writeable storage file
 TEST_F(AconfigStorageTest, test_non_writable_storage_file_mapping) {
   ASSERT_TRUE(chmod(flag_val.c_str(), S_IRUSR | S_IRGRP | S_IROTH) != -1);
-  auto mapped_file_result = private_api::get_mapped_flag_value_file_impl(
-      storage_record_pb, "mockup");
+  auto mapped_file_result = private_api::get_mutable_mapped_file_impl(
+      storage_record_pb, "mockup", api::StorageFileType::flag_val);
   ASSERT_FALSE(mapped_file_result.ok());
   auto it = mapped_file_result.error().message().find("cannot map nonwriteable file");
   ASSERT_TRUE(it != std::string::npos) << mapped_file_result.error().message();
 }
 
+/// Negative test to lock down the error when mapping a file type that cannot be modified
+TEST_F(AconfigStorageTest, test_invalid_storage_file_type_mapping) {
+  auto mapped_file_result = private_api::get_mutable_mapped_file_impl(
+      storage_record_pb, "mockup", api::StorageFileType::package_map);
+  ASSERT_FALSE(mapped_file_result.ok());
+  auto it = mapped_file_result.error().message().find(
+      "Cannot create mutable mapped file for this file type");
+  ASSERT_TRUE(it != std::string::npos) << mapped_file_result.error().message();
+
+  mapped_file_result = private_api::get_mutable_mapped_file_impl(
+      storage_record_pb, "mockup", api::StorageFileType::flag_map);
+  ASSERT_FALSE(mapped_file_result.ok());
+  it = mapped_file_result.error().message().find(
+      "Cannot create mutable mapped file for this file type");
+  ASSERT_TRUE(it != std::string::npos) << mapped_file_result.error().message();
+}
+
 /// Test to lock down storage flag value update api
 TEST_F(AconfigStorageTest, test_boolean_flag_value_update) {
-  auto mapped_file_result = private_api::get_mapped_flag_value_file_impl(
-      storage_record_pb, "mockup");
+  auto mapped_file_result = private_api::get_mutable_mapped_file_impl(
+      storage_record_pb, "mockup", api::StorageFileType::flag_val);
   ASSERT_TRUE(mapped_file_result.ok());
   auto mapped_file = *mapped_file_result;
 
@@ -124,8 +146,8 @@
 
 /// Negative test to lock down the error when querying flag value out of range
 TEST_F(AconfigStorageTest, test_invalid_boolean_flag_value_update) {
-  auto mapped_file_result = private_api::get_mapped_flag_value_file_impl(
-      storage_record_pb, "mockup");
+  auto mapped_file_result = private_api::get_mutable_mapped_file_impl(
+      storage_record_pb, "mockup", api::StorageFileType::flag_val);
   ASSERT_TRUE(mapped_file_result.ok());
   auto mapped_file = *mapped_file_result;
   auto update_result = api::set_boolean_flag_value(mapped_file, 8, true);
@@ -133,3 +155,65 @@
   ASSERT_EQ(update_result.error().message(),
             std::string("InvalidStorageFileOffset(Flag value offset goes beyond the end of the file.)"));
 }
+
+/// Test to lock down storage flag stickiness update api
+TEST_F(AconfigStorageTest, test_flag_is_sticky_update) {
+  auto mapped_file_result = private_api::get_mutable_mapped_file_impl(
+      storage_record_pb, "mockup", api::StorageFileType::flag_info);
+  ASSERT_TRUE(mapped_file_result.ok());
+  auto mapped_file = *mapped_file_result;
+
+  for (int offset = 0; offset < 8; ++offset) {
+    auto update_result = api::set_flag_is_sticky(
+        mapped_file, api::FlagValueType::Boolean, offset, true);
+    ASSERT_TRUE(update_result.ok());
+    auto ro_mapped_file = api::MappedStorageFile();
+    ro_mapped_file.file_ptr = mapped_file.file_ptr;
+    ro_mapped_file.file_size = mapped_file.file_size;
+    auto attribute = api::get_flag_attribute(
+        ro_mapped_file, api::FlagValueType::Boolean, offset);
+    ASSERT_TRUE(attribute.ok());
+    ASSERT_TRUE(*attribute & api::FlagInfoBit::IsSticky);
+
+    update_result = api::set_flag_is_sticky(
+        mapped_file, api::FlagValueType::Boolean, offset, false);
+    ASSERT_TRUE(update_result.ok());
+    ro_mapped_file.file_ptr = mapped_file.file_ptr;
+    ro_mapped_file.file_size = mapped_file.file_size;
+    attribute = api::get_flag_attribute(
+        ro_mapped_file, api::FlagValueType::Boolean, offset);
+    ASSERT_TRUE(attribute.ok());
+    ASSERT_FALSE(*attribute & api::FlagInfoBit::IsSticky);
+  }
+}
+
+/// Test to lock down storage flag has override update api
+TEST_F(AconfigStorageTest, test_flag_has_override_update) {
+  auto mapped_file_result = private_api::get_mutable_mapped_file_impl(
+      storage_record_pb, "mockup", api::StorageFileType::flag_info);
+  ASSERT_TRUE(mapped_file_result.ok());
+  auto mapped_file = *mapped_file_result;
+
+  for (int offset = 0; offset < 8; ++offset) {
+    auto update_result = api::set_flag_has_override(
+        mapped_file, api::FlagValueType::Boolean, offset, true);
+    ASSERT_TRUE(update_result.ok());
+    auto ro_mapped_file = api::MappedStorageFile();
+    ro_mapped_file.file_ptr = mapped_file.file_ptr;
+    ro_mapped_file.file_size = mapped_file.file_size;
+    auto attribute = api::get_flag_attribute(
+        ro_mapped_file, api::FlagValueType::Boolean, offset);
+    ASSERT_TRUE(attribute.ok());
+    ASSERT_TRUE(*attribute & api::FlagInfoBit::HasOverride);
+
+    update_result = api::set_flag_has_override(
+        mapped_file, api::FlagValueType::Boolean, offset, false);
+    ASSERT_TRUE(update_result.ok());
+    ro_mapped_file.file_ptr = mapped_file.file_ptr;
+    ro_mapped_file.file_size = mapped_file.file_size;
+    attribute = api::get_flag_attribute(
+        ro_mapped_file, api::FlagValueType::Boolean, offset);
+    ASSERT_TRUE(attribute.ok());
+    ASSERT_FALSE(*attribute & api::FlagInfoBit::HasOverride);
+  }
+}
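
The two C++ cases above follow the same pattern: flip a single attribute bit through the mutable mapping, then re-read it through a read-only view of the same memory and mask against FlagInfoBit. A minimal Python sketch of that set/clear-and-mask round trip, with bit positions chosen purely for illustration (the real values come from api::FlagInfoBit):

IS_STICKY = 1 << 1      # illustrative bit position, not the real FlagInfoBit value
HAS_OVERRIDE = 1 << 2   # illustrative bit position

def set_bit(attribute: int, bit: int, value: bool) -> int:
    """Set or clear one attribute bit, mirroring set_flag_is_sticky / set_flag_has_override."""
    return attribute | bit if value else attribute & ~bit

attr = set_bit(0, IS_STICKY, True)
assert attr & IS_STICKY          # corresponds to ASSERT_TRUE(*attribute & api::FlagInfoBit::IsSticky)
attr = set_bit(attr, IS_STICKY, False)
assert not (attr & IS_STICKY)    # corresponds to ASSERT_FALSE(...)
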
diff --git a/tools/aconfig/aconfig_storage_write_api/tests/storage_write_api_test.rs b/tools/aconfig/aconfig_storage_write_api/tests/storage_write_api_test.rs
index 4bda54c..5dd36c4 100644
--- a/tools/aconfig/aconfig_storage_write_api/tests/storage_write_api_test.rs
+++ b/tools/aconfig/aconfig_storage_write_api/tests/storage_write_api_test.rs
@@ -1,8 +1,13 @@
 #[cfg(not(feature = "cargo"))]
 mod aconfig_storage_write_api_test {
     use aconfig_storage_file::protos::ProtoStorageFiles;
+    use aconfig_storage_file::{FlagInfoBit, FlagValueType, StorageFileType};
+    use aconfig_storage_read_api::flag_info_query::find_flag_attribute;
     use aconfig_storage_read_api::flag_value_query::find_boolean_flag_value;
-    use aconfig_storage_write_api::{mapped_file::get_mapped_file, set_boolean_flag_value};
+    use aconfig_storage_write_api::{
+        mapped_file::get_mapped_file, set_boolean_flag_value, set_flag_has_override,
+        set_flag_is_sticky,
+    };
 
     use protobuf::Message;
     use std::fs::{self, File};
@@ -10,7 +15,7 @@
     use tempfile::NamedTempFile;
 
     /// Write storage location record pb to a temp file
-    fn write_storage_record_file(flag_val: &str) -> NamedTempFile {
+    fn write_storage_record_file(flag_val: &str, flag_info: &str) -> NamedTempFile {
         let text_proto = format!(
             r#"
 files {{
@@ -19,10 +24,11 @@
     package_map: "some_package_map"
     flag_map: "some_flag_map"
     flag_val: "{}"
+    flag_info: "{}"
     timestamp: 12345
 }}
 "#,
-            flag_val
+            flag_val, flag_info
         );
         let storage_files: ProtoStorageFiles =
             protobuf::text_format::parse_from_str(&text_proto).unwrap();
@@ -48,18 +54,30 @@
         find_boolean_flag_value(&bytes, offset).unwrap()
     }
 
+    /// Get flag attribute at offset
+    fn get_flag_attribute_at_offset(file: &str, value_type: FlagValueType, offset: u32) -> u8 {
+        let mut f = File::open(file).unwrap();
+        let mut bytes = Vec::new();
+        f.read_to_end(&mut bytes).unwrap();
+        find_flag_attribute(&bytes, value_type, offset).unwrap()
+    }
+
     #[test]
     /// Test to lock down flag value update api
     fn test_boolean_flag_value_update() {
         let flag_value_file = copy_to_temp_rw_file("./flag.val");
+        let flag_info_file = copy_to_temp_rw_file("./flag.info");
         let flag_value_path = flag_value_file.path().display().to_string();
-        let record_pb_file = write_storage_record_file(&flag_value_path);
+        let flag_info_path = flag_info_file.path().display().to_string();
+        let record_pb_file = write_storage_record_file(&flag_value_path, &flag_info_path);
         let record_pb_path = record_pb_file.path().display().to_string();
 
         // SAFETY:
         // The safety here is ensured as only this single threaded test process will
         // write to this file
-        let mut file = unsafe { get_mapped_file(&record_pb_path, "mockup").unwrap() };
+        let mut file = unsafe {
+            get_mapped_file(&record_pb_path, "mockup", StorageFileType::FlagVal).unwrap()
+        };
         for i in 0..8 {
             set_boolean_flag_value(&mut file, i, true).unwrap();
             let value = get_boolean_flag_value_at_offset(&flag_value_path, i);
@@ -70,4 +88,60 @@
             assert!(!value);
         }
     }
+
+    #[test]
+    /// Test to lock down flag is sticky update api
+    fn test_set_flag_is_sticky() {
+        let flag_value_file = copy_to_temp_rw_file("./flag.val");
+        let flag_info_file = copy_to_temp_rw_file("./flag.info");
+        let flag_value_path = flag_value_file.path().display().to_string();
+        let flag_info_path = flag_info_file.path().display().to_string();
+        let record_pb_file = write_storage_record_file(&flag_value_path, &flag_info_path);
+        let record_pb_path = record_pb_file.path().display().to_string();
+
+        // SAFETY:
+        // The safety here is ensured as only this single threaded test process will
+        // write to this file
+        let mut file = unsafe {
+            get_mapped_file(&record_pb_path, "mockup", StorageFileType::FlagInfo).unwrap()
+        };
+        for i in 0..8 {
+            set_flag_is_sticky(&mut file, FlagValueType::Boolean, i, true).unwrap();
+            let attribute =
+                get_flag_attribute_at_offset(&flag_info_path, FlagValueType::Boolean, i);
+            assert!((attribute & (FlagInfoBit::IsSticky as u8)) != 0);
+            set_flag_is_sticky(&mut file, FlagValueType::Boolean, i, false).unwrap();
+            let attribute =
+                get_flag_attribute_at_offset(&flag_info_path, FlagValueType::Boolean, i);
+            assert!((attribute & (FlagInfoBit::IsSticky as u8)) == 0);
+        }
+    }
+
+    #[test]
+    /// Test to lock down flag has override update api
+    fn test_set_flag_has_override() {
+        let flag_value_file = copy_to_temp_rw_file("./flag.val");
+        let flag_info_file = copy_to_temp_rw_file("./flag.info");
+        let flag_value_path = flag_value_file.path().display().to_string();
+        let flag_info_path = flag_info_file.path().display().to_string();
+        let record_pb_file = write_storage_record_file(&flag_value_path, &flag_info_path);
+        let record_pb_path = record_pb_file.path().display().to_string();
+
+        // SAFETY:
+        // The safety here is ensured as only this single threaded test process will
+        // write to this file
+        let mut file = unsafe {
+            get_mapped_file(&record_pb_path, "mockup", StorageFileType::FlagInfo).unwrap()
+        };
+        for i in 0..8 {
+            set_flag_has_override(&mut file, FlagValueType::Boolean, i, true).unwrap();
+            let attribute =
+                get_flag_attribute_at_offset(&flag_info_path, FlagValueType::Boolean, i);
+            assert!((attribute & (FlagInfoBit::HasOverride as u8)) != 0);
+            set_flag_has_override(&mut file, FlagValueType::Boolean, i, false).unwrap();
+            let attribute =
+                get_flag_attribute_at_offset(&flag_info_path, FlagValueType::Boolean, i);
+            assert!((attribute & (FlagInfoBit::HasOverride as u8)) == 0);
+        }
+    }
 }
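
The Rust cases verify each write by re-reading flag.val/flag.info from disk rather than through the mapping; this works because get_mapped_file returns a shared, writable mapping, so stores write through to the backing file. A minimal Python sketch of that write-through round trip, using an 8-byte scratch file purely for illustration:

import mmap
import tempfile

with tempfile.NamedTemporaryFile() as f:
    f.write(b"\x00" * 8)                  # stand-in for the copied flag value file
    f.flush()
    with mmap.mmap(f.fileno(), 8) as m:   # shared mapping, like get_mapped_file
        m[3] = 1                          # analogous to set_boolean_flag_value(&mut file, 3, true)
    f.seek(0)
    assert f.read()[3] == 1               # read back from disk, like find_boolean_flag_value
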
diff --git a/tools/aconfig/aflags/Android.bp b/tools/aconfig/aflags/Android.bp
index 4920a6f..2a02379 100644
--- a/tools/aconfig/aflags/Android.bp
+++ b/tools/aconfig/aflags/Android.bp
@@ -9,6 +9,7 @@
     lints: "android",
     srcs: ["src/main.rs"],
     rustlibs: [
+        "libaconfig_device_paths",
         "libaconfig_protos",
         "libaconfig_storage_read_api",
         "libaconfig_storage_file",
diff --git a/tools/aconfig/aflags/Cargo.toml b/tools/aconfig/aflags/Cargo.toml
index cce7f9d..eeae295 100644
--- a/tools/aconfig/aflags/Cargo.toml
+++ b/tools/aconfig/aflags/Cargo.toml
@@ -13,3 +13,4 @@
 aconfig_storage_file = { version = "0.1.0", path = "../aconfig_storage_file" }
 aconfig_storage_read_api = { version = "0.1.0", path = "../aconfig_storage_read_api" }
 clap = {version = "4.5.2" }
+aconfig_device_paths = { version = "0.1.0", path = "../aconfig_device_paths" }
diff --git a/tools/aconfig/aflags/src/device_config_source.rs b/tools/aconfig/aflags/src/device_config_source.rs
index 089f33d..cf6ab28 100644
--- a/tools/aconfig/aflags/src/device_config_source.rs
+++ b/tools/aconfig/aflags/src/device_config_source.rs
@@ -14,78 +14,17 @@
  * limitations under the License.
  */
 
-use crate::{Flag, FlagPermission, FlagSource, FlagValue, ValuePickedFrom};
-use aconfig_protos::ProtoFlagPermission as ProtoPermission;
-use aconfig_protos::ProtoFlagState as ProtoState;
-use aconfig_protos::ProtoParsedFlag;
-use aconfig_protos::ProtoParsedFlags;
+use crate::load_protos;
+use crate::{Flag, FlagSource, FlagValue, ValuePickedFrom};
+
 use anyhow::{anyhow, bail, Result};
 use regex::Regex;
-use std::collections::BTreeMap;
 use std::collections::HashMap;
 use std::process::Command;
-use std::{fs, str};
+use std::str;
 
 pub struct DeviceConfigSource {}
 
-fn convert_parsed_flag(flag: &ProtoParsedFlag) -> Flag {
-    let namespace = flag.namespace().to_string();
-    let package = flag.package().to_string();
-    let name = flag.name().to_string();
-
-    let container = if flag.container().is_empty() {
-        "system".to_string()
-    } else {
-        flag.container().to_string()
-    };
-
-    let value = match flag.state() {
-        ProtoState::ENABLED => FlagValue::Enabled,
-        ProtoState::DISABLED => FlagValue::Disabled,
-    };
-
-    let permission = match flag.permission() {
-        ProtoPermission::READ_ONLY => FlagPermission::ReadOnly,
-        ProtoPermission::READ_WRITE => FlagPermission::ReadWrite,
-    };
-
-    Flag {
-        namespace,
-        package,
-        name,
-        container,
-        value,
-        staged_value: None,
-        permission,
-        value_picked_from: ValuePickedFrom::Default,
-    }
-}
-
-fn read_pb_files() -> Result<Vec<Flag>> {
-    let mut flags: BTreeMap<String, Flag> = BTreeMap::new();
-    for partition in ["system", "system_ext", "product", "vendor"] {
-        let path = format!("/{partition}/etc/aconfig_flags.pb");
-        let Ok(bytes) = fs::read(&path) else {
-            eprintln!("warning: failed to read {}", path);
-            continue;
-        };
-        let parsed_flags: ProtoParsedFlags = protobuf::Message::parse_from_bytes(&bytes)?;
-        for flag in parsed_flags.parsed_flag {
-            let key = format!("{}.{}", flag.package(), flag.name());
-            let container = if flag.container().is_empty() {
-                "system".to_string()
-            } else {
-                flag.container().to_string()
-            };
-
-            if container.eq(partition) {
-                flags.insert(key, convert_parsed_flag(&flag));
-            }
-        }
-    }
-    Ok(flags.values().cloned().collect())
-}
-
 fn parse_device_config(raw: &str) -> Result<HashMap<String, FlagValue>> {
     let mut flags = HashMap::new();
     let regex = Regex::new(r"(?m)^([[[:alnum:]]_]+/[[[:alnum:]]_\.]+)=(true|false)$")?;
@@ -180,7 +119,7 @@
 
 impl FlagSource for DeviceConfigSource {
     fn list_flags() -> Result<Vec<Flag>> {
-        let pb_flags = read_pb_files()?;
+        let pb_flags = load_protos::load()?;
         let dc_flags = read_device_config_flags()?;
         let staged_flags = read_staged_flags()?;
 
diff --git a/tools/aconfig/aflags/src/load_protos.rs b/tools/aconfig/aflags/src/load_protos.rs
new file mode 100644
index 0000000..90d8599
--- /dev/null
+++ b/tools/aconfig/aflags/src/load_protos.rs
@@ -0,0 +1,62 @@
+use crate::{Flag, FlagPermission, FlagValue, ValuePickedFrom};
+use aconfig_protos::ProtoFlagPermission as ProtoPermission;
+use aconfig_protos::ProtoFlagState as ProtoState;
+use aconfig_protos::ProtoParsedFlag;
+use aconfig_protos::ProtoParsedFlags;
+use anyhow::Result;
+use std::fs;
+use std::path::Path;
+
+// TODO(b/329875578): use container field directly instead of inferring.
+fn infer_container(path: &Path) -> String {
+    let path_str = path.to_string_lossy();
+    path_str
+        .strip_prefix("/apex/")
+        .or_else(|| path_str.strip_prefix('/'))
+        .unwrap_or(&path_str)
+        .strip_suffix("/etc/aconfig_flags.pb")
+        .unwrap_or(&path_str)
+        .to_string()
+}
+
+fn convert_parsed_flag(path: &Path, flag: &ProtoParsedFlag) -> Flag {
+    let namespace = flag.namespace().to_string();
+    let package = flag.package().to_string();
+    let name = flag.name().to_string();
+
+    let value = match flag.state() {
+        ProtoState::ENABLED => FlagValue::Enabled,
+        ProtoState::DISABLED => FlagValue::Disabled,
+    };
+
+    let permission = match flag.permission() {
+        ProtoPermission::READ_ONLY => FlagPermission::ReadOnly,
+        ProtoPermission::READ_WRITE => FlagPermission::ReadWrite,
+    };
+
+    Flag {
+        namespace,
+        package,
+        name,
+        container: infer_container(path),
+        value,
+        staged_value: None,
+        permission,
+        value_picked_from: ValuePickedFrom::Default,
+    }
+}
+
+pub(crate) fn load() -> Result<Vec<Flag>> {
+    let mut result = Vec::new();
+
+    let paths = aconfig_device_paths::parsed_flags_proto_paths()?;
+    for path in paths {
+        let bytes = fs::read(path.clone())?;
+        let parsed_flags: ProtoParsedFlags = protobuf::Message::parse_from_bytes(&bytes)?;
+        for flag in parsed_flags.parsed_flag {
+            // TODO(b/334954748): enforce one-container-per-flag invariant.
+            result.push(convert_parsed_flag(&path, &flag));
+        }
+    }
+    Ok(result)
+}
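
infer_container recovers the container from the proto's on-device path until the container field can be used directly (see the TODO). A small Python sketch of the same prefix/suffix stripping, with example paths (the /apex/ path is hypothetical):

def infer_container(path: str) -> str:
    stripped = path
    if path.startswith("/apex/"):
        stripped = path[len("/apex/"):]
    elif path.startswith("/"):
        stripped = path[1:]
    suffix = "/etc/aconfig_flags.pb"
    # Like the Rust version, fall back to the original path if the suffix is absent.
    return stripped[: -len(suffix)] if stripped.endswith(suffix) else path

assert infer_container("/system/etc/aconfig_flags.pb") == "system"
assert infer_container("/apex/com.android.foo/etc/aconfig_flags.pb") == "com.android.foo"
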
diff --git a/tools/aconfig/aflags/src/main.rs b/tools/aconfig/aflags/src/main.rs
index 1c453c5..4ce0d35 100644
--- a/tools/aconfig/aflags/src/main.rs
+++ b/tools/aconfig/aflags/src/main.rs
@@ -25,6 +25,8 @@
 mod aconfig_storage_source;
 use aconfig_storage_source::AconfigStorageSource;
 
+mod load_protos;
+
 #[derive(Clone, PartialEq, Debug)]
 enum FlagPermission {
     ReadOnly,
diff --git a/tools/check-flagged-apis/Android.bp b/tools/check-flagged-apis/Android.bp
new file mode 100644
index 0000000..ebd79c1
--- /dev/null
+++ b/tools/check-flagged-apis/Android.bp
@@ -0,0 +1,51 @@
+// Copyright (C) 2024 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package {
+    default_team: "trendy_team_updatable_sdk_apis",
+    default_applicable_licenses: ["Android-Apache-2.0"],
+}
+
+java_defaults {
+    name: "check-flagged-apis-defaults",
+    srcs: [
+        "src/com/android/checkflaggedapis/Main.kt",
+    ],
+    static_libs: [
+        "libaconfig_java_proto_lite",
+        "metalava-signature-reader",
+        "metalava-tools-common-m2-deps",
+    ],
+}
+
+java_binary_host {
+    name: "check-flagged-apis",
+    defaults: [
+        "check-flagged-apis-defaults",
+    ],
+    main_class: "com.android.checkflaggedapis.Main",
+}
+
+java_test_host {
+    name: "check-flagged-apis-test",
+    defaults: [
+        "check-flagged-apis-defaults",
+    ],
+    srcs: [
+        "src/com/android/checkflaggedapis/CheckFlaggedApisTest.kt",
+    ],
+    static_libs: [
+        "tradefed",
+    ],
+}
diff --git a/tools/check-flagged-apis/OWNERS b/tools/check-flagged-apis/OWNERS
new file mode 100644
index 0000000..289e21e
--- /dev/null
+++ b/tools/check-flagged-apis/OWNERS
@@ -0,0 +1,4 @@
+amhk@google.com
+gurpreetgs@google.com
+michaelwr@google.com
+paulduffin@google.com
diff --git a/tools/check-flagged-apis/src/com/android/checkflaggedapis/CheckFlaggedApisTest.kt b/tools/check-flagged-apis/src/com/android/checkflaggedapis/CheckFlaggedApisTest.kt
new file mode 100644
index 0000000..d2b75d4
--- /dev/null
+++ b/tools/check-flagged-apis/src/com/android/checkflaggedapis/CheckFlaggedApisTest.kt
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.android.checkflaggedapis
+
+import android.aconfig.Aconfig
+import com.android.tradefed.testtype.DeviceJUnit4ClassRunner
+import com.android.tradefed.testtype.junit4.BaseHostJUnit4Test
+import java.io.ByteArrayInputStream
+import java.io.ByteArrayOutputStream
+import java.io.InputStream
+import org.junit.Assert.assertEquals
+import org.junit.Test
+import org.junit.runner.RunWith
+
+private val API_SIGNATURE =
+    """
+      // Signature format: 2.0
+      package android {
+        public final class Clazz {
+          ctor public Clazz();
+          field @FlaggedApi("android.flag.foo") public static final int FOO = 1; // 0x1
+        }
+      }
+"""
+        .trim()
+
+private val API_VERSIONS =
+    """
+      <?xml version="1.0" encoding="utf-8"?>
+      <api version="3">
+        <class name="android/Clazz" since="1">
+          <method name="&lt;init>()V"/>
+          <field name="FOO"/>
+        </class>
+      </api>
+"""
+        .trim()
+
+private fun generateFlagsProto(fooState: Aconfig.flag_state): InputStream {
+  val parsed_flag =
+      Aconfig.parsed_flag
+          .newBuilder()
+          .setPackage("android.flag")
+          .setName("foo")
+          .setState(fooState)
+          .setPermission(Aconfig.flag_permission.READ_ONLY)
+          .build()
+  val parsed_flags = Aconfig.parsed_flags.newBuilder().addParsedFlag(parsed_flag).build()
+  val binaryProto = ByteArrayOutputStream()
+  parsed_flags.writeTo(binaryProto)
+  return ByteArrayInputStream(binaryProto.toByteArray())
+}
+
+@RunWith(DeviceJUnit4ClassRunner::class)
+class CheckFlaggedApisTest : BaseHostJUnit4Test() {
+  @Test
+  fun testParseApiSignature() {
+    val expected = setOf(Pair(Symbol("android.Clazz.FOO"), Flag("android.flag.foo")))
+    val actual = parseApiSignature("in-memory", API_SIGNATURE.byteInputStream())
+    assertEquals(expected, actual)
+  }
+
+  @Test
+  fun testParseFlagValues() {
+    val expected: Map<Flag, Boolean> = mapOf(Flag("android.flag.foo") to true)
+    val actual = parseFlagValues(generateFlagsProto(Aconfig.flag_state.ENABLED))
+    assertEquals(expected, actual)
+  }
+
+  @Test
+  fun testParseApiVersions() {
+    val expected: Set<Symbol> = setOf(Symbol("android.Clazz.FOO"))
+    val actual = parseApiVersions(API_VERSIONS.byteInputStream())
+    assertEquals(expected, actual)
+  }
+
+  @Test
+  fun testFindErrorsNoErrors() {
+    val expected = setOf<ApiError>()
+    val actual =
+        findErrors(
+            parseApiSignature("in-memory", API_SIGNATURE.byteInputStream()),
+            parseFlagValues(generateFlagsProto(Aconfig.flag_state.ENABLED)),
+            parseApiVersions(API_VERSIONS.byteInputStream()))
+    assertEquals(expected, actual)
+  }
+
+  @Test
+  fun testFindErrorsDisabledFlaggedApiIsPresent() {
+    val expected =
+        setOf<ApiError>(
+            DisabledFlaggedApiIsPresentError(Symbol("android.Clazz.FOO"), Flag("android.flag.foo")))
+    val actual =
+        findErrors(
+            parseApiSignature("in-memory", API_SIGNATURE.byteInputStream()),
+            parseFlagValues(generateFlagsProto(Aconfig.flag_state.DISABLED)),
+            parseApiVersions(API_VERSIONS.byteInputStream()))
+    assertEquals(expected, actual)
+  }
+}
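
The expectations above rely on Symbol.create (defined in Main.kt below) folding the '/', '#' and '$' separators used by the various input formats into dots, which is why the api-versions.xml class name "android/Clazz" plus the field FOO compares equal to Symbol("android.Clazz.FOO"). A short Python sketch of that normalization:

def symbol_create(name: str) -> str:
    # Fold the forbidden delimiters into dots, as Symbol.create does.
    for ch in "/#$":
        name = name.replace(ch, ".")
    return name

assert symbol_create("android/Clazz" + "." + "FOO") == "android.Clazz.FOO"
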
diff --git a/tools/check-flagged-apis/src/com/android/checkflaggedapis/Main.kt b/tools/check-flagged-apis/src/com/android/checkflaggedapis/Main.kt
new file mode 100644
index 0000000..84564ba
--- /dev/null
+++ b/tools/check-flagged-apis/src/com/android/checkflaggedapis/Main.kt
@@ -0,0 +1,251 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@file:JvmName("Main")
+
+package com.android.checkflaggedapis
+
+import android.aconfig.Aconfig
+import com.android.tools.metalava.model.BaseItemVisitor
+import com.android.tools.metalava.model.FieldItem
+import com.android.tools.metalava.model.text.ApiFile
+import com.github.ajalt.clikt.core.CliktCommand
+import com.github.ajalt.clikt.core.ProgramResult
+import com.github.ajalt.clikt.parameters.options.help
+import com.github.ajalt.clikt.parameters.options.option
+import com.github.ajalt.clikt.parameters.options.required
+import com.github.ajalt.clikt.parameters.types.path
+import java.io.InputStream
+import javax.xml.parsers.DocumentBuilderFactory
+import org.w3c.dom.Node
+
+/**
+ * Class representing the fully qualified name of a class, method or field.
+ *
+ * This tool reads a multitude of input formats, each of which represents the fully qualified name
+ * of a Java symbol slightly differently. To keep things consistent, all parsed APIs are converted
+ * to Symbols.
+ *
+ * All parts of the fully qualified name of the Symbol are separated by a dot, e.g.:
+ * <pre>
+ *   package.class.inner-class.field
+ * </pre>
+ */
+@JvmInline
+internal value class Symbol(val name: String) {
+  companion object {
+    private val FORBIDDEN_CHARS = listOf('/', '#', '$')
+
+    /** Create a new Symbol from a String that may include delimiters other than dot. */
+    fun create(name: String): Symbol {
+      var sanitizedName = name
+      for (ch in FORBIDDEN_CHARS) {
+        sanitizedName = sanitizedName.replace(ch, '.')
+      }
+      return Symbol(sanitizedName)
+    }
+  }
+
+  init {
+    require(!name.isEmpty()) { "empty string" }
+    for (ch in FORBIDDEN_CHARS) {
+      require(!name.contains(ch)) { "$name: contains $ch" }
+    }
+  }
+
+  override fun toString(): String = name.toString()
+}
+
+/**
+ * Class representing the fully qualified name of an aconfig flag.
+ *
+ * This includes both the flag's package and name, separated by a dot, e.g.:
+ * <pre>
+ *   com.android.aconfig.test.disabled_ro
+ * </pre>
+ */
+@JvmInline
+internal value class Flag(val name: String) {
+  override fun toString(): String = name.toString()
+}
+
+internal sealed class ApiError {
+  abstract val symbol: Symbol
+  abstract val flag: Flag
+}
+
+internal data class EnabledFlaggedApiNotPresentError(
+    override val symbol: Symbol,
+    override val flag: Flag
+) : ApiError() {
+  override fun toString(): String {
+    return "error: enabled @FlaggedApi not present in built artifact: symbol=$symbol flag=$flag"
+  }
+}
+
+internal data class DisabledFlaggedApiIsPresentError(
+    override val symbol: Symbol,
+    override val flag: Flag
+) : ApiError() {
+  override fun toString(): String {
+    return "error: disabled @FlaggedApi is present in built artifact: symbol=$symbol flag=$flag"
+  }
+}
+
+internal data class UnknownFlagError(override val symbol: Symbol, override val flag: Flag) :
+    ApiError() {
+  override fun toString(): String {
+    return "error: unknown flag: symbol=$symbol flag=$flag"
+  }
+}
+
+class CheckCommand :
+    CliktCommand(
+        help =
+            """
+Check that all flagged APIs are used in the correct way.
+
+This tool reads an API signature file, the aconfig flag values, and an API versions XML file, and verifies that enabled flagged APIs are present in the built artifact while disabled flagged APIs are absent.
+
+The tool exits with a non-zero exit code if any flagged API is used incorrectly.
+""") {
+  private val apiSignaturePath by
+      option("--api-signature")
+          .help(
+              """
+              Path to API signature file.
+              Usually named *current.txt.
+              Tip: `m frameworks-base-api-current.txt` will generate a file that includes all platform and mainline APIs.
+              """)
+          .path(mustExist = true, canBeDir = false, mustBeReadable = true)
+          .required()
+  private val flagValuesPath by
+      option("--flag-values")
+          .help(
+              """
+            Path to aconfig parsed_flags binary proto file.
+            Tip: `m all_aconfig_declarations` will generate a file that includes all information about all flags.
+            """)
+          .path(mustExist = true, canBeDir = false, mustBeReadable = true)
+          .required()
+  private val apiVersionsPath by
+      option("--api-versions")
+          .help(
+              """
+            Path to API versions XML file.
+            Usually named api-versions.xml.
+            Tip: `m sdk dist` will generate a file that includes all platform and mainline APIs.
+            """)
+          .path(mustExist = true, canBeDir = false, mustBeReadable = true)
+          .required()
+
+  override fun run() {
+    val flaggedSymbols =
+        apiSignaturePath.toFile().inputStream().use {
+          parseApiSignature(apiSignaturePath.toString(), it)
+        }
+    val flags = flagValuesPath.toFile().inputStream().use { parseFlagValues(it) }
+    val exportedSymbols = apiVersionsPath.toFile().inputStream().use { parseApiVersions(it) }
+    val errors = findErrors(flaggedSymbols, flags, exportedSymbols)
+    for (e in errors) {
+      println(e)
+    }
+    throw ProgramResult(errors.size)
+  }
+}
+
+internal fun parseApiSignature(path: String, input: InputStream): Set<Pair<Symbol, Flag>> {
+  // TODO(b/334870672): add support for classes and methods
+  val output = mutableSetOf<Pair<Symbol, Flag>>()
+  val visitor =
+      object : BaseItemVisitor() {
+        override fun visitField(field: FieldItem) {
+          val flag =
+              field.modifiers
+                  .findAnnotation("android.annotation.FlaggedApi")
+                  ?.findAttribute("value")
+                  ?.value
+                  ?.value() as? String
+          if (flag != null) {
+            val symbol = Symbol.create(field.baselineElementId())
+            output.add(Pair(symbol, Flag(flag)))
+          }
+        }
+      }
+  val codebase = ApiFile.parseApi(path, input)
+  codebase.accept(visitor)
+  return output
+}
+
+internal fun parseFlagValues(input: InputStream): Map<Flag, Boolean> {
+  val parsedFlags = Aconfig.parsed_flags.parseFrom(input).getParsedFlagList()
+  return parsedFlags.associateBy(
+      { Flag("${it.getPackage()}.${it.getName()}") },
+      { it.getState() == Aconfig.flag_state.ENABLED })
+}
+
+internal fun parseApiVersions(input: InputStream): Set<Symbol> {
+  fun Node.getAttribute(name: String): String? = getAttributes()?.getNamedItem(name)?.getNodeValue()
+
+  val output = mutableSetOf<Symbol>()
+  val factory = DocumentBuilderFactory.newInstance()
+  val parser = factory.newDocumentBuilder()
+  val document = parser.parse(input)
+  val fields = document.getElementsByTagName("field")
+  // ktfmt doesn't understand the `..<` range syntax; explicitly call .rangeUntil instead
+  for (i in 0.rangeUntil(fields.getLength())) {
+    val field = fields.item(i)
+    val fieldName = field.getAttribute("name")
+    val className =
+        requireNotNull(field.getParentNode()) { "Bad XML: top level <field> element" }
+            .getAttribute("name")
+    output.add(Symbol.create("$className.$fieldName"))
+  }
+  return output
+}
+
+/**
+ * Find errors in the given data.
+ *
+ * @param flaggedSymbolsInSource the set of symbols that are flagged in the source code
+ * @param flags the set of flags and their values
+ * @param symbolsInOutput the set of symbols that are present in the output
+ * @return the set of errors found
+ */
+internal fun findErrors(
+    flaggedSymbolsInSource: Set<Pair<Symbol, Flag>>,
+    flags: Map<Flag, Boolean>,
+    symbolsInOutput: Set<Symbol>
+): Set<ApiError> {
+  val errors = mutableSetOf<ApiError>()
+  for ((symbol, flag) in flaggedSymbolsInSource) {
+    try {
+      if (flags.getValue(flag)) {
+        if (!symbolsInOutput.contains(symbol)) {
+          errors.add(EnabledFlaggedApiNotPresentError(symbol, flag))
+        }
+      } else {
+        if (symbolsInOutput.contains(symbol)) {
+          errors.add(DisabledFlaggedApiIsPresentError(symbol, flag))
+        }
+      }
+    } catch (e: NoSuchElementException) {
+      errors.add(UnknownFlagError(symbol, flag))
+    }
+  }
+  return errors
+}
+
+fun main(args: Array<String>) = CheckCommand().main(args)
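
Read as a decision table, findErrors reports three cases: an enabled flag whose symbol is missing from the built artifact, a disabled flag whose symbol is present, and a flag that does not appear in the parsed_flags input at all. A compact Python restatement (the error labels are placeholders for the Kotlin error classes), checked against the same data the tests use:

def find_errors(flagged, flag_values, exported):
    errors = set()
    for symbol, flag in flagged:
        if flag not in flag_values:
            errors.add(("unknown-flag", symbol, flag))
        elif flag_values[flag] and symbol not in exported:
            errors.add(("enabled-but-not-present", symbol, flag))
        elif not flag_values[flag] and symbol in exported:
            errors.add(("disabled-but-present", symbol, flag))
    return errors

# Mirrors testFindErrorsDisabledFlaggedApiIsPresent:
assert find_errors(
    {("android.Clazz.FOO", "android.flag.foo")},
    {"android.flag.foo": False},
    {"android.Clazz.FOO"},
) == {("disabled-but-present", "android.Clazz.FOO", "android.flag.foo")}
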
diff --git a/tools/releasetools/Android.bp b/tools/releasetools/Android.bp
index 4941c71..9385f0c 100644
--- a/tools/releasetools/Android.bp
+++ b/tools/releasetools/Android.bp
@@ -244,7 +244,6 @@
         "boot_signer",
         "brotli",
         "bsdiff",
-        "imgdiff",
         "lz4",
         "mkbootfs",
         "signapk",
@@ -308,7 +307,6 @@
         "brotli",
         "bsdiff",
         "deapexer",
-        "imgdiff",
         "lz4",
         "mkbootfs",
         "signapk",
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 8836248..2367691 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -490,7 +490,6 @@
       return -1
 
     props = [
-        "ro.board.api_level",
         "ro.board.first_api_level",
         "ro.product.first_api_level",
     ]
@@ -955,6 +954,13 @@
   d["build.prop"] = d["system.build.prop"]
 
   if d.get("avb_enable") == "true":
+    build_info = BuildInfo(d, use_legacy_id=True)
+    # Set up the salt for partitions without build.prop
+    if build_info.fingerprint:
+      if "fingerprint" not in d:
+        d["fingerprint"] = build_info.fingerprint
+      if "avb_salt" not in d:
+        d["avb_salt"] = sha256(build_info.fingerprint.encode()).hexdigest()
     # Set the vbmeta digest if exists
     try:
       d["vbmeta_digest"] = read_helper("META/vbmeta_digest.txt").rstrip()
@@ -1517,7 +1523,7 @@
       AVB_ARG_NAME_CHAIN_PARTITION: []
   }
 
-  for partition, path in partitions.items():
+  for partition, path in sorted(partitions.items()):
     avb_partition_arg = GetAvbPartitionArg(partition, path, info_dict)
     if not avb_partition_arg:
       continue
@@ -1605,7 +1611,7 @@
       "avb_custom_vbmeta_images_partition_list", "").strip().split()]
 
   avb_partitions = {}
-  for partition, path in partitions.items():
+  for partition, path in sorted(partitions.items()):
     if partition not in needed_partitions:
       continue
     assert (partition in AVB_PARTITIONS or
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index 67438e6..2c5fe0d 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -1288,7 +1288,7 @@
       assert len(words) >= 1 and len(words) <= 2
       OPTIONS.vabc_compression_param = a.lower()
       if len(words) == 2:
-        if not words[1].isdigit():
+        if not words[1].lstrip("-").isdigit():
           raise ValueError("Cannot parse value %r for option $COMPRESSION_LEVEL - only "
                            "integers are allowed." % words[1])
     elif o == "--security_patch_level":