Merge "Adding BT2020 to ColorManagement in RenderEngine drawMesh"
diff --git a/cmds/installd/dexopt.cpp b/cmds/installd/dexopt.cpp
index 90cadb4..50a2412 100644
--- a/cmds/installd/dexopt.cpp
+++ b/cmds/installd/dexopt.cpp
@@ -201,23 +201,53 @@
}
}
-// Automatically adds binary and null terminator arg.
-static inline void ExecVWithArgs(const char* bin, const std::vector<std::string>& args) {
- std::vector<const char*> argv = {bin};
- for (const std::string& arg : args) {
- argv.push_back(arg.c_str());
- }
- // Add null terminator.
- argv.push_back(nullptr);
- execv(bin, (char * const *)&argv[0]);
-}
+// ExecVHelper prepares and holds pointers to parsed command line arguments so that no allocations
+// need to be performed between the fork and exec.
+class ExecVHelper {
+ public:
+ // Store a placeholder for the binary name.
+ ExecVHelper() : args_(1u, std::string()) {}
-static inline void AddArgIfNonEmpty(const std::string& arg, std::vector<std::string>* args) {
- DCHECK(args != nullptr);
- if (!arg.empty()) {
- args->push_back(arg);
+ void PrepareArgs(const std::string& bin) {
+ CHECK(!args_.empty());
+ CHECK(args_[0].empty());
+ args_[0] = bin;
+ // Write char* into array.
+ for (const std::string& arg : args_) {
+ argv_.push_back(arg.c_str());
+ }
+ argv_.push_back(nullptr); // Add null terminator.
}
-}
+
+ [[ noreturn ]]
+ void Exec(int exit_code) {
+ execv(argv_[0], (char * const *)&argv_[0]);
+ PLOG(ERROR) << "execv(" << argv_[0] << ") failed";
+ exit(exit_code);
+ }
+
+ // Add an arg if it's not empty.
+ void AddArg(const std::string& arg) {
+ if (!arg.empty()) {
+ args_.push_back(arg);
+ }
+ }
+
+ // Add a runtime arg if it's not empty.
+ void AddRuntimeArg(const std::string& arg) {
+ if (!arg.empty()) {
+ args_.push_back("--runtime-arg");
+ args_.push_back(arg);
+ }
+ }
+
+ protected:
+ // Holder arrays for backing arg storage.
+ std::vector<std::string> args_;
+
+ // Argument pointers.
+ std::vector<const char*> argv_;
+};
static std::string MapPropertyToArg(const std::string& property,
const std::string& format,
@@ -229,212 +259,220 @@
return "";
}
-[[ noreturn ]]
-static void run_dex2oat(int zip_fd, int oat_fd, int input_vdex_fd, int output_vdex_fd, int image_fd,
- const char* input_file_name, const char* output_file_name, int swap_fd,
- const char* instruction_set, const char* compiler_filter,
- bool debuggable, bool post_bootcomplete, bool background_job_compile, int profile_fd,
- const char* class_loader_context, int target_sdk_version, bool enable_hidden_api_checks,
- bool generate_compact_dex, int dex_metadata_fd, const char* compilation_reason) {
- // Get the relative path to the input file.
- const char* relative_input_file_name = get_location_from_path(input_file_name);
+class RunDex2Oat : public ExecVHelper {
+ public:
+ RunDex2Oat(int zip_fd,
+ int oat_fd,
+ int input_vdex_fd,
+ int output_vdex_fd,
+ int image_fd,
+ const char* input_file_name,
+ const char* output_file_name,
+ int swap_fd,
+ const char* instruction_set,
+ const char* compiler_filter,
+ bool debuggable,
+ bool post_bootcomplete,
+ bool background_job_compile,
+ int profile_fd,
+ const char* class_loader_context,
+ int target_sdk_version,
+ bool enable_hidden_api_checks,
+ bool generate_compact_dex,
+ int dex_metadata_fd,
+ const char* compilation_reason) {
+ // Get the relative path to the input file.
+ const char* relative_input_file_name = get_location_from_path(input_file_name);
- std::string dex2oat_Xms_arg = MapPropertyToArg("dalvik.vm.dex2oat-Xms", "-Xms%s");
- std::string dex2oat_Xmx_arg = MapPropertyToArg("dalvik.vm.dex2oat-Xmx", "-Xmx%s");
+ std::string dex2oat_Xms_arg = MapPropertyToArg("dalvik.vm.dex2oat-Xms", "-Xms%s");
+ std::string dex2oat_Xmx_arg = MapPropertyToArg("dalvik.vm.dex2oat-Xmx", "-Xmx%s");
- const char* threads_property = post_bootcomplete
- ? "dalvik.vm.dex2oat-threads"
- : "dalvik.vm.boot-dex2oat-threads";
- std::string dex2oat_threads_arg = MapPropertyToArg(threads_property, "-j%s");
+ const char* threads_property = post_bootcomplete
+ ? "dalvik.vm.dex2oat-threads"
+ : "dalvik.vm.boot-dex2oat-threads";
+ std::string dex2oat_threads_arg = MapPropertyToArg(threads_property, "-j%s");
- const std::string dex2oat_isa_features_key =
- StringPrintf("dalvik.vm.isa.%s.features", instruction_set);
- std::string instruction_set_features_arg =
- MapPropertyToArg(dex2oat_isa_features_key, "--instruction-set-features=%s");
+ const std::string dex2oat_isa_features_key =
+ StringPrintf("dalvik.vm.isa.%s.features", instruction_set);
+ std::string instruction_set_features_arg =
+ MapPropertyToArg(dex2oat_isa_features_key, "--instruction-set-features=%s");
- const std::string dex2oat_isa_variant_key =
- StringPrintf("dalvik.vm.isa.%s.variant", instruction_set);
- std::string instruction_set_variant_arg =
- MapPropertyToArg(dex2oat_isa_variant_key, "--instruction-set-variant=%s");
+ const std::string dex2oat_isa_variant_key =
+ StringPrintf("dalvik.vm.isa.%s.variant", instruction_set);
+ std::string instruction_set_variant_arg =
+ MapPropertyToArg(dex2oat_isa_variant_key, "--instruction-set-variant=%s");
- const char *dex2oat_norelocation = "-Xnorelocate";
+ const char* dex2oat_norelocation = "-Xnorelocate";
- const std::string dex2oat_flags = GetProperty("dalvik.vm.dex2oat-flags", "");
- std::vector<std::string> dex2oat_flags_args = SplitBySpaces(dex2oat_flags);
- ALOGV("dalvik.vm.dex2oat-flags=%s\n", dex2oat_flags.c_str());
+ const std::string dex2oat_flags = GetProperty("dalvik.vm.dex2oat-flags", "");
+ std::vector<std::string> dex2oat_flags_args = SplitBySpaces(dex2oat_flags);
+ ALOGV("dalvik.vm.dex2oat-flags=%s\n", dex2oat_flags.c_str());
- // If we are booting without the real /data, don't spend time compiling.
- std::string vold_decrypt = GetProperty("vold.decrypt", "");
- bool skip_compilation = vold_decrypt == "trigger_restart_min_framework" ||
- vold_decrypt == "1";
+ // If we are booting without the real /data, don't spend time compiling.
+ std::string vold_decrypt = GetProperty("vold.decrypt", "");
+ bool skip_compilation = vold_decrypt == "trigger_restart_min_framework" ||
+ vold_decrypt == "1";
- const std::string resolve_startup_string_arg =
- MapPropertyToArg("dalvik.vm.dex2oat-resolve-startup-strings",
- "--resolve-startup-const-strings=%s");
- const bool generate_debug_info = GetBoolProperty("debug.generate-debug-info", false);
+ const std::string resolve_startup_string_arg =
+ MapPropertyToArg("dalvik.vm.dex2oat-resolve-startup-strings",
+ "--resolve-startup-const-strings=%s");
+ const bool generate_debug_info = GetBoolProperty("debug.generate-debug-info", false);
- std::string image_format_arg;
- if (image_fd >= 0) {
- image_format_arg = MapPropertyToArg("dalvik.vm.appimageformat", "--image-format=%s");
- }
-
- std::string dex2oat_large_app_threshold_arg =
- MapPropertyToArg("dalvik.vm.dex2oat-very-large", "--very-large-app-threshold=%s");
-
- // If the runtime was requested to use libartd.so, we'll run dex2oatd, otherwise dex2oat.
- const char* dex2oat_bin = "/system/bin/dex2oat";
- constexpr const char* kDex2oatDebugPath = "/system/bin/dex2oatd";
- // Do not use dex2oatd for release candidates (give dex2oat more soak time).
- bool is_release = android::base::GetProperty("ro.build.version.codename", "") == "REL";
- if (is_debug_runtime() || (background_job_compile && is_debuggable_build() && !is_release)) {
- if (access(kDex2oatDebugPath, X_OK) == 0) {
- dex2oat_bin = kDex2oatDebugPath;
+ std::string image_format_arg;
+ if (image_fd >= 0) {
+ image_format_arg = MapPropertyToArg("dalvik.vm.appimageformat", "--image-format=%s");
}
- }
- bool generate_minidebug_info = kEnableMinidebugInfo &&
- android::base::GetBoolProperty(kMinidebugInfoSystemProperty,
- kMinidebugInfoSystemPropertyDefault);
+ std::string dex2oat_large_app_threshold_arg =
+ MapPropertyToArg("dalvik.vm.dex2oat-very-large", "--very-large-app-threshold=%s");
- // clang FORTIFY doesn't let us use strlen in constant array bounds, so we
- // use arraysize instead.
- std::string zip_fd_arg = StringPrintf("--zip-fd=%d", zip_fd);
- std::string zip_location_arg = StringPrintf("--zip-location=%s", relative_input_file_name);
- std::string input_vdex_fd_arg = StringPrintf("--input-vdex-fd=%d", input_vdex_fd);
- std::string output_vdex_fd_arg = StringPrintf("--output-vdex-fd=%d", output_vdex_fd);
- std::string oat_fd_arg = StringPrintf("--oat-fd=%d", oat_fd);
- std::string oat_location_arg = StringPrintf("--oat-location=%s", output_file_name);
- std::string instruction_set_arg = StringPrintf("--instruction-set=%s", instruction_set);
- std::string dex2oat_compiler_filter_arg;
- std::string dex2oat_swap_fd;
- std::string dex2oat_image_fd;
- std::string target_sdk_version_arg;
- if (target_sdk_version != 0) {
- StringPrintf("-Xtarget-sdk-version:%d", target_sdk_version);
- }
- std::string class_loader_context_arg;
- if (class_loader_context != nullptr) {
- class_loader_context_arg = StringPrintf("--class-loader-context=%s", class_loader_context);
- }
+ // If the runtime was requested to use libartd.so, we'll run dex2oatd, otherwise dex2oat.
+ const char* dex2oat_bin = "/system/bin/dex2oat";
+ constexpr const char* kDex2oatDebugPath = "/system/bin/dex2oatd";
+ // Do not use dex2oatd for release candidates (give dex2oat more soak time).
+ bool is_release = android::base::GetProperty("ro.build.version.codename", "") == "REL";
+ if (is_debug_runtime() ||
+ (background_job_compile && is_debuggable_build() && !is_release)) {
+ if (access(kDex2oatDebugPath, X_OK) == 0) {
+ dex2oat_bin = kDex2oatDebugPath;
+ }
+ }
- if (swap_fd >= 0) {
- dex2oat_swap_fd = StringPrintf("--swap-fd=%d", swap_fd);
- }
- if (image_fd >= 0) {
- dex2oat_image_fd = StringPrintf("--app-image-fd=%d", image_fd);
- }
+ bool generate_minidebug_info = kEnableMinidebugInfo &&
+ GetBoolProperty(kMinidebugInfoSystemProperty, kMinidebugInfoSystemPropertyDefault);
- // Compute compiler filter.
- bool have_dex2oat_relocation_skip_flag = false;
- if (skip_compilation) {
- dex2oat_compiler_filter_arg = "--compiler-filter=extract";
- have_dex2oat_relocation_skip_flag = true;
- } else if (compiler_filter != nullptr) {
- dex2oat_compiler_filter_arg = StringPrintf("--compiler-filter=%s", compiler_filter);
- }
+ // clang FORTIFY doesn't let us use strlen in constant array bounds, so we
+ // use arraysize instead.
+ std::string zip_fd_arg = StringPrintf("--zip-fd=%d", zip_fd);
+ std::string zip_location_arg = StringPrintf("--zip-location=%s", relative_input_file_name);
+ std::string input_vdex_fd_arg = StringPrintf("--input-vdex-fd=%d", input_vdex_fd);
+ std::string output_vdex_fd_arg = StringPrintf("--output-vdex-fd=%d", output_vdex_fd);
+ std::string oat_fd_arg = StringPrintf("--oat-fd=%d", oat_fd);
+ std::string oat_location_arg = StringPrintf("--oat-location=%s", output_file_name);
+ std::string instruction_set_arg = StringPrintf("--instruction-set=%s", instruction_set);
+ std::string dex2oat_compiler_filter_arg;
+ std::string dex2oat_swap_fd;
+ std::string dex2oat_image_fd;
+ std::string target_sdk_version_arg;
+ if (target_sdk_version != 0) {
+ target_sdk_version_arg = StringPrintf("-Xtarget-sdk-version:%d", target_sdk_version);
+ }
+ std::string class_loader_context_arg;
+ if (class_loader_context != nullptr) {
+ class_loader_context_arg = StringPrintf("--class-loader-context=%s",
+ class_loader_context);
+ }
- if (dex2oat_compiler_filter_arg.empty()) {
- dex2oat_compiler_filter_arg = MapPropertyToArg("dalvik.vm.dex2oat-filter",
- "--compiler-filter=%s");
- }
+ if (swap_fd >= 0) {
+ dex2oat_swap_fd = StringPrintf("--swap-fd=%d", swap_fd);
+ }
+ if (image_fd >= 0) {
+ dex2oat_image_fd = StringPrintf("--app-image-fd=%d", image_fd);
+ }
- // Check whether all apps should be compiled debuggable.
- if (!debuggable) {
- debuggable = GetProperty("dalvik.vm.always_debuggable", "") == "1";
- }
- std::string profile_arg;
- if (profile_fd != -1) {
- profile_arg = StringPrintf("--profile-file-fd=%d", profile_fd);
- }
+ // Compute compiler filter.
+ bool have_dex2oat_relocation_skip_flag = false;
+ if (skip_compilation) {
+ dex2oat_compiler_filter_arg = "--compiler-filter=extract";
+ have_dex2oat_relocation_skip_flag = true;
+ } else if (compiler_filter != nullptr) {
+ dex2oat_compiler_filter_arg = StringPrintf("--compiler-filter=%s", compiler_filter);
+ }
- // Get the directory of the apk to pass as a base classpath directory.
- std::string base_dir;
- std::string apk_dir(input_file_name);
- unsigned long dir_index = apk_dir.rfind('/');
- bool has_base_dir = dir_index != std::string::npos;
- if (has_base_dir) {
- apk_dir = apk_dir.substr(0, dir_index);
- base_dir = StringPrintf("--classpath-dir=%s", apk_dir.c_str());
+ if (dex2oat_compiler_filter_arg.empty()) {
+ dex2oat_compiler_filter_arg = MapPropertyToArg("dalvik.vm.dex2oat-filter",
+ "--compiler-filter=%s");
+ }
+
+ // Check whether all apps should be compiled debuggable.
+ if (!debuggable) {
+ debuggable = GetProperty("dalvik.vm.always_debuggable", "") == "1";
+ }
+ std::string profile_arg;
+ if (profile_fd != -1) {
+ profile_arg = StringPrintf("--profile-file-fd=%d", profile_fd);
+ }
+
+ // Get the directory of the apk to pass as a base classpath directory.
+ std::string base_dir;
+ std::string apk_dir(input_file_name);
+ unsigned long dir_index = apk_dir.rfind('/');
+ bool has_base_dir = dir_index != std::string::npos;
+ if (has_base_dir) {
+ apk_dir = apk_dir.substr(0, dir_index);
+ base_dir = StringPrintf("--classpath-dir=%s", apk_dir.c_str());
+ }
+
+ std::string dex_metadata_fd_arg = "--dm-fd=" + std::to_string(dex_metadata_fd);
+
+ std::string compilation_reason_arg = compilation_reason == nullptr
+ ? ""
+ : std::string("--compilation-reason=") + compilation_reason;
+
+ ALOGV("Running %s in=%s out=%s\n", dex2oat_bin, relative_input_file_name, output_file_name);
+
+ // Disable cdex if update input vdex is true since this combination of options is not
+ // supported.
+ const bool disable_cdex = !generate_compact_dex || (input_vdex_fd == output_vdex_fd);
+
+ AddArg(zip_fd_arg);
+ AddArg(zip_location_arg);
+ AddArg(input_vdex_fd_arg);
+ AddArg(output_vdex_fd_arg);
+ AddArg(oat_fd_arg);
+ AddArg(oat_location_arg);
+ AddArg(instruction_set_arg);
+
+ AddArg(instruction_set_variant_arg);
+ AddArg(instruction_set_features_arg);
+
+ AddRuntimeArg(dex2oat_Xms_arg);
+ AddRuntimeArg(dex2oat_Xmx_arg);
+
+ AddArg(resolve_startup_string_arg);
+ AddArg(dex2oat_compiler_filter_arg);
+ AddArg(dex2oat_threads_arg);
+ AddArg(dex2oat_swap_fd);
+ AddArg(dex2oat_image_fd);
+
+ if (generate_debug_info) {
+ AddArg("--generate-debug-info");
+ }
+ if (debuggable) {
+ AddArg("--debuggable");
+ }
+ AddArg(image_format_arg);
+ AddArg(dex2oat_large_app_threshold_arg);
+
+ if (have_dex2oat_relocation_skip_flag) {
+ AddRuntimeArg(dex2oat_norelocation);
+ }
+ AddArg(profile_arg);
+ AddArg(base_dir);
+ AddArg(class_loader_context_arg);
+ if (generate_minidebug_info) {
+ AddArg(kMinidebugDex2oatFlag);
+ }
+ if (disable_cdex) {
+ AddArg(kDisableCompactDexFlag);
+ }
+ AddRuntimeArg(target_sdk_version_arg);
+ if (enable_hidden_api_checks) {
+ AddRuntimeArg("-Xhidden-api-checks");
+ }
+
+ if (dex_metadata_fd > -1) {
+ AddArg(dex_metadata_fd_arg);
+ }
+
+ AddArg(compilation_reason_arg);
+
+ // Do not add args after dex2oat_flags; they should override the others for debugging.
+ args_.insert(args_.end(), dex2oat_flags_args.begin(), dex2oat_flags_args.end());
+
+ PrepareArgs(dex2oat_bin);
}
-
- std::string dex_metadata_fd_arg = "--dm-fd=" + std::to_string(dex_metadata_fd);
-
- std::string compilation_reason_arg = compilation_reason == nullptr
- ? ""
- : std::string("--compilation-reason=") + compilation_reason;
-
- ALOGV("Running %s in=%s out=%s\n", dex2oat_bin, relative_input_file_name, output_file_name);
-
- // Disable cdex if update input vdex is true since this combination of options is not
- // supported.
- const bool disable_cdex = !generate_compact_dex || (input_vdex_fd == output_vdex_fd);
-
- std::vector<std::string> args = {
- zip_fd_arg,
- zip_location_arg,
- input_vdex_fd_arg,
- output_vdex_fd_arg,
- oat_fd_arg,
- oat_location_arg,
- instruction_set_arg,
- };
- auto add_runtime_arg = [&](const std::string& arg) {
- args.push_back("--runtime-arg");
- args.push_back(arg);
- };
-
- AddArgIfNonEmpty(instruction_set_variant_arg, &args);
- AddArgIfNonEmpty(instruction_set_features_arg, &args);
- if (!dex2oat_Xms_arg.empty()) {
- add_runtime_arg(dex2oat_Xms_arg);
- }
- if (!dex2oat_Xmx_arg.empty()) {
- add_runtime_arg(dex2oat_Xmx_arg);
- }
- AddArgIfNonEmpty(resolve_startup_string_arg, &args);
- AddArgIfNonEmpty(dex2oat_compiler_filter_arg, &args);
- AddArgIfNonEmpty(dex2oat_threads_arg, &args);
- AddArgIfNonEmpty(dex2oat_swap_fd, &args);
- AddArgIfNonEmpty(dex2oat_image_fd, &args);
-
- if (generate_debug_info) {
- args.push_back("--generate-debug-info");
- }
- if (debuggable) {
- args.push_back("--debuggable");
- }
- AddArgIfNonEmpty(image_format_arg, &args);
- AddArgIfNonEmpty(dex2oat_large_app_threshold_arg, &args);
- args.insert(args.end(), dex2oat_flags_args.begin(), dex2oat_flags_args.end());
- if (have_dex2oat_relocation_skip_flag) {
- add_runtime_arg(dex2oat_norelocation);
- }
- AddArgIfNonEmpty(profile_arg, &args);
- AddArgIfNonEmpty(base_dir, &args);
- AddArgIfNonEmpty(class_loader_context_arg, &args);
- if (generate_minidebug_info) {
- args.push_back(kMinidebugDex2oatFlag);
- }
- if (disable_cdex) {
- args.push_back(kDisableCompactDexFlag);
- }
- AddArgIfNonEmpty(target_sdk_version_arg, &args);
- if (enable_hidden_api_checks) {
- add_runtime_arg("-Xhidden-api-checks");
- }
-
- if (dex_metadata_fd > -1) {
- args.push_back(dex_metadata_fd_arg);
- }
-
- AddArgIfNonEmpty(compilation_reason_arg, &args);
-
- // Do not add after dex2oat_flags, they should override others for debugging.
-
- ExecVWithArgs(dex2oat_bin, args);
- PLOG(ERROR) << "execv(" << dex2oat_bin << ") failed";
- exit(DexoptReturnCodes::kDex2oatExec);
-}
+};
/*
* Whether dexopt should use a swap file when compiling an APK.
@@ -610,74 +648,85 @@
static constexpr int PROFMAN_BIN_RETURN_CODE_ERROR_IO = 3;
static constexpr int PROFMAN_BIN_RETURN_CODE_ERROR_LOCKING = 4;
-[[ noreturn ]]
-static void run_profman(const std::vector<unique_fd>& profile_fds,
- const unique_fd& reference_profile_fd,
- const std::vector<unique_fd>* apk_fds,
- const std::vector<std::string>* dex_locations,
- bool copy_and_update) {
- const char* profman_bin = is_debug_runtime() ? "/system/bin/profmand" : "/system/bin/profman";
+class RunProfman : public ExecVHelper {
+ public:
+ void SetupArgs(const std::vector<unique_fd>& profile_fds,
+ const unique_fd& reference_profile_fd,
+ const std::vector<unique_fd>& apk_fds,
+ const std::vector<std::string>& dex_locations,
+ bool copy_and_update) {
+ const char* profman_bin =
+ is_debug_runtime() ? "/system/bin/profmand" : "/system/bin/profman";
- if (copy_and_update) {
- CHECK_EQ(1u, profile_fds.size());
- CHECK(apk_fds != nullptr);
- CHECK_EQ(1u, apk_fds->size());
- }
- std::vector<std::string> args;
- args.push_back("--reference-profile-file-fd=" + std::to_string(reference_profile_fd.get()));
-
- for (const unique_fd& fd : profile_fds) {
- args.push_back("--profile-file-fd=" + std::to_string(fd.get()));
- }
-
- if (apk_fds != nullptr) {
- for (const unique_fd& fd : *apk_fds) {
- args.push_back("--apk-fd=" + std::to_string(fd.get()));
+ if (copy_and_update) {
+ CHECK_EQ(1u, profile_fds.size());
+ CHECK_EQ(1u, apk_fds.size());
}
- }
-
- std::vector<std::string> dex_location_args;
- if (dex_locations != nullptr) {
- for (const std::string& dex_location : *dex_locations) {
- args.push_back("--dex-location=" + dex_location);
+ if (reference_profile_fd != -1) {
+ AddArg("--reference-profile-file-fd=" + std::to_string(reference_profile_fd.get()));
}
+
+ for (const unique_fd& fd : profile_fds) {
+ AddArg("--profile-file-fd=" + std::to_string(fd.get()));
+ }
+
+ for (const unique_fd& fd : apk_fds) {
+ AddArg("--apk-fd=" + std::to_string(fd.get()));
+ }
+
+ for (const std::string& dex_location : dex_locations) {
+ AddArg("--dex-location=" + dex_location);
+ }
+
+ if (copy_and_update) {
+ AddArg("--copy-and-update-profile-key");
+ }
+
+ // Do not add after dex2oat_flags, they should override others for debugging.
+ PrepareArgs(profman_bin);
}
- if (copy_and_update) {
- args.push_back("--copy-and-update-profile-key");
+ void SetupMerge(const std::vector<unique_fd>& profiles_fd,
+ const unique_fd& reference_profile_fd,
+ const std::vector<unique_fd>& apk_fds = std::vector<unique_fd>(),
+ const std::vector<std::string>& dex_locations = std::vector<std::string>()) {
+ SetupArgs(profiles_fd,
+ reference_profile_fd,
+ apk_fds,
+ dex_locations,
+ /*copy_and_update=*/false);
}
- // Do not add after dex2oat_flags, they should override others for debugging.
+ void SetupCopyAndUpdate(unique_fd&& profile_fd,
+ unique_fd&& reference_profile_fd,
+ unique_fd&& apk_fd,
+ const std::string& dex_location) {
+ std::vector<unique_fd> profiles_fd;
+ profiles_fd.push_back(std::move(profile_fd));
+ std::vector<unique_fd> apk_fds;
+ apk_fds.push_back(std::move(apk_fd));
+ std::vector<std::string> dex_locations = {dex_location};
+ SetupArgs(profiles_fd, reference_profile_fd, apk_fds, dex_locations,
+ /*copy_and_update=*/true);
+ }
- ExecVWithArgs(profman_bin, args);
- PLOG(ERROR) << "execv(" << profman_bin << ") failed";
- exit(DexoptReturnCodes::kProfmanExec); /* only get here on exec failure */
-}
+ void SetupDump(const std::vector<unique_fd>& profiles_fd,
+ const unique_fd& reference_profile_fd,
+ const std::vector<std::string>& dex_locations,
+ const std::vector<unique_fd>& apk_fds,
+ const unique_fd& output_fd) {
+ AddArg("--dump-only");
+ AddArg(StringPrintf("--dump-output-to-fd=%d", output_fd.get()));
+ SetupArgs(profiles_fd, reference_profile_fd, apk_fds, dex_locations,
+ /*copy_and_update=*/false);
+ }
-[[ noreturn ]]
-static void run_profman_merge(const std::vector<unique_fd>& profiles_fd,
- const unique_fd& reference_profile_fd,
- const std::vector<unique_fd>* apk_fds = nullptr,
- const std::vector<std::string>* dex_locations = nullptr) {
- run_profman(profiles_fd, reference_profile_fd, apk_fds, dex_locations,
- /*copy_and_update*/false);
-}
+ void Exec() {
+ ExecVHelper::Exec(DexoptReturnCodes::kProfmanExec);
+ }
+};
-[[ noreturn ]]
-static void run_profman_copy_and_update(unique_fd&& profile_fd,
- unique_fd&& reference_profile_fd,
- unique_fd&& apk_fd,
- const std::string& dex_location) {
- std::vector<unique_fd> profiles_fd;
- profiles_fd.push_back(std::move(profile_fd));
- std::vector<unique_fd> apk_fds;
- apk_fds.push_back(std::move(apk_fd));
- std::vector<std::string> dex_locations;
- dex_locations.push_back(dex_location);
- run_profman(profiles_fd, reference_profile_fd, &apk_fds, &dex_locations,
- /*copy_and_update*/true);
-}
// Decides if profile guided compilation is needed or not based on existing profiles.
// The location is the package name for primary apks or the dex path for secondary dex files.
@@ -697,11 +746,13 @@
return false;
}
+ RunProfman profman_merge;
+ profman_merge.SetupMerge(profiles_fd, reference_profile_fd);
pid_t pid = fork();
if (pid == 0) {
/* child -- drop privileges before continuing */
drop_capabilities(uid);
- run_profman_merge(profiles_fd, reference_profile_fd);
+ profman_merge.Exec();
}
/* parent */
int return_code = wait_child(pid);
@@ -774,35 +825,6 @@
return analyze_profiles(uid, package_name, profile_name, /*is_secondary_dex*/false);
}
-[[ noreturn ]]
-static void run_profman_dump(const std::vector<unique_fd>& profile_fds,
- const unique_fd& reference_profile_fd,
- const std::vector<std::string>& dex_locations,
- const std::vector<unique_fd>& apk_fds,
- const unique_fd& output_fd) {
- std::vector<std::string> profman_args;
- static const char* PROFMAN_BIN = "/system/bin/profman";
- profman_args.push_back("--dump-only");
- profman_args.push_back(StringPrintf("--dump-output-to-fd=%d", output_fd.get()));
- if (reference_profile_fd != -1) {
- profman_args.push_back(StringPrintf("--reference-profile-file-fd=%d",
- reference_profile_fd.get()));
- }
- for (size_t i = 0; i < profile_fds.size(); i++) {
- profman_args.push_back(StringPrintf("--profile-file-fd=%d", profile_fds[i].get()));
- }
- for (const std::string& dex_location : dex_locations) {
- profman_args.push_back(StringPrintf("--dex-location=%s", dex_location.c_str()));
- }
- for (size_t i = 0; i < apk_fds.size(); i++) {
- profman_args.push_back(StringPrintf("--apk-fd=%d", apk_fds[i].get()));
- }
-
- ExecVWithArgs(PROFMAN_BIN, profman_args);
- PLOG(ERROR) << "execv(" << PROFMAN_BIN << ") failed";
- exit(DexoptReturnCodes::kProfmanExec); /* only get here on exec failure */
-}
-
bool dump_profiles(int32_t uid, const std::string& pkgname, const std::string& profile_name,
const std::string& code_path) {
std::vector<unique_fd> profile_fds;
@@ -839,12 +861,13 @@
apk_fds.push_back(std::move(apk_fd));
+ RunProfman profman_dump;
+ profman_dump.SetupDump(profile_fds, reference_profile_fd, dex_locations, apk_fds, output_fd);
pid_t pid = fork();
if (pid == 0) {
/* child -- drop privileges before continuing */
drop_capabilities(uid);
- run_profman_dump(profile_fds, reference_profile_fd, dex_locations,
- apk_fds, output_fd);
+ profman_dump.Exec();
}
/* parent */
int return_code = wait_child(pid);
@@ -1416,55 +1439,60 @@
// The analyzer will check if the dex_file needs to be (re)compiled to match the compiler_filter.
// If this is for a profile guided compilation, profile_was_updated will tell whether or not
// the profile has changed.
-static void exec_dexoptanalyzer(const std::string& dex_file, int vdex_fd, int oat_fd,
- int zip_fd, const std::string& instruction_set, const std::string& compiler_filter,
- bool profile_was_updated, bool downgrade,
- const char* class_loader_context) {
- CHECK_GE(zip_fd, 0);
- const char* dexoptanalyzer_bin =
- is_debug_runtime()
- ? "/system/bin/dexoptanalyzerd"
- : "/system/bin/dexoptanalyzer";
+class RunDexoptAnalyzer : public ExecVHelper {
+ public:
+ RunDexoptAnalyzer(const std::string& dex_file,
+ int vdex_fd,
+ int oat_fd,
+ int zip_fd,
+ const std::string& instruction_set,
+ const std::string& compiler_filter,
+ bool profile_was_updated,
+ bool downgrade,
+ const char* class_loader_context) {
+ CHECK_GE(zip_fd, 0);
+ const char* dexoptanalyzer_bin =
+ is_debug_runtime()
+ ? "/system/bin/dexoptanalyzerd"
+ : "/system/bin/dexoptanalyzer";
- std::string dex_file_arg = "--dex-file=" + dex_file;
- std::string oat_fd_arg = "--oat-fd=" + std::to_string(oat_fd);
- std::string vdex_fd_arg = "--vdex-fd=" + std::to_string(vdex_fd);
- std::string zip_fd_arg = "--zip-fd=" + std::to_string(zip_fd);
- std::string isa_arg = "--isa=" + instruction_set;
- std::string compiler_filter_arg = "--compiler-filter=" + compiler_filter;
- const char* assume_profile_changed = "--assume-profile-changed";
- const char* downgrade_flag = "--downgrade";
- std::string class_loader_context_arg = "--class-loader-context=";
- if (class_loader_context != nullptr) {
- class_loader_context_arg += class_loader_context;
- }
+ std::string dex_file_arg = "--dex-file=" + dex_file;
+ std::string oat_fd_arg = "--oat-fd=" + std::to_string(oat_fd);
+ std::string vdex_fd_arg = "--vdex-fd=" + std::to_string(vdex_fd);
+ std::string zip_fd_arg = "--zip-fd=" + std::to_string(zip_fd);
+ std::string isa_arg = "--isa=" + instruction_set;
+ std::string compiler_filter_arg = "--compiler-filter=" + compiler_filter;
+ const char* assume_profile_changed = "--assume-profile-changed";
+ const char* downgrade_flag = "--downgrade";
+ std::string class_loader_context_arg = "--class-loader-context=";
+ if (class_loader_context != nullptr) {
+ class_loader_context_arg += class_loader_context;
+ }
- // program name, dex file, isa, filter
- std::vector<std::string> args = {
- dex_file_arg,
- isa_arg,
- compiler_filter_arg,
- };
- if (oat_fd >= 0) {
- args.push_back(oat_fd_arg);
- }
- if (vdex_fd >= 0) {
- args.push_back(vdex_fd_arg);
- }
- args.push_back(zip_fd_arg.c_str());
- if (profile_was_updated) {
- args.push_back(assume_profile_changed);
- }
- if (downgrade) {
- args.push_back(downgrade_flag);
- }
- if (class_loader_context != nullptr) {
- args.push_back(class_loader_context_arg.c_str());
- }
+ // program name, dex file, isa, filter
+ AddArg(dex_file_arg);
+ AddArg(isa_arg);
+ AddArg(compiler_filter_arg);
+ if (oat_fd >= 0) {
+ AddArg(oat_fd_arg);
+ }
+ if (vdex_fd >= 0) {
+ AddArg(vdex_fd_arg);
+ }
+ AddArg(zip_fd_arg);
+ if (profile_was_updated) {
+ AddArg(assume_profile_changed);
+ }
+ if (downgrade) {
+ AddArg(downgrade_flag);
+ }
+ if (class_loader_context != nullptr) {
+ AddArg(class_loader_context_arg);
+ }
- ExecVWithArgs(dexoptanalyzer_bin, args);
- ALOGE("execv(%s) failed: %s\n", dexoptanalyzer_bin, strerror(errno));
-}
+ PrepareArgs(dexoptanalyzer_bin);
+ }
+};
// Prepares the oat dir for the secondary dex files.
static bool prepare_secondary_dex_oat_dir(const std::string& dex_path, int uid,
@@ -1716,16 +1744,17 @@
/*is_secondary_dex*/true);
// Run dexoptanalyzer to get dexopt_needed code. This is not expected to return.
- exec_dexoptanalyzer(dex_path,
- vdex_file_fd.get(),
- oat_file_fd.get(),
- zip_fd.get(),
- instruction_set,
- compiler_filter, profile_was_updated,
- downgrade,
- class_loader_context);
- PLOG(ERROR) << "Failed to exec dexoptanalyzer";
- _exit(kSecondaryDexDexoptAnalyzerSkippedFailExec);
+ // Note that we do not do it before the fork since opening the files is required to happen
+ // after forking.
+ RunDexoptAnalyzer run_dexopt_analyzer(dex_path,
+ vdex_file_fd.get(),
+ oat_file_fd.get(),
+ zip_fd.get(),
+ instruction_set,
+ compiler_filter, profile_was_updated,
+ downgrade,
+ class_loader_context);
+ run_dexopt_analyzer.Exec(kSecondaryDexDexoptAnalyzerSkippedFailExec);
}
/* parent */
@@ -1894,6 +1923,27 @@
LOG(VERBOSE) << "DexInv: --- BEGIN '" << dex_path << "' ---";
+ RunDex2Oat runner(input_fd.get(),
+ out_oat_fd.get(),
+ in_vdex_fd.get(),
+ out_vdex_fd.get(),
+ image_fd.get(),
+ dex_path,
+ out_oat_path,
+ swap_fd.get(),
+ instruction_set,
+ compiler_filter,
+ debuggable,
+ boot_complete,
+ background_job_compile,
+ reference_profile_fd.get(),
+ class_loader_context,
+ target_sdk_version,
+ enable_hidden_api_checks,
+ generate_compact_dex,
+ dex_metadata_fd.get(),
+ compilation_reason);
+
pid_t pid = fork();
if (pid == 0) {
/* child -- drop privileges before continuing */
@@ -1905,26 +1955,7 @@
_exit(DexoptReturnCodes::kFlock);
}
- run_dex2oat(input_fd.get(),
- out_oat_fd.get(),
- in_vdex_fd.get(),
- out_vdex_fd.get(),
- image_fd.get(),
- dex_path,
- out_oat_path,
- swap_fd.get(),
- instruction_set,
- compiler_filter,
- debuggable,
- boot_complete,
- background_job_compile,
- reference_profile_fd.get(),
- class_loader_context,
- target_sdk_version,
- enable_hidden_api_checks,
- generate_compact_dex,
- dex_metadata_fd.get(),
- compilation_reason);
+ runner.Exec(DexoptReturnCodes::kDex2oatExec);
} else {
int res = wait_child(pid);
if (res == 0) {
@@ -2488,11 +2519,13 @@
return false;
}
+ RunProfman args;
+ args.SetupMerge(profiles_fd, snapshot_fd, apk_fds, dex_locations);
pid_t pid = fork();
if (pid == 0) {
/* child -- drop privileges before continuing */
drop_capabilities(app_shared_gid);
- run_profman_merge(profiles_fd, snapshot_fd, &apk_fds, &dex_locations);
+ args.Exec();
}
/* parent */
@@ -2572,6 +2605,8 @@
profiles_fd.push_back(std::move(fd));
}
}
+ RunProfman args;
+ args.SetupMerge(profiles_fd, snapshot_fd, apk_fds, dex_locations);
pid_t pid = fork();
if (pid == 0) {
/* child -- drop privileges before continuing */
@@ -2579,7 +2614,7 @@
// The introduction of new access flags into boot jars causes them to
// fail dex file verification.
- run_profman_merge(profiles_fd, snapshot_fd, &apk_fds, &dex_locations);
+ args.Exec();
}
/* parent */
@@ -2633,6 +2668,11 @@
return false;
}
+ RunProfman args;
+ args.SetupCopyAndUpdate(std::move(dex_metadata_fd),
+ std::move(ref_profile_fd),
+ std::move(apk_fd),
+ code_path);
pid_t pid = fork();
if (pid == 0) {
/* child -- drop privileges before continuing */
@@ -2640,10 +2680,7 @@
drop_capabilities(app_shared_gid);
// The copy and update takes ownership over the fds.
- run_profman_copy_and_update(std::move(dex_metadata_fd),
- std::move(ref_profile_fd),
- std::move(apk_fd),
- code_path);
+ args.Exec();
}
/* parent */
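
The dexopt.cpp change above keeps the existing fork/exec structure; only the argument preparation moves in front of the fork. A minimal sketch of the resulting calling pattern, assuming the surrounding installd context (profiles_fd, reference_profile_fd, uid, drop_capabilities, and wait_child as used elsewhere in this file):

    RunProfman profman_merge;
    // All std::string/std::vector allocations happen here, before the fork.
    profman_merge.SetupMerge(profiles_fd, reference_profile_fd);

    pid_t pid = fork();
    if (pid == 0) {
        /* child -- drop privileges, then exec over the pre-built argv */
        drop_capabilities(uid);
        profman_merge.Exec();  // does not return; exits with kProfmanExec if execv fails
    }
    /* parent */
    int return_code = wait_child(pid);
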
diff --git a/libs/binder/ndk/include_ndk/android/binder_parcel.h b/libs/binder/ndk/include_ndk/android/binder_parcel.h
index 4e0132b..a303f4a 100644
--- a/libs/binder/ndk/include_ndk/android/binder_parcel.h
+++ b/libs/binder/ndk/include_ndk/android/binder_parcel.h
@@ -50,80 +50,120 @@
*/
void AParcel_delete(AParcel* parcel) __INTRODUCED_IN(29);
+/**
+ * This is called to allocate a buffer for a C-style string (null-terminated). The returned buffer
+ * should be at least length bytes. This includes space for a null terminator. length will always be
+ * less than or equal to the maximum size that can be held in a size_t and will always be greater
+ * than 0.
+ *
+ * See also AParcel_readString.
+ *
+ * If allocation fails, null should be returned.
+ */
+typedef char* (*AParcel_stringAllocator)(void* stringData, size_t length);
+
+/**
+ * This is called to allocate an array of size 'length'.
+ *
+ * See also AParcel_readStringArray
+ */
+typedef bool (*AParcel_stringArrayAllocator)(void* arrayData, size_t length);
+
+/**
+ * This is called to allocate a string inside of an array that was allocated by an
+ * AParcel_stringArrayAllocator.
+ *
+ * The index passed in will always be within the range [0, length of arrayData). The returned
+ * buffer should be at least length bytes. This includes space for a null-terminator. length will
+ * always be less than or equal to the maximum size that can be held in a size_t and will always
+ * be greater than 0.
+ *
+ * See also AParcel_readStringArray
+ */
+typedef char* (*AParcel_stringArrayElementAllocator)(void* arrayData, size_t index, size_t length);
+
+/**
+ * This returns the length and buffer of a string at a specific index in an arrayData object.
+ *
+ * See also AParcel_writeStringArray
+ */
+typedef const char* (*AParcel_stringArrayElementGetter)(const void* arrayData, size_t index,
+ size_t* outLength);
+
// @START-PRIMITIVE-VECTOR-GETTERS
/**
* This is called to get the underlying data from an arrayData object.
*
- * The implementation of this function should allocate a contiguous array of length length and
+ * The implementation of this function should allocate a contiguous array of size 'length' and
* return that underlying buffer to be filled out. If there is an error or length is 0, null may be
* returned.
*
* See also AParcel_readInt32Array
*/
-typedef int32_t* (*AParcel_int32Allocator)(void* arrayData, size_t length);
+typedef int32_t* (*AParcel_int32ArrayAllocator)(void* arrayData, size_t length);
/**
* This is called to get the underlying data from an arrayData object.
*
- * The implementation of this function should allocate a contiguous array of length length and
+ * The implementation of this function should allocate a contiguous array of size 'length' and
* return that underlying buffer to be filled out. If there is an error or length is 0, null may be
* returned.
*
* See also AParcel_readUint32Array
*/
-typedef uint32_t* (*AParcel_uint32Allocator)(void* arrayData, size_t length);
+typedef uint32_t* (*AParcel_uint32ArrayAllocator)(void* arrayData, size_t length);
/**
* This is called to get the underlying data from an arrayData object.
*
- * The implementation of this function should allocate a contiguous array of length length and
+ * The implementation of this function should allocate a contiguous array of size 'length' and
* return that underlying buffer to be filled out. If there is an error or length is 0, null may be
* returned.
*
* See also AParcel_readInt64Array
*/
-typedef int64_t* (*AParcel_int64Allocator)(void* arrayData, size_t length);
+typedef int64_t* (*AParcel_int64ArrayAllocator)(void* arrayData, size_t length);
/**
* This is called to get the underlying data from an arrayData object.
*
- * The implementation of this function should allocate a contiguous array of length length and
+ * The implementation of this function should allocate a contiguous array of size 'length' and
* return that underlying buffer to be filled out. If there is an error or length is 0, null may be
* returned.
*
* See also AParcel_readUint64Array
*/
-typedef uint64_t* (*AParcel_uint64Allocator)(void* arrayData, size_t length);
+typedef uint64_t* (*AParcel_uint64ArrayAllocator)(void* arrayData, size_t length);
/**
* This is called to get the underlying data from an arrayData object.
*
- * The implementation of this function should allocate a contiguous array of length length and
+ * The implementation of this function should allocate a contiguous array of size 'length' and
* return that underlying buffer to be filled out. If there is an error or length is 0, null may be
* returned.
*
* See also AParcel_readFloatArray
*/
-typedef float* (*AParcel_floatAllocator)(void* arrayData, size_t length);
+typedef float* (*AParcel_floatArrayAllocator)(void* arrayData, size_t length);
/**
* This is called to get the underlying data from an arrayData object.
*
- * The implementation of this function should allocate a contiguous array of length length and
+ * The implementation of this function should allocate a contiguous array of size 'length' and
* return that underlying buffer to be filled out. If there is an error or length is 0, null may be
* returned.
*
* See also AParcel_readDoubleArray
*/
-typedef double* (*AParcel_doubleAllocator)(void* arrayData, size_t length);
+typedef double* (*AParcel_doubleArrayAllocator)(void* arrayData, size_t length);
/**
- * This allocates an array of length length inside of arrayData and returns whether or not there was
+ * This allocates an array of size 'length' inside of arrayData and returns whether or not there was
* a success.
*
* See also AParcel_readBoolArray
*/
-typedef bool (*AParcel_boolAllocator)(void* arrayData, size_t length);
+typedef bool (*AParcel_boolArrayAllocator)(void* arrayData, size_t length);
/**
* This is called to get the underlying data from an arrayData object at index.
@@ -142,38 +182,28 @@
/**
* This is called to get the underlying data from an arrayData object.
*
- * The implementation of this function should allocate a contiguous array of length length and
+ * The implementation of this function should allocate a contiguous array of size 'length' and
* return that underlying buffer to be filled out. If there is an error or length is 0, null may be
* returned.
*
* See also AParcel_readCharArray
*/
-typedef char16_t* (*AParcel_charAllocator)(void* arrayData, size_t length);
+typedef char16_t* (*AParcel_charArrayAllocator)(void* arrayData, size_t length);
/**
* This is called to get the underlying data from an arrayData object.
*
- * The implementation of this function should allocate a contiguous array of length length and
+ * The implementation of this function should allocate a contiguous array of size 'length' and
* return that underlying buffer to be filled out. If there is an error or length is 0, null may be
* returned.
*
* See also AParcel_readByteArray
*/
-typedef int8_t* (*AParcel_byteAllocator)(void* arrayData, size_t length);
+typedef int8_t* (*AParcel_byteArrayAllocator)(void* arrayData, size_t length);
// @END-PRIMITIVE-VECTOR-GETTERS
/**
- * This is called to allocate a buffer for a C-style string (null-terminated). The buffer should be
- * of length length which includes space for the null-terminator.
- *
- * See also AParcel_readString.
- *
- * If allocation fails, null should be returned.
- */
-typedef char* (*AParcel_stringAllocator)(void* stringData, size_t length);
-
-/**
* Writes an AIBinder to the next location in a non-null parcel. Can be null.
*/
binder_status_t AParcel_writeStrongBinder(AParcel* parcel, AIBinder* binder) __INTRODUCED_IN(29);
@@ -229,20 +259,45 @@
__INTRODUCED_IN(29);
/**
- * Writes string value to the next location in a non-null parcel.
+ * Writes utf-8 string value to the next location in a non-null parcel.
*/
binder_status_t AParcel_writeString(AParcel* parcel, const char* string, size_t length)
__INTRODUCED_IN(29);
/**
- * Reads and allocates string value from the next location in a non-null parcel.
+ * Reads and allocates utf-8 string value from the next location in a non-null parcel.
*
* Data is passed to the string allocator once the string size is known. This size includes the
* space for the null-terminator of this string. This allocator returns a buffer which is used as
* the output buffer from this read.
*/
-binder_status_t AParcel_readString(const AParcel* parcel, AParcel_stringAllocator allocator,
- void* stringData) __INTRODUCED_IN(29);
+binder_status_t AParcel_readString(const AParcel* parcel, void* stringData,
+ AParcel_stringAllocator allocator) __INTRODUCED_IN(29);
+
+/**
+ * Writes utf-8 string array data to the next location in a non-null parcel.
+ *
+ * length is the length of the array. AParcel_stringArrayElementGetter will be called for all
+ * indices in range [0, length) with the arrayData provided here. The string length and buffer
+ * returned from the getter will be written to the parcel.
+ */
+binder_status_t AParcel_writeStringArray(AParcel* parcel, const void* arrayData, size_t length,
+ AParcel_stringArrayElementGetter getter)
+ __INTRODUCED_IN(29);
+
+/**
+ * Reads and allocates utf-8 string array value from the next location in a non-null parcel.
+ *
+ * First, AParcel_stringArrayAllocator will be called with the size of the array to be read where
+ * length is the length of the array to be read from the parcel. Then, for each index i in [0,
+ * length), AParcel_stringArrayElementAllocator will be called with the length of the string to be
+ * read from the parcel. The resultant buffer from each of these calls will be filled according to
+ * the contents of the string that is read.
+ */
+binder_status_t AParcel_readStringArray(const AParcel* parcel, void* arrayData,
+ AParcel_stringArrayAllocator allocator,
+ AParcel_stringArrayElementAllocator elementAllocator)
+ __INTRODUCED_IN(29);
// @START-PRIMITIVE-READ-WRITE
/**
@@ -377,9 +432,8 @@
* getter(arrayData, i) will be called for each i in [0, length) in order to get the underlying
* values to write to the parcel.
*/
-binder_status_t AParcel_writeBoolArray(AParcel* parcel, const void* arrayData,
- AParcel_boolArrayGetter getter, size_t length)
- __INTRODUCED_IN(29);
+binder_status_t AParcel_writeBoolArray(AParcel* parcel, const void* arrayData, size_t length,
+ AParcel_boolArrayGetter getter) __INTRODUCED_IN(29);
/**
* Writes an array of char16_t to the next location in a non-null parcel.
@@ -401,7 +455,7 @@
* corresponding data
*/
binder_status_t AParcel_readInt32Array(const AParcel* parcel, void* arrayData,
- AParcel_int32Allocator allocator) __INTRODUCED_IN(29);
+ AParcel_int32ArrayAllocator allocator) __INTRODUCED_IN(29);
/**
* Reads an array of uint32_t from the next location in a non-null parcel.
@@ -411,7 +465,7 @@
* corresponding data
*/
binder_status_t AParcel_readUint32Array(const AParcel* parcel, void* arrayData,
- AParcel_uint32Allocator allocator) __INTRODUCED_IN(29);
+ AParcel_uint32ArrayAllocator allocator) __INTRODUCED_IN(29);
/**
* Reads an array of int64_t from the next location in a non-null parcel.
@@ -421,7 +475,7 @@
* corresponding data
*/
binder_status_t AParcel_readInt64Array(const AParcel* parcel, void* arrayData,
- AParcel_int64Allocator allocator) __INTRODUCED_IN(29);
+ AParcel_int64ArrayAllocator allocator) __INTRODUCED_IN(29);
/**
* Reads an array of uint64_t from the next location in a non-null parcel.
@@ -431,7 +485,7 @@
* corresponding data
*/
binder_status_t AParcel_readUint64Array(const AParcel* parcel, void* arrayData,
- AParcel_uint64Allocator allocator) __INTRODUCED_IN(29);
+ AParcel_uint64ArrayAllocator allocator) __INTRODUCED_IN(29);
/**
* Reads an array of float from the next location in a non-null parcel.
@@ -441,7 +495,7 @@
* corresponding data
*/
binder_status_t AParcel_readFloatArray(const AParcel* parcel, void* arrayData,
- AParcel_floatAllocator allocator) __INTRODUCED_IN(29);
+ AParcel_floatArrayAllocator allocator) __INTRODUCED_IN(29);
/**
* Reads an array of double from the next location in a non-null parcel.
@@ -451,7 +505,7 @@
* corresponding data
*/
binder_status_t AParcel_readDoubleArray(const AParcel* parcel, void* arrayData,
- AParcel_doubleAllocator allocator) __INTRODUCED_IN(29);
+ AParcel_doubleArrayAllocator allocator) __INTRODUCED_IN(29);
/**
* Reads an array of bool from the next location in a non-null parcel.
@@ -460,7 +514,7 @@
* setter(arrayData, i, x) will be called where x is the value at the associated index.
*/
binder_status_t AParcel_readBoolArray(const AParcel* parcel, void* arrayData,
- AParcel_boolAllocator allocator,
+ AParcel_boolArrayAllocator allocator,
AParcel_boolArraySetter setter) __INTRODUCED_IN(29);
/**
@@ -471,7 +525,7 @@
* corresponding data
*/
binder_status_t AParcel_readCharArray(const AParcel* parcel, void* arrayData,
- AParcel_charAllocator allocator) __INTRODUCED_IN(29);
+ AParcel_charArrayAllocator allocator) __INTRODUCED_IN(29);
/**
* Reads an array of int8_t from the next location in a non-null parcel.
@@ -481,7 +535,7 @@
* corresponding data
*/
binder_status_t AParcel_readByteArray(const AParcel* parcel, void* arrayData,
- AParcel_byteAllocator allocator) __INTRODUCED_IN(29);
+ AParcel_byteArrayAllocator allocator) __INTRODUCED_IN(29);
// @END-PRIMITIVE-READ-WRITE
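
AParcel_readString now takes the caller's opaque stringData before the allocator callback. A minimal sketch of a client-side allocator over std::string, mirroring the AParcel_stdStringAllocator helper used in binder_parcel_utils.h; MyStringAllocator and ReadName are illustrative names, not part of the NDK:

    #include <android/binder_parcel.h>
    #include <string>

    // 'length' includes the null terminator, so the std::string itself holds length - 1 chars.
    static char* MyStringAllocator(void* stringData, size_t length) {
        std::string* str = static_cast<std::string*>(stringData);
        str->resize(length - 1);
        return &(*str)[0];
    }

    static binder_status_t ReadName(const AParcel* parcel, std::string* out) {
        // New argument order: parcel, user data, allocator.
        return AParcel_readString(parcel, static_cast<void*>(out), MyStringAllocator);
    }
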
diff --git a/libs/binder/ndk/include_ndk/android/binder_parcel_utils.h b/libs/binder/ndk/include_ndk/android/binder_parcel_utils.h
index 6e41a7f..3fcb121 100644
--- a/libs/binder/ndk/include_ndk/android/binder_parcel_utils.h
+++ b/libs/binder/ndk/include_ndk/android/binder_parcel_utils.h
@@ -36,7 +36,7 @@
namespace ndk {
/**
- * This retrieves and allocates a vector to length length and returns the underlying buffer.
+ * This retrieves and allocates a vector to size 'length' and returns the underlying buffer.
*/
template <typename T>
static inline T* AParcel_stdVectorAllocator(void* vectorData, size_t length) {
@@ -48,10 +48,18 @@
}
/**
- * This allocates a vector to length length and returns whether the allocation is successful.
+ * This allocates a vector to size 'length' and returns whether the allocation is successful.
+ *
+ * See also AParcel_stdVectorAllocator. Types used with this allocator have their sizes defined
+ * externally with respect to the NDK, and that size information is not passed into the NDK.
+ * Instead, this allocator is used in cases where the elements are filled in through callbacks.
+ *
+ * See AParcel_readVector(const AParcel* parcel, std::vector<bool>* vec)
+ * See AParcel_readVector(const AParcel* parcel, std::vector<std::string>* vec)
*/
-static inline bool AParcel_stdVectorBoolAllocator(void* vectorData, size_t length) {
- std::vector<bool>* vec = static_cast<std::vector<bool>*>(vectorData);
+template <typename T>
+static inline bool AParcel_stdVectorExternalAllocator(void* vectorData, size_t length) {
+ std::vector<T>* vec = static_cast<std::vector<T>*>(vectorData);
if (length > vec->max_size()) return false;
vec->resize(length);
@@ -173,8 +181,8 @@
* Writes a vector of bool to the next location in a non-null parcel.
*/
inline binder_status_t AParcel_writeVector(AParcel* parcel, const std::vector<bool>& vec) {
- return AParcel_writeBoolArray(parcel, static_cast<const void*>(&vec),
- AParcel_stdVectorGetter<bool>, vec.size());
+ return AParcel_writeBoolArray(parcel, static_cast<const void*>(&vec), vec.size(),
+ AParcel_stdVectorGetter<bool>);
}
/**
@@ -182,7 +190,7 @@
*/
inline binder_status_t AParcel_readVector(const AParcel* parcel, std::vector<bool>* vec) {
void* vectorData = static_cast<void*>(vec);
- return AParcel_readBoolArray(parcel, vectorData, AParcel_stdVectorBoolAllocator,
+ return AParcel_readBoolArray(parcel, vectorData, AParcel_stdVectorExternalAllocator<bool>,
AParcel_stdVectorSetter<bool>);
}
@@ -229,6 +237,32 @@
}
/**
+ * Allocates a std::string inside of a std::vector<std::string> at index 'index' to size 'length'.
+ */
+static inline char* AParcel_stdVectorStringElementAllocator(void* vectorData, size_t index,
+ size_t length) {
+ std::vector<std::string>* vec = static_cast<std::vector<std::string>*>(vectorData);
+
+ std::string& element = vec->at(index);
+ element.resize(length - 1);
+ return &element[0];
+}
+
+/**
+ * This gets the length and buffer of a std::string inside of a std::vector<std::string> at index
+ * 'index'.
+ */
+static inline const char* AParcel_stdVectorStringElementGetter(const void* vectorData, size_t index,
+ size_t* outLength) {
+ const std::vector<std::string>* vec = static_cast<const std::vector<std::string>*>(vectorData);
+
+ const std::string& element = vec->at(index);
+
+ *outLength = element.size();
+ return element.c_str();
+}
+
+/**
* Convenience API for writing a std::string.
*/
static inline binder_status_t AParcel_writeString(AParcel* parcel, const std::string& str) {
@@ -240,7 +274,28 @@
*/
static inline binder_status_t AParcel_readString(const AParcel* parcel, std::string* str) {
void* stringData = static_cast<void*>(str);
- return AParcel_readString(parcel, AParcel_stdStringAllocator, stringData);
+ return AParcel_readString(parcel, stringData, AParcel_stdStringAllocator);
+}
+
+/**
+ * Convenience API for writing a std::vector<std::string>
+ */
+static inline binder_status_t AParcel_writeVector(AParcel* parcel,
+ const std::vector<std::string>& vec) {
+ const void* vectorData = static_cast<const void*>(&vec);
+ return AParcel_writeStringArray(parcel, vectorData, vec.size(),
+ AParcel_stdVectorStringElementGetter);
+}
+
+/**
+ * Convenience API for reading a std::vector<std::string>
+ */
+static inline binder_status_t AParcel_readVector(const AParcel* parcel,
+ std::vector<std::string>* vec) {
+ void* vectorData = static_cast<void*>(vec);
+ return AParcel_readStringArray(parcel, vectorData,
+ AParcel_stdVectorExternalAllocator<std::string>,
+ AParcel_stdVectorStringElementAllocator);
}
template <typename T>
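
With the new convenience wrappers in place, string vectors round-trip the same way the primitive vectors do. A short usage sketch, assuming parcels obtained from the usual AIBinder transaction paths (the function name and data are illustrative):

    #include <android/binder_parcel_utils.h>
    #include <string>
    #include <vector>

    binder_status_t RoundTripNames(AParcel* out, const AParcel* in) {
        const std::vector<std::string> names = {"first", "second"};

        // Writes the array length, then each UTF-8 string via the element getter.
        binder_status_t status = ndk::AParcel_writeVector(out, names);
        if (status != STATUS_OK) return status;

        // The external allocator resizes the vector; the element allocator sizes each string.
        std::vector<std::string> read_back;
        return ndk::AParcel_readVector(in, &read_back);
    }
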
diff --git a/libs/binder/ndk/libbinder_ndk.map.txt b/libs/binder/ndk/libbinder_ndk.map.txt
index ec6587a..d2c1a3d 100644
--- a/libs/binder/ndk/libbinder_ndk.map.txt
+++ b/libs/binder/ndk/libbinder_ndk.map.txt
@@ -41,6 +41,7 @@
AParcel_readParcelFileDescriptor;
AParcel_readStatusHeader;
AParcel_readString;
+ AParcel_readStringArray;
AParcel_readStrongBinder;
AParcel_readUint32;
AParcel_readUint32Array;
@@ -63,6 +64,7 @@
AParcel_writeParcelFileDescriptor;
AParcel_writeStatusHeader;
AParcel_writeString;
+ AParcel_writeStringArray;
AParcel_writeStrongBinder;
AParcel_writeUint32;
AParcel_writeUint32Array;
diff --git a/libs/binder/ndk/parcel.cpp b/libs/binder/ndk/parcel.cpp
index 77c0558..3b44a62 100644
--- a/libs/binder/ndk/parcel.cpp
+++ b/libs/binder/ndk/parcel.cpp
@@ -142,8 +142,8 @@
}
template <typename T>
-binder_status_t WriteArray(AParcel* parcel, const void* arrayData, ArrayGetter<T> getter,
- size_t length, status_t (Parcel::*write)(T)) {
+binder_status_t WriteArray(AParcel* parcel, const void* arrayData, size_t length,
+ ArrayGetter<T> getter, status_t (Parcel::*write)(T)) {
if (length > std::numeric_limits<int32_t>::max()) return STATUS_BAD_VALUE;
Parcel* rawParcel = parcel->get();
@@ -273,8 +273,8 @@
return STATUS_OK;
}
-binder_status_t AParcel_readString(const AParcel* parcel, AParcel_stringAllocator allocator,
- void* stringData) {
+binder_status_t AParcel_readString(const AParcel* parcel, void* stringData,
+ AParcel_stringAllocator allocator) {
size_t len16;
const char16_t* str16 = parcel->get()->readString16Inplace(&len16);
@@ -291,7 +291,7 @@
len8 = utf16_to_utf8_length(str16, len16) + 1;
}
- if (len8 <= 0 || len8 >= std::numeric_limits<int32_t>::max()) {
+ if (len8 <= 0 || len8 > std::numeric_limits<int32_t>::max()) {
LOG(WARNING) << __func__ << ": Invalid string length: " << len8;
return STATUS_BAD_VALUE;
}
@@ -308,6 +308,68 @@
return STATUS_OK;
}
+binder_status_t AParcel_writeStringArray(AParcel* parcel, const void* arrayData, size_t length,
+ AParcel_stringArrayElementGetter getter) {
+ if (length > std::numeric_limits<int32_t>::max()) return STATUS_BAD_VALUE;
+
+ Parcel* rawParcel = parcel->get();
+
+ status_t status = rawParcel->writeInt32(static_cast<int32_t>(length));
+ if (status != STATUS_OK) return PruneStatusT(status);
+
+ for (size_t i = 0; i < length; i++) {
+ // Use distinct names so the element length/status do not shadow the array-level ones above.
+ size_t elementLength = 0;
+ const char* str = getter(arrayData, i, &elementLength);
+ if (str == nullptr) return STATUS_BAD_VALUE;
+
+ binder_status_t elementStatus = AParcel_writeString(parcel, str, elementLength);
+ if (elementStatus != STATUS_OK) return elementStatus;
+ }
+
+ return STATUS_OK;
+}
+
+// This implements AParcel_stringAllocator for a string using an array, index, and element
+// allocator.
+struct StringArrayElementAllocationAdapter {
+ void* arrayData; // stringData from the NDK
+ size_t index; // index into the string array
+ AParcel_stringArrayElementAllocator elementAllocator;
+
+ static char* Allocator(void* stringData, size_t length) {
+ StringArrayElementAllocationAdapter* adapter =
+ static_cast<StringArrayElementAllocationAdapter*>(stringData);
+ return adapter->elementAllocator(adapter->arrayData, adapter->index, length);
+ }
+};
+
+binder_status_t AParcel_readStringArray(const AParcel* parcel, void* arrayData,
+ AParcel_stringArrayAllocator allocator,
+ AParcel_stringArrayElementAllocator elementAllocator) {
+ const Parcel* rawParcel = parcel->get();
+
+ int32_t length;
+ status_t status = rawParcel->readInt32(&length);
+
+ if (status != STATUS_OK) return PruneStatusT(status);
+ if (length < 0) return STATUS_UNEXPECTED_NULL;
+
+ if (!allocator(arrayData, length)) return STATUS_NO_MEMORY;
+
+ StringArrayElementAllocationAdapter adapter{
+ .arrayData = arrayData,
+ .index = 0,
+ .elementAllocator = elementAllocator,
+ };
+
+ for (; adapter.index < static_cast<size_t>(length); adapter.index++) {
+ binder_status_t elementStatus =
+ AParcel_readString(parcel, static_cast<void*>(&adapter),
+ StringArrayElementAllocationAdapter::Allocator);
+ // Stop and propagate the first element read failure instead of reporting success.
+ if (elementStatus != STATUS_OK) return elementStatus;
+ }
+
+ return STATUS_OK;
+}
+
// See gen_parcel_helper.py. These auto-generated read/write methods use the same types for
// libbinder and this library.
// @START
@@ -425,9 +487,9 @@
return WriteArray<double>(parcel, value, length);
}
-binder_status_t AParcel_writeBoolArray(AParcel* parcel, const void* arrayData,
- AParcel_boolArrayGetter getter, size_t length) {
- return WriteArray<bool>(parcel, arrayData, getter, length, &Parcel::writeBool);
+binder_status_t AParcel_writeBoolArray(AParcel* parcel, const void* arrayData, size_t length,
+ AParcel_boolArrayGetter getter) {
+ return WriteArray<bool>(parcel, arrayData, length, getter, &Parcel::writeBool);
}
binder_status_t AParcel_writeCharArray(AParcel* parcel, const char16_t* value, size_t length) {
@@ -439,48 +501,48 @@
}
binder_status_t AParcel_readInt32Array(const AParcel* parcel, void* arrayData,
- AParcel_int32Allocator allocator) {
+ AParcel_int32ArrayAllocator allocator) {
return ReadArray<int32_t>(parcel, arrayData, allocator);
}
binder_status_t AParcel_readUint32Array(const AParcel* parcel, void* arrayData,
- AParcel_uint32Allocator allocator) {
+ AParcel_uint32ArrayAllocator allocator) {
return ReadArray<uint32_t>(parcel, arrayData, allocator);
}
binder_status_t AParcel_readInt64Array(const AParcel* parcel, void* arrayData,
- AParcel_int64Allocator allocator) {
+ AParcel_int64ArrayAllocator allocator) {
return ReadArray<int64_t>(parcel, arrayData, allocator);
}
binder_status_t AParcel_readUint64Array(const AParcel* parcel, void* arrayData,
- AParcel_uint64Allocator allocator) {
+ AParcel_uint64ArrayAllocator allocator) {
return ReadArray<uint64_t>(parcel, arrayData, allocator);
}
binder_status_t AParcel_readFloatArray(const AParcel* parcel, void* arrayData,
- AParcel_floatAllocator allocator) {
+ AParcel_floatArrayAllocator allocator) {
return ReadArray<float>(parcel, arrayData, allocator);
}
binder_status_t AParcel_readDoubleArray(const AParcel* parcel, void* arrayData,
- AParcel_doubleAllocator allocator) {
+ AParcel_doubleArrayAllocator allocator) {
return ReadArray<double>(parcel, arrayData, allocator);
}
binder_status_t AParcel_readBoolArray(const AParcel* parcel, void* arrayData,
- AParcel_boolAllocator allocator,
+ AParcel_boolArrayAllocator allocator,
AParcel_boolArraySetter setter) {
return ReadArray<bool>(parcel, arrayData, allocator, setter, &Parcel::readBool);
}
binder_status_t AParcel_readCharArray(const AParcel* parcel, void* arrayData,
- AParcel_charAllocator allocator) {
+ AParcel_charArrayAllocator allocator) {
return ReadArray<char16_t>(parcel, arrayData, allocator);
}
binder_status_t AParcel_readByteArray(const AParcel* parcel, void* arrayData,
- AParcel_byteAllocator allocator) {
+ AParcel_byteArrayAllocator allocator) {
return ReadArray<int8_t>(parcel, arrayData, allocator);
}
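
The adapter above reuses the single-string read path for each array element; on the write side, any contiguous arrayData works as long as the getter can hand back a pointer and length per index. A sketch of a getter over a plain const char* array (CStringArrayGetter and WriteArgv are illustrative names):

    #include <android/binder_parcel.h>
    #include <cstring>

    // arrayData points at the const char* array itself; the reported length excludes the
    // null terminator, matching AParcel_stdVectorStringElementGetter.
    static const char* CStringArrayGetter(const void* arrayData, size_t index, size_t* outLength) {
        const char* const* strings = static_cast<const char* const*>(arrayData);
        *outLength = strlen(strings[index]);
        return strings[index];
    }

    static binder_status_t WriteArgv(AParcel* parcel, const char* const* argv, size_t argc) {
        return AParcel_writeStringArray(parcel, static_cast<const void*>(argv), argc,
                                        CStringArrayGetter);
    }
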
diff --git a/libs/binder/ndk/scripts/gen_parcel_helper.py b/libs/binder/ndk/scripts/gen_parcel_helper.py
index 86cc57e..0e10220 100755
--- a/libs/binder/ndk/scripts/gen_parcel_helper.py
+++ b/libs/binder/ndk/scripts/gen_parcel_helper.py
@@ -89,10 +89,10 @@
for pretty, cpp in data_types:
nca = pretty in non_contiguously_addressable
- arg_type = "const " + cpp + "* value"
- if nca: arg_type = "const void* arrayData, AParcel_" + pretty.lower() + "ArrayGetter getter"
+ arg_types = "const " + cpp + "* value, size_t length"
+ if nca: arg_types = "const void* arrayData, size_t length, AParcel_" + pretty.lower() + "ArrayGetter getter"
args = "value, length"
- if nca: args = "arrayData, getter, length, &Parcel::write" + pretty
+ if nca: args = "arrayData, length, getter, &Parcel::write" + pretty
header += "/**\n"
header += " * Writes an array of " + cpp + " to the next location in a non-null parcel.\n"
@@ -101,8 +101,8 @@
header += " * getter(arrayData, i) will be called for each i in [0, length) in order to get the underlying values to write "
header += "to the parcel.\n"
header += " */\n"
- header += "binder_status_t AParcel_write" + pretty + "Array(AParcel* parcel, " + arg_type + ", size_t length) __INTRODUCED_IN(29);\n\n"
- source += "binder_status_t AParcel_write" + pretty + "Array(AParcel* parcel, " + arg_type + ", size_t length) {\n"
+ header += "binder_status_t AParcel_write" + pretty + "Array(AParcel* parcel, " + arg_types + ") __INTRODUCED_IN(29);\n\n"
+ source += "binder_status_t AParcel_write" + pretty + "Array(AParcel* parcel, " + arg_types + ") {\n"
source += " return WriteArray<" + cpp + ">(parcel, " + args + ");\n";
source += "}\n\n"
@@ -111,13 +111,13 @@
read_func = "AParcel_read" + pretty + "Array"
write_func = "AParcel_write" + pretty + "Array"
- allocator_type = "AParcel_" + pretty.lower() + "Allocator"
+ allocator_type = "AParcel_" + pretty.lower() + "ArrayAllocator"
getter_type = "AParcel_" + pretty.lower() + "ArrayGetter"
setter_type = "AParcel_" + pretty.lower() + "ArraySetter"
if nca:
pre_header += "/**\n"
- pre_header += " * This allocates an array of length length inside of arrayData and returns whether or not there was "
+ pre_header += " * This allocates an array of size 'length' inside of arrayData and returns whether or not there was "
pre_header += "a success.\n"
pre_header += " *\n"
pre_header += " * See also " + read_func + "\n"
@@ -141,7 +141,7 @@
pre_header += "/**\n"
pre_header += " * This is called to get the underlying data from an arrayData object.\n"
pre_header += " *\n"
- pre_header += " * The implementation of this function should allocate a contiguous array of length length and "
+ pre_header += " * The implementation of this function should allocate a contiguous array of size 'length' and "
pre_header += "return that underlying buffer to be filled out. If there is an error or length is 0, null may be "
pre_header += "returned.\n"
pre_header += " *\n"
@@ -178,9 +178,9 @@
cpp_helper += " * Writes a vector of " + cpp + " to the next location in a non-null parcel.\n"
cpp_helper += " */\n"
cpp_helper += "inline binder_status_t AParcel_writeVector(AParcel* parcel, const std::vector<" + cpp + ">& vec) {\n"
- write_args = "vec.data()"
- if nca: write_args = "static_cast<const void*>(&vec), AParcel_stdVectorGetter<" + cpp + ">"
- cpp_helper += " return AParcel_write" + pretty + "Array(parcel, " + write_args + ", vec.size());\n"
+ write_args = "vec.data(), vec.size()"
+ if nca: write_args = "static_cast<const void*>(&vec), vec.size(), AParcel_stdVectorGetter<" + cpp + ">"
+ cpp_helper += " return AParcel_write" + pretty + "Array(parcel, " + write_args + ");\n"
cpp_helper += "}\n\n"
cpp_helper += "/**\n"
@@ -192,7 +192,7 @@
read_args += ["parcel"]
read_args += ["vectorData"]
if nca:
- read_args += ["AParcel_stdVectorBoolAllocator"]
+ read_args += ["AParcel_stdVectorExternalAllocator<bool>"]
read_args += ["AParcel_stdVectorSetter<" + cpp + ">"]
else:
read_args += ["AParcel_stdVectorAllocator<" + cpp + ">"]
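For orientation, a minimal caller-side sketch of the generated vector helpers after the reorder above (length now precedes the getter). It assumes the binder_parcel_utils.h helpers this script emits; the wrapper names here are hypothetical:

    #include <android/binder_parcel_utils.h>

    #include <cstdint>
    #include <vector>

    // Writes a vector of int32_t; per the generator above this expands to
    // AParcel_writeInt32Array(parcel, samples.data(), samples.size()).
    binder_status_t writeSamples(AParcel* parcel, const std::vector<int32_t>& samples) {
        return AParcel_writeVector(parcel, samples);
    }

    // Reads the vector back; the generated helper supplies the
    // AParcel_stdVectorAllocator<int32_t> allocator internally.
    binder_status_t readSamples(const AParcel* parcel, std::vector<int32_t>* samples) {
        return AParcel_readVector(parcel, samples);
    }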
diff --git a/libs/gui/ISurfaceComposer.cpp b/libs/gui/ISurfaceComposer.cpp
index e66c0e5..accf72c 100644
--- a/libs/gui/ISurfaceComposer.cpp
+++ b/libs/gui/ISurfaceComposer.cpp
@@ -588,19 +588,16 @@
return error;
}
- virtual bool isColorManagementUsed() const {
+ virtual status_t getColorManagement(bool* outGetColorManagement) const {
Parcel data, reply;
data.writeInterfaceToken(ISurfaceComposer::getInterfaceDescriptor());
- remote()->transact(BnSurfaceComposer::IS_COLOR_MANAGEMET_USED, data, &reply);
- int32_t result = 0;
- status_t err = reply.readInt32(&result);
- if (err != NO_ERROR) {
- ALOGE("ISurfaceComposer::isColorManagementUsed: error "
- "retrieving result: %s (%d)",
- strerror(-err), -err);
- return false;
+ remote()->transact(BnSurfaceComposer::GET_COLOR_MANAGEMENT, data, &reply);
+ bool result;
+ status_t err = reply.readBool(&result);
+ if (err == NO_ERROR) {
+ *outGetColorManagement = result;
}
- return result != 0;
+ return err;
}
};
@@ -945,11 +942,14 @@
}
return NO_ERROR;
}
- case IS_COLOR_MANAGEMET_USED: {
+ case GET_COLOR_MANAGEMENT: {
CHECK_INTERFACE(ISurfaceComposer, data, reply);
- int32_t result = isColorManagementUsed() ? 1 : 0;
- reply->writeInt32(result);
- return NO_ERROR;
+ bool result;
+ status_t error = getColorManagement(&result);
+ if (error == NO_ERROR) {
+ reply->writeBool(result);
+ }
+            return error;
}
default: {
return BBinder::onTransact(code, data, reply, flags);
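A hedged caller-side sketch of the reshaped query (transport errors are now reported separately from the queried value). The ComposerService lookup mirrors the Surface_test change further down; everything else, including the wrapper name, is illustrative:

    #define LOG_TAG "GetColorManagementExample"

    #include <cstring>

    #include <gui/ISurfaceComposer.h>
    #include <private/gui/ComposerService.h>
    #include <utils/Errors.h>
    #include <utils/Log.h>

    using namespace android;

    // Returns the queried flag, falling back to false if the binder call fails.
    bool isColorManagementUsedOrDefault() {
        sp<ISurfaceComposer> sf(ComposerService::getComposerService());
        bool colorManagement = false;
        status_t err = sf->getColorManagement(&colorManagement);
        if (err != NO_ERROR) {
            ALOGE("getColorManagement failed: %s (%d)", strerror(-err), err);
            return false;
        }
        return colorManagement;
    }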
diff --git a/libs/gui/include/gui/ISurfaceComposer.h b/libs/gui/include/gui/ISurfaceComposer.h
index 9316ae6..761f31a 100644
--- a/libs/gui/include/gui/ISurfaceComposer.h
+++ b/libs/gui/include/gui/ISurfaceComposer.h
@@ -280,7 +280,7 @@
*/
virtual status_t getLayerDebugInfo(std::vector<LayerDebugInfo>* outLayers) const = 0;
- virtual bool isColorManagementUsed() const = 0;
+ virtual status_t getColorManagement(bool* outGetColorManagement) const = 0;
/* Gets the composition preference of the default data space and default pixel format,
* as well as the wide color gamut data space and wide color gamut pixel format.
@@ -331,7 +331,7 @@
GET_LAYER_DEBUG_INFO,
CREATE_SCOPED_CONNECTION,
GET_COMPOSITION_PREFERENCE,
- IS_COLOR_MANAGEMET_USED,
+ GET_COLOR_MANAGEMENT,
};
virtual status_t onTransact(uint32_t code, const Parcel& data,
diff --git a/libs/gui/tests/Surface_test.cpp b/libs/gui/tests/Surface_test.cpp
index a3e9249..d0600da 100644
--- a/libs/gui/tests/Surface_test.cpp
+++ b/libs/gui/tests/Surface_test.cpp
@@ -635,7 +635,7 @@
return NO_ERROR;
}
- virtual bool isColorManagementUsed() const { return false; }
+ virtual status_t getColorManagement(bool* /*outGetColorManagement*/) const { return NO_ERROR; }
protected:
IBinder* onAsBinder() override { return nullptr; }
diff --git a/services/bufferhub/Android.bp b/services/bufferhub/Android.bp
index a9af22f..d03d833 100644
--- a/services/bufferhub/Android.bp
+++ b/services/bufferhub/Android.bp
@@ -23,6 +23,13 @@
],
srcs: [
"BufferHubService.cpp",
+ "BufferNode.cpp",
+ ],
+ header_libs: [
+ "libbufferhub_headers",
+ "libdvr_headers",
+ "libnativewindow_headers",
+ "libpdx_headers",
],
shared_libs: [
"android.frameworks.bufferhub@1.0",
@@ -30,6 +37,7 @@
"libhidltransport",
"libhwbinder",
"liblog",
+ "libui",
"libutils",
],
export_include_dirs: [
@@ -49,6 +57,7 @@
"libhidltransport",
"libhwbinder",
"liblog",
+ "libui",
"libutils",
],
cflags: [
diff --git a/services/bufferhub/BufferNode.cpp b/services/bufferhub/BufferNode.cpp
new file mode 100644
index 0000000..62583a6
--- /dev/null
+++ b/services/bufferhub/BufferNode.cpp
@@ -0,0 +1,100 @@
+#include <errno.h>
+
+#include <bufferhub/BufferNode.h>
+#include <private/dvr/buffer_hub_defs.h>
+#include <ui/GraphicBufferAllocator.h>
+
+namespace android {
+namespace frameworks {
+namespace bufferhub {
+namespace V1_0 {
+namespace implementation {
+
+void BufferNode::InitializeMetadata() {
+ // Using placement new here to reuse shared memory instead of new allocation
+ // Initialize the atomic variables to zero.
+ dvr::BufferHubDefs::MetadataHeader* metadata_header = metadata_.metadata_header();
+ buffer_state_ = new (&metadata_header->buffer_state) std::atomic<uint64_t>(0);
+ fence_state_ = new (&metadata_header->fence_state) std::atomic<uint64_t>(0);
+ active_clients_bit_mask_ =
+ new (&metadata_header->active_clients_bit_mask) std::atomic<uint64_t>(0);
+}
+
+// Allocates a new BufferNode.
+BufferNode::BufferNode(uint32_t width, uint32_t height, uint32_t layer_count, uint32_t format,
+ uint64_t usage, size_t user_metadata_size) {
+ uint32_t out_stride = 0;
+ // graphicBufferId is not used in GraphicBufferAllocator::allocate
+ // TODO(b/112338294) After move to the service folder, stop using the
+ // hardcoded service name "bufferhub".
+ int ret = GraphicBufferAllocator::get().allocate(width, height, format, layer_count, usage,
+ const_cast<const native_handle_t**>(
+ &buffer_handle_),
+ &out_stride,
+ /*graphicBufferId=*/0,
+ /*requestor=*/"bufferhub");
+
+ if (ret != OK || buffer_handle_ == nullptr) {
+ ALOGE("BufferNode::BufferNode: Failed to allocate buffer: %s", strerror(-ret));
+ return;
+ }
+
+ buffer_desc_.width = width;
+ buffer_desc_.height = height;
+ buffer_desc_.layers = layer_count;
+ buffer_desc_.format = format;
+ buffer_desc_.usage = usage;
+ buffer_desc_.stride = out_stride;
+
+ metadata_ = BufferHubMetadata::Create(user_metadata_size);
+ if (!metadata_.IsValid()) {
+ ALOGE("BufferNode::BufferNode: Failed to allocate metadata.");
+ return;
+ }
+ InitializeMetadata();
+}
+
+// Free the handle
+BufferNode::~BufferNode() {
+ if (buffer_handle_ != nullptr) {
+ status_t ret = GraphicBufferAllocator::get().free(buffer_handle_);
+ if (ret != OK) {
+ ALOGE("BufferNode::~BufferNode: Failed to free handle; Got error: %d", ret);
+ }
+ }
+}
+
+uint64_t BufferNode::GetActiveClientsBitMask() const {
+ return active_clients_bit_mask_->load(std::memory_order_acquire);
+}
+
+uint64_t BufferNode::AddNewActiveClientsBitToMask() {
+ uint64_t current_active_clients_bit_mask = GetActiveClientsBitMask();
+ uint64_t client_state_mask = 0ULL;
+ uint64_t updated_active_clients_bit_mask = 0ULL;
+ do {
+ client_state_mask = dvr::BufferHubDefs::FindNextAvailableClientStateMask(
+ current_active_clients_bit_mask);
+ if (client_state_mask == 0ULL) {
+ ALOGE("BufferNode::AddNewActiveClientsBitToMask: reached the maximum "
+                  "number of channels per buffer node: 32.");
+ errno = E2BIG;
+ return 0ULL;
+ }
+ updated_active_clients_bit_mask = current_active_clients_bit_mask | client_state_mask;
+ } while (!(active_clients_bit_mask_->compare_exchange_weak(current_active_clients_bit_mask,
+ updated_active_clients_bit_mask,
+ std::memory_order_acq_rel,
+ std::memory_order_acquire)));
+ return client_state_mask;
+}
+
+void BufferNode::RemoveClientsBitFromMask(const uint64_t& value) {
+ active_clients_bit_mask_->fetch_and(~value);
+}
+
+} // namespace implementation
+} // namespace V1_0
+} // namespace bufferhub
+} // namespace frameworks
+} // namespace android
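For context on the compare_exchange loop in AddNewActiveClientsBitToMask, a self-contained sketch of the underlying claim-a-bit pattern. It ignores BufferHubDefs' actual client-bit layout and the errno reporting; the name is illustrative only:

    #include <atomic>
    #include <cstdint>

    // Atomically claims the lowest clear bit of |mask| and returns it,
    // or 0 if all 64 bits are already taken.
    uint64_t ClaimLowestClearBit(std::atomic<uint64_t>& mask) {
        uint64_t current = mask.load(std::memory_order_acquire);
        uint64_t bit = 0;
        do {
            bit = ~current & (current + 1);  // lowest clear bit of |current|
            if (bit == 0) {
                return 0;  // every slot is taken
            }
            // On failure compare_exchange_weak reloads |current|, so the next
            // iteration recomputes the lowest clear bit against the fresh value.
        } while (!mask.compare_exchange_weak(current, current | bit,
                                             std::memory_order_acq_rel,
                                             std::memory_order_acquire));
        return bit;
    }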
diff --git a/services/bufferhub/include/bufferhub/BufferNode.h b/services/bufferhub/include/bufferhub/BufferNode.h
new file mode 100644
index 0000000..ffeacac
--- /dev/null
+++ b/services/bufferhub/include/bufferhub/BufferNode.h
@@ -0,0 +1,84 @@
+#ifndef ANDROID_FRAMEWORKS_BUFFERHUB_V1_0_BUFFER_NODE_H_
+#define ANDROID_FRAMEWORKS_BUFFERHUB_V1_0_BUFFER_NODE_H_
+
+#include <android/hardware_buffer.h>
+#include <ui/BufferHubMetadata.h>
+
+namespace android {
+namespace frameworks {
+namespace bufferhub {
+namespace V1_0 {
+namespace implementation {
+
+class BufferNode {
+public:
+ // Allocates a new BufferNode.
+ BufferNode(uint32_t width, uint32_t height, uint32_t layer_count, uint32_t format,
+ uint64_t usage, size_t user_metadata_size);
+
+ ~BufferNode();
+
+ // Returns whether the object holds a valid metadata.
+ bool IsValid() const { return metadata_.IsValid(); }
+
+ size_t user_metadata_size() const { return metadata_.user_metadata_size(); }
+
+ // Accessors of the buffer description and handle
+ const native_handle_t* buffer_handle() const { return buffer_handle_; }
+ const AHardwareBuffer_Desc& buffer_desc() const { return buffer_desc_; }
+
+ // Accessors of metadata.
+ const BufferHubMetadata& metadata() const { return metadata_; }
+
+ // Gets the current value of active_clients_bit_mask in metadata_ with
+    // std::memory_order_acquire, so that writes released by other threads
+    // before this call are visible here.
+ uint64_t GetActiveClientsBitMask() const;
+
+ // Find and add a new client_state_mask to active_clients_bit_mask in
+ // metadata_.
+ // Return the new client_state_mask that is added to active_clients_bit_mask.
+ // Return 0ULL if there are already 32 bp clients of the buffer.
+ uint64_t AddNewActiveClientsBitToMask();
+
+ // Removes the value from active_clients_bit_mask in metadata_ with
+    // std::memory_order_release, so that the change is visible to any
+    // acquire of active_clients_bit_mask_ in any thread after this
+    // operation succeeds.
+ void RemoveClientsBitFromMask(const uint64_t& value);
+
+private:
+ // Helper method for constructors to initialize atomic metadata header
+ // variables in shared memory.
+ void InitializeMetadata();
+
+ // Gralloc buffer handles.
+ native_handle_t* buffer_handle_;
+ AHardwareBuffer_Desc buffer_desc_;
+
+ // Metadata in shared memory.
+ BufferHubMetadata metadata_;
+
+ // The following variables are atomic variables in metadata_ that are visible
+ // to Bn object and Bp objects. Please find more info in
+ // BufferHubDefs::MetadataHeader.
+
+ // buffer_state_ tracks the state of the buffer. Buffer can be in one of these
+ // four states: gained, posted, acquired, released.
+ std::atomic<uint64_t>* buffer_state_ = nullptr;
+
+ // TODO(b/112012161): add comments to fence_state_.
+ std::atomic<uint64_t>* fence_state_ = nullptr;
+
+ // active_clients_bit_mask_ tracks all the bp clients of the buffer. It is the
+ // union of all client_state_mask of all bp clients.
+ std::atomic<uint64_t>* active_clients_bit_mask_ = nullptr;
+};
+
+} // namespace implementation
+} // namespace V1_0
+} // namespace bufferhub
+} // namespace frameworks
+} // namespace android
+
+#endif // ANDROID_FRAMEWORKS_BUFFERHUB_V1_0_BUFFER_NODE_H_
diff --git a/services/bufferhub/tests/Android.bp b/services/bufferhub/tests/Android.bp
new file mode 100644
index 0000000..cef31f6
--- /dev/null
+++ b/services/bufferhub/tests/Android.bp
@@ -0,0 +1,24 @@
+cc_test {
+ name: "BufferNode_test",
+ srcs: ["BufferNode_test.cpp"],
+ cflags: [
+ "-DLOG_TAG=\"BufferNode_test\"",
+ "-DTRACE=0",
+ "-DATRACE_TAG=ATRACE_TAG_GRAPHICS",
+ ],
+ header_libs: [
+ "libbufferhub_headers",
+ "libdvr_headers",
+ "libnativewindow_headers",
+ "libpdx_headers",
+ ],
+ shared_libs: [
+ "libbufferhubservice",
+ "libui",
+ ],
+ static_libs: [
+ "libgmock",
+ ],
+ // TODO(b/117568153): Temporarily opt out using libcrt.
+ no_libcrt: true,
+}
\ No newline at end of file
diff --git a/services/bufferhub/tests/BufferNode_test.cpp b/services/bufferhub/tests/BufferNode_test.cpp
new file mode 100644
index 0000000..df31d78
--- /dev/null
+++ b/services/bufferhub/tests/BufferNode_test.cpp
@@ -0,0 +1,110 @@
+#include <bufferhub/BufferNode.h>
+#include <errno.h>
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <ui/GraphicBufferMapper.h>
+
+namespace android {
+namespace frameworks {
+namespace bufferhub {
+namespace V1_0 {
+namespace implementation {
+
+namespace {
+
+using testing::NotNull;
+
+const uint32_t kWidth = 640;
+const uint32_t kHeight = 480;
+const uint32_t kLayerCount = 1;
+const uint32_t kFormat = 1;
+const uint64_t kUsage = 0;
+const size_t kUserMetadataSize = 0;
+const size_t kMaxClientsCount = dvr::BufferHubDefs::kMaxNumberOfClients;
+
+class BufferNodeTest : public ::testing::Test {
+protected:
+ void SetUp() override {
+ buffer_node =
+ new BufferNode(kWidth, kHeight, kLayerCount, kFormat, kUsage, kUserMetadataSize);
+ ASSERT_TRUE(buffer_node->IsValid());
+ }
+
+ void TearDown() override {
+ if (buffer_node != nullptr) {
+ delete buffer_node;
+ }
+ }
+
+ BufferNode* buffer_node = nullptr;
+};
+
+TEST_F(BufferNodeTest, TestCreateBufferNode) {
+ EXPECT_EQ(buffer_node->user_metadata_size(), kUserMetadataSize);
+ // Test the handle just allocated is good (i.e. able to be imported)
+ GraphicBufferMapper& mapper = GraphicBufferMapper::get();
+ const native_handle_t* outHandle;
+ status_t ret =
+ mapper.importBuffer(buffer_node->buffer_handle(), buffer_node->buffer_desc().width,
+ buffer_node->buffer_desc().height,
+ buffer_node->buffer_desc().layers,
+ buffer_node->buffer_desc().format, buffer_node->buffer_desc().usage,
+ buffer_node->buffer_desc().stride, &outHandle);
+ EXPECT_EQ(ret, OK);
+ EXPECT_THAT(outHandle, NotNull());
+}
+
+TEST_F(BufferNodeTest, TestAddNewActiveClientsBitToMask_twoNewClients) {
+ uint64_t new_client_state_mask_1 = buffer_node->AddNewActiveClientsBitToMask();
+ EXPECT_EQ(buffer_node->GetActiveClientsBitMask(), new_client_state_mask_1);
+
+ // Request and add a new client_state_mask again.
+ // Active clients bit mask should be the union of the two new
+ // client_state_masks.
+ uint64_t new_client_state_mask_2 = buffer_node->AddNewActiveClientsBitToMask();
+ EXPECT_EQ(buffer_node->GetActiveClientsBitMask(),
+ new_client_state_mask_1 | new_client_state_mask_2);
+}
+
+TEST_F(BufferNodeTest, TestAddNewActiveClientsBitToMask_32NewClients) {
+ uint64_t new_client_state_mask = 0ULL;
+ uint64_t current_mask = 0ULL;
+ uint64_t expected_mask = 0ULL;
+
+ for (int i = 0; i < kMaxClientsCount; ++i) {
+ new_client_state_mask = buffer_node->AddNewActiveClientsBitToMask();
+ EXPECT_NE(new_client_state_mask, 0);
+ EXPECT_FALSE(new_client_state_mask & current_mask);
+ expected_mask = current_mask | new_client_state_mask;
+ current_mask = buffer_node->GetActiveClientsBitMask();
+ EXPECT_EQ(current_mask, expected_mask);
+ }
+
+    // The method should fail when requesting more than the maximum allowable clients.
+ new_client_state_mask = buffer_node->AddNewActiveClientsBitToMask();
+ EXPECT_EQ(new_client_state_mask, 0ULL);
+ EXPECT_EQ(errno, E2BIG);
+}
+
+TEST_F(BufferNodeTest, TestRemoveActiveClientsBitFromMask) {
+ buffer_node->AddNewActiveClientsBitToMask();
+ uint64_t current_mask = buffer_node->GetActiveClientsBitMask();
+ uint64_t new_client_state_mask = buffer_node->AddNewActiveClientsBitToMask();
+ EXPECT_NE(buffer_node->GetActiveClientsBitMask(), current_mask);
+
+ buffer_node->RemoveClientsBitFromMask(new_client_state_mask);
+ EXPECT_EQ(buffer_node->GetActiveClientsBitMask(), current_mask);
+
+    // Removing the same mask from the active clients bit mask again should not
+    // modify the value of the active clients bit mask.
+ buffer_node->RemoveClientsBitFromMask(new_client_state_mask);
+ EXPECT_EQ(buffer_node->GetActiveClientsBitMask(), current_mask);
+}
+
+} // namespace
+
+} // namespace implementation
+} // namespace V1_0
+} // namespace bufferhub
+} // namespace frameworks
+} // namespace android
diff --git a/services/inputflinger/Android.bp b/services/inputflinger/Android.bp
index 622a623..9a65452 100644
--- a/services/inputflinger/Android.bp
+++ b/services/inputflinger/Android.bp
@@ -41,8 +41,6 @@
"-Wall",
"-Wextra",
"-Werror",
- // Allow implicit fallthroughs in InputReader.cpp until they are fixed.
- "-Wno-error=implicit-fallthrough",
"-Wno-unused-parameter",
// TODO: Move inputflinger to its own process and mark it hidden
//-fvisibility=hidden
diff --git a/services/inputflinger/InputReader.cpp b/services/inputflinger/InputReader.cpp
index 869a2fc..869bd71 100644
--- a/services/inputflinger/InputReader.cpp
+++ b/services/inputflinger/InputReader.cpp
@@ -1041,7 +1041,7 @@
void InputDevice::dump(std::string& dump) {
InputDeviceInfo deviceInfo;
- getDeviceInfo(& deviceInfo);
+ getDeviceInfo(&deviceInfo);
dump += StringPrintf(INDENT "Device %d: %s\n", deviceInfo.getId(),
deviceInfo.getDisplayName().c_str());
diff --git a/services/inputflinger/tests/InputReader_test.cpp b/services/inputflinger/tests/InputReader_test.cpp
index 707f3c5..6114f51 100644
--- a/services/inputflinger/tests/InputReader_test.cpp
+++ b/services/inputflinger/tests/InputReader_test.cpp
@@ -1082,7 +1082,7 @@
// --- InputReaderPolicyTest ---
class InputReaderPolicyTest : public testing::Test {
- protected:
+protected:
sp<FakeInputReaderPolicy> mFakePolicy;
virtual void SetUp() {
@@ -1101,7 +1101,7 @@
* Such configuration is not currently allowed.
*/
TEST_F(InputReaderPolicyTest, Viewports_GetCleared) {
- const std::string uniqueId = "local:0";
+ static const std::string uniqueId = "local:0";
// We didn't add any viewports yet, so there shouldn't be any.
std::optional<DisplayViewport> internalViewport =
diff --git a/services/surfaceflinger/BufferQueueLayer.cpp b/services/surfaceflinger/BufferQueueLayer.cpp
index e592a8b..c130bc5 100644
--- a/services/surfaceflinger/BufferQueueLayer.cpp
+++ b/services/surfaceflinger/BufferQueueLayer.cpp
@@ -23,7 +23,9 @@
BufferQueueLayer::BufferQueueLayer(const LayerCreationArgs& args) : BufferLayer(args) {}
-BufferQueueLayer::~BufferQueueLayer() = default;
+BufferQueueLayer::~BufferQueueLayer() {
+ mConsumer->abandon();
+}
// -----------------------------------------------------------------------
// Interface implementation for Layer
@@ -33,10 +35,6 @@
mConsumer->setReleaseFence(releaseFence);
}
-void BufferQueueLayer::abandon() {
- mConsumer->abandon();
-}
-
void BufferQueueLayer::setTransformHint(uint32_t orientation) const {
mConsumer->setTransformHint(orientation);
}
@@ -380,7 +378,17 @@
mFlinger->mInterceptor->saveBufferUpdate(this, item.mGraphicBuffer->getWidth(),
item.mGraphicBuffer->getHeight(), item.mFrameNumber);
- mFlinger->signalLayerUpdate();
+
+ // If this layer is orphaned, then we run a fake vsync pulse so that
+ // dequeueBuffer doesn't block indefinitely.
+ if (isRemovedFromCurrentState()) {
+ bool ignored = false;
+ latchBuffer(ignored, systemTime(), Fence::NO_FENCE);
+ usleep(16000);
+ releasePendingBuffer(systemTime());
+ } else {
+ mFlinger->signalLayerUpdate();
+ }
}
void BufferQueueLayer::onFrameReplaced(const BufferItem& item) {
diff --git a/services/surfaceflinger/BufferQueueLayer.h b/services/surfaceflinger/BufferQueueLayer.h
index abe0bc7..c9ebe04 100644
--- a/services/surfaceflinger/BufferQueueLayer.h
+++ b/services/surfaceflinger/BufferQueueLayer.h
@@ -40,8 +40,6 @@
public:
void onLayerDisplayed(const sp<Fence>& releaseFence) override;
- void abandon() override;
-
void setTransformHint(uint32_t orientation) const override;
std::vector<OccupancyTracker::Segment> getOccupancyHistory(bool forceFlush) override;
diff --git a/services/surfaceflinger/Layer.cpp b/services/surfaceflinger/Layer.cpp
index 2e564e7..f29dfc0 100644
--- a/services/surfaceflinger/Layer.cpp
+++ b/services/surfaceflinger/Layer.cpp
@@ -111,6 +111,8 @@
args.flinger->getCompositorTiming(&compositorTiming);
mFrameEventHistory.initializeCompositorTiming(compositorTiming);
mFrameTracker.setDisplayRefreshPeriod(compositorTiming.interval);
+
+ mFlinger->onLayerCreated();
}
Layer::~Layer() {
@@ -119,13 +121,11 @@
c->detachLayer(this);
}
- for (auto& point : mRemoteSyncPoints) {
- point->setTransactionApplied();
- }
- for (auto& point : mLocalSyncPoints) {
- point->setFrameAvailable();
- }
mFrameTracker.logAndResetStats(mName);
+
+ destroyAllHwcLayers();
+
+ mFlinger->onLayerDestroyed();
}
// ---------------------------------------------------------------------------
@@ -140,10 +140,9 @@
void Layer::onLayerDisplayed(const sp<Fence>& /*releaseFence*/) {}
void Layer::onRemovedFromCurrentState() {
+ mRemovedFromCurrentState = true;
+
// the layer is removed from SF mCurrentState to mLayersPendingRemoval
-
- mPendingRemoval = true;
-
if (mCurrentState.zOrderRelativeOf != nullptr) {
sp<Layer> strongRelative = mCurrentState.zOrderRelativeOf.promote();
if (strongRelative != nullptr) {
@@ -153,22 +152,29 @@
mCurrentState.zOrderRelativeOf = nullptr;
}
+    // Since we are no longer reachable from CurrentState, SurfaceFlinger
+    // will no longer invoke doTransaction for us, and so we will
+    // never finish applying transactions. We signal the sync points
+    // now so that other layers will not become indefinitely
+    // blocked.
+ for (auto& point: mRemoteSyncPoints) {
+ point->setTransactionApplied();
+ }
+ mRemoteSyncPoints.clear();
+
+ {
+ Mutex::Autolock syncLock(mLocalSyncPointMutex);
+ for (auto& point : mLocalSyncPoints) {
+ point->setFrameAvailable();
+ }
+ mLocalSyncPoints.clear();
+ }
+
for (const auto& child : mCurrentChildren) {
child->onRemovedFromCurrentState();
}
}
-void Layer::onRemoved() {
- // the layer is removed from SF mLayersPendingRemoval
- abandon();
-
- destroyAllHwcLayers();
-
- for (const auto& child : mCurrentChildren) {
- child->onRemoved();
- }
-}
-
// ---------------------------------------------------------------------------
// set-up
// ---------------------------------------------------------------------------
@@ -228,6 +234,10 @@
}
LOG_ALWAYS_FATAL_IF(!getBE().mHwcLayers.empty(),
"All hardware composer layers should have been destroyed");
+
+ for (const sp<Layer>& child : mDrawingChildren) {
+ child->destroyAllHwcLayers();
+ }
}
Rect Layer::getContentCrop() const {
@@ -752,6 +762,9 @@
// relevant frame
return false;
}
+ if (isRemovedFromCurrentState()) {
+ return false;
+ }
Mutex::Autolock lock(mLocalSyncPointMutex);
mLocalSyncPoints.push_back(point);
@@ -825,7 +838,9 @@
// If this transaction is waiting on the receipt of a frame, generate a sync
// point and send it to the remote layer.
- if (mCurrentState.barrierLayer_legacy != nullptr) {
+ // We don't allow installing sync points after we are removed from the current state
+ // as we won't be able to signal our end.
+ if (mCurrentState.barrierLayer_legacy != nullptr && !isRemovedFromCurrentState()) {
sp<Layer> barrierLayer = mCurrentState.barrierLayer_legacy.promote();
if (barrierLayer == nullptr) {
ALOGE("[%s] Unable to promote barrier Layer.", mName.string());
@@ -1994,6 +2009,10 @@
}
}
+bool Layer::isRemovedFromCurrentState() const {
+ return mRemovedFromCurrentState;
+}
+
// ---------------------------------------------------------------------------
}; // namespace android
diff --git a/services/surfaceflinger/Layer.h b/services/surfaceflinger/Layer.h
index 5d05f05..12671ff 100644
--- a/services/surfaceflinger/Layer.h
+++ b/services/surfaceflinger/Layer.h
@@ -346,7 +346,7 @@
virtual bool isCreatedFromMainThread() const { return false; }
- bool isPendingRemoval() const { return mPendingRemoval; }
+ bool isRemovedFromCurrentState() const;
void writeToProto(LayerProto* layerInfo,
LayerVector::StateSet stateSet = LayerVector::StateSet::Drawing);
@@ -394,8 +394,6 @@
*/
virtual void onLayerDisplayed(const sp<Fence>& releaseFence);
- virtual void abandon() {}
-
virtual bool shouldPresentNow(nsecs_t /*expectedPresentTime*/) const { return false; }
virtual void setTransformHint(uint32_t /*orientation*/) const { }
@@ -475,12 +473,6 @@
*/
void onRemovedFromCurrentState();
- /*
- * called with the state lock from the main thread when the layer is
- * removed from the pending removal list
- */
- void onRemoved();
-
// Updates the transform hint in our SurfaceFlingerConsumer to match
// the current orientation of the display device.
void updateTransformHint(const sp<const DisplayDevice>& display) const;
@@ -595,12 +587,12 @@
*/
class LayerCleaner {
sp<SurfaceFlinger> mFlinger;
- wp<Layer> mLayer;
+ sp<Layer> mLayer;
protected:
~LayerCleaner() {
// destroy client resources
- mFlinger->onLayerDestroyed(mLayer);
+ mFlinger->onHandleDestroyed(mLayer);
}
public:
@@ -702,6 +694,8 @@
virtual PixelFormat getPixelFormat() const { return PIXEL_FORMAT_NONE; }
bool getPremultipledAlpha() const;
+ bool mPendingHWCDestroy{false};
+
protected:
// -----------------------------------------------------------------------
bool usingRelativeZ(LayerVector::StateSet stateSet);
@@ -745,7 +739,7 @@
// Whether filtering is needed b/c of the drawingstate
bool mNeedsFiltering{false};
- bool mPendingRemoval{false};
+ std::atomic<bool> mRemovedFromCurrentState{false};
// page-flip thread (currently main thread)
bool mProtectedByApp{false}; // application requires protected path to external sink
diff --git a/services/surfaceflinger/SurfaceFlinger.cpp b/services/surfaceflinger/SurfaceFlinger.cpp
index 5c31ada..dec08fd 100644
--- a/services/surfaceflinger/SurfaceFlinger.cpp
+++ b/services/surfaceflinger/SurfaceFlinger.cpp
@@ -518,8 +518,12 @@
return mDisplayTokens[id];
}
-bool SurfaceFlinger::isColorManagementUsed() const {
- return useColorManagement;
+status_t SurfaceFlinger::getColorManagement(bool* outGetColorManagement) const {
+ if (!outGetColorManagement) {
+ return BAD_VALUE;
+ }
+ *outGetColorManagement = useColorManagement;
+ return NO_ERROR;
}
void SurfaceFlinger::bootFinished()
@@ -1568,8 +1572,7 @@
getBE().mEndOfFrameCompositionInfo = std::move(getBE().mCompositionInfo);
for (const auto& [token, display] : mDisplays) {
- const auto displayId = display->getId();
- for (auto& compositionInfo : getBE().mEndOfFrameCompositionInfo[displayId]) {
+ for (auto& compositionInfo : getBE().mEndOfFrameCompositionInfo[token]) {
compositionInfo.hwc.hwcLayer = nullptr;
}
}
@@ -1666,16 +1669,16 @@
mDrawingState.colorMatrixChanged = false;
for (const auto& [token, display] : mDisplays) {
- const auto displayId = display->getId();
- getBE().mCompositionInfo[displayId].clear();
+ getBE().mCompositionInfo[token].clear();
+
for (auto& layer : display->getVisibleLayersSortedByZ()) {
- auto displayId = display->getId();
+ const auto displayId = display->getId();
layer->getBE().compositionInfo.compositionType = layer->getCompositionType(displayId);
if (!layer->setHwcLayer(displayId)) {
ALOGV("Need to create HWCLayer for %s", layer->getName().string());
}
layer->getBE().compositionInfo.hwc.displayId = displayId;
- getBE().mCompositionInfo[displayId].push_back(layer->getBE().compositionInfo);
+ getBE().mCompositionInfo[token].push_back(layer->getBE().compositionInfo);
layer->getBE().compositionInfo.hwc.hwcLayer = nullptr;
}
}
@@ -1683,7 +1686,6 @@
void SurfaceFlinger::doDebugFlashRegions(const sp<DisplayDevice>& display, bool repaintEverything)
{
- const auto displayId = display->getId();
// is debugging enabled
if (CC_LIKELY(!mDebugRegion))
return;
@@ -1709,14 +1711,7 @@
usleep(mDebugRegion * 1000);
}
- if (display->isPoweredOn()) {
- status_t result = display->prepareFrame(
- *getBE().mHwc, getBE().mCompositionInfo[displayId]);
- ALOGE_IF(result != NO_ERROR,
- "prepareFrame for display %d failed:"
- " %d (%s)",
- display->getId(), result, strerror(-result));
- }
+ prepareFrame(display);
}
void SurfaceFlinger::doTracing(const char* where) {
@@ -2128,13 +2123,12 @@
void SurfaceFlinger::prepareFrame(const sp<DisplayDevice>& display)
{
- const auto displayId = display->getId();
if (!display->isPoweredOn()) {
return;
}
- status_t result = display->prepareFrame(
- *getBE().mHwc, getBE().mCompositionInfo[displayId]);
+ status_t result = display->prepareFrame(getHwComposer(),
+ getBE().mCompositionInfo[display->getDisplayToken()]);
ALOGE_IF(result != NO_ERROR,
"prepareFrame for display %d failed:"
" %d (%s)",
@@ -2735,7 +2729,15 @@
for (const auto& l : mLayersPendingRemoval) {
recordBufferingStats(l->getName().string(),
l->getOccupancyHistory(true));
- l->onRemoved();
+
+ // We need to release the HWC layers when the Layer is removed
+            // from the current state; otherwise the HWC layer just keeps
+            // showing its last configured state until we eventually
+ // abandon the buffer queue.
+ if (l->isRemovedFromCurrentState()) {
+ l->destroyAllHwcLayers();
+ l->releasePendingBuffer(systemTime());
+ }
}
mLayersPendingRemoval.clear();
}
@@ -3168,7 +3170,7 @@
if (parent == nullptr) {
mCurrentState.layersSortedByZ.add(lbc);
} else {
- if (parent->isPendingRemoval()) {
+ if (parent->isRemovedFromCurrentState()) {
ALOGE("addClientLayer called with a removed parent");
return NAME_NOT_FOUND;
}
@@ -3184,7 +3186,6 @@
mMaxGraphicBufferProducerListSize, mNumLayers);
}
mLayersAdded = true;
- mNumLayers++;
}
// attach this layer to the client
@@ -3198,52 +3199,22 @@
return removeLayerLocked(mStateLock, layer, topLevelOnly);
}
-status_t SurfaceFlinger::removeLayerLocked(const Mutex&, const sp<Layer>& layer,
+status_t SurfaceFlinger::removeLayerLocked(const Mutex& lock, const sp<Layer>& layer,
bool topLevelOnly) {
- if (layer->isPendingRemoval()) {
- return NO_ERROR;
- }
-
const auto& p = layer->getParent();
ssize_t index;
if (p != nullptr) {
if (topLevelOnly) {
return NO_ERROR;
}
-
- sp<Layer> ancestor = p;
- while (ancestor->getParent() != nullptr) {
- ancestor = ancestor->getParent();
- }
- if (mCurrentState.layersSortedByZ.indexOf(ancestor) < 0) {
- ALOGE("removeLayer called with a layer whose parent has been removed");
- return NAME_NOT_FOUND;
- }
-
index = p->removeChild(layer);
} else {
index = mCurrentState.layersSortedByZ.remove(layer);
}
- // As a matter of normal operation, the LayerCleaner will produce a second
- // attempt to remove the surface. The Layer will be kept alive in mDrawingState
- // so we will succeed in promoting it, but it's already been removed
- // from mCurrentState. As long as we can find it in mDrawingState we have no problem
- // otherwise something has gone wrong and we are leaking the layer.
- if (index < 0 && mDrawingState.layersSortedByZ.indexOf(layer) < 0) {
- ALOGE("Failed to find layer (%s) in layer parent (%s).",
- layer->getName().string(),
- (p != nullptr) ? p->getName().string() : "no-parent");
- return BAD_VALUE;
- } else if (index < 0) {
- return NO_ERROR;
- }
-
layer->onRemovedFromCurrentState();
- mLayersPendingRemoval.add(layer);
- mLayersRemoved = true;
- mNumLayers -= 1 + layer->getChildrenCount();
- setTransactionFlags(eTransactionNeeded);
+
+ markLayerPendingRemovalLocked(lock, layer);
return NO_ERROR;
}
@@ -3444,11 +3415,6 @@
return 0;
}
- if (layer->isPendingRemoval()) {
- ALOGW("Attempting to set client state on removed layer: %s", layer->getName().string());
- return 0;
- }
-
uint32_t flags = 0;
const uint32_t what = s.what;
@@ -3652,11 +3618,6 @@
return;
}
- if (layer->isPendingRemoval()) {
- ALOGW("Attempting to destroy on removed layer: %s", layer->getName().string());
- return;
- }
-
if (state.what & layer_state_t::eDestroySurface) {
removeLayerLocked(mStateLock, layer);
}
@@ -3830,17 +3791,16 @@
return err;
}
-status_t SurfaceFlinger::onLayerDestroyed(const wp<Layer>& layer)
+void SurfaceFlinger::markLayerPendingRemovalLocked(const Mutex&, const sp<Layer>& layer) {
+ mLayersPendingRemoval.add(layer);
+ mLayersRemoved = true;
+ setTransactionFlags(eTransactionNeeded);
+}
+
+void SurfaceFlinger::onHandleDestroyed(const sp<Layer>& layer)
{
- // called by ~LayerCleaner() when all references to the IBinder (handle)
- // are gone
- sp<Layer> l = layer.promote();
- if (l == nullptr) {
- // The layer has already been removed, carry on
- return NO_ERROR;
- }
- // If we have a parent, then we can continue to live as long as it does.
- return removeLayer(l, true);
+ Mutex::Autolock lock(mStateLock);
+ markLayerPendingRemovalLocked(mStateLock, layer);
}
// ---------------------------------------------------------------------------
@@ -4390,17 +4350,13 @@
std::string stringResult;
for (const auto& [token, display] : mDisplays) {
- const auto displayId = display->getId();
- if (displayId == DisplayDevice::DISPLAY_ID_INVALID) {
+ const auto it = getBE().mEndOfFrameCompositionInfo.find(token);
+ if (it == getBE().mEndOfFrameCompositionInfo.end()) {
continue;
}
- const auto& compositionInfoIt = getBE().mEndOfFrameCompositionInfo.find(displayId);
- if (compositionInfoIt == getBE().mEndOfFrameCompositionInfo.end()) {
- break;
- }
- const auto& compositionInfoList = compositionInfoIt->second;
- stringResult += base::StringPrintf("Display: %d\n", displayId);
+ const auto& compositionInfoList = it->second;
+ stringResult += base::StringPrintf("%s\n", display->getDebugName().c_str());
stringResult += base::StringPrintf("numComponents: %zu\n", compositionInfoList.size());
for (const auto& compositionInfo : compositionInfoList) {
compositionInfo.dump(stringResult, nullptr);
@@ -4757,7 +4713,7 @@
case SET_TRANSACTION_STATE:
// Creating a scoped connection is safe, as per discussion in ISurfaceComposer.h
case CREATE_SCOPED_CONNECTION:
- case IS_COLOR_MANAGEMET_USED:
+ case GET_COLOR_MANAGEMENT:
case GET_COMPOSITION_PREFERENCE: {
return OK;
}
@@ -5189,7 +5145,7 @@
auto layerHandle = reinterpret_cast<Layer::Handle*>(layerHandleBinder.get());
auto parent = layerHandle->owner.promote();
- if (parent == nullptr || parent->isPendingRemoval()) {
+ if (parent == nullptr || parent->isRemovedFromCurrentState()) {
ALOGE("captureLayers called with a removed parent");
return NAME_NOT_FOUND;
}
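One note on the mCompositionInfo rework above: the maps are now keyed by the display token rather than the HWC display id, which works because wp<IBinder> is ordered (RefBase defines the weak-pointer comparison operators). A small illustrative sketch, not SurfaceFlinger code:

    #include <map>
    #include <vector>

    #include <binder/IBinder.h>
    #include <utils/RefBase.h>
    #include <utils/StrongPointer.h>

    using android::IBinder;
    using android::sp;
    using android::wp;

    // Per-display data keyed by token, so an entry can exist even before the
    // display has been assigned a hardware composer id.
    std::map<wp<IBinder>, std::vector<int>> gInfoByToken;

    void appendInfo(const sp<IBinder>& displayToken, int info) {
        gInfoByToken[displayToken].push_back(info);  // sp implicitly converts to wp
    }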
diff --git a/services/surfaceflinger/SurfaceFlinger.h b/services/surfaceflinger/SurfaceFlinger.h
index d60765c..51168a6 100644
--- a/services/surfaceflinger/SurfaceFlinger.h
+++ b/services/surfaceflinger/SurfaceFlinger.h
@@ -222,8 +222,8 @@
// instances. Each hardware composer instance gets a different sequence id.
int32_t mComposerSequenceId;
- std::unordered_map<int32_t, std::vector<CompositionInfo>> mCompositionInfo;
- std::unordered_map<int32_t, std::vector<CompositionInfo>> mEndOfFrameCompositionInfo;
+ std::map<wp<IBinder>, std::vector<CompositionInfo>> mCompositionInfo;
+ std::map<wp<IBinder>, std::vector<CompositionInfo>> mEndOfFrameCompositionInfo;
};
@@ -364,6 +364,9 @@
bool authenticateSurfaceTextureLocked(
const sp<IGraphicBufferProducer>& bufferProducer) const;
+ inline void onLayerCreated() { mNumLayers++; }
+ inline void onLayerDestroyed() { mNumLayers--; }
+
private:
friend class Client;
friend class DisplayEventConnection;
@@ -466,7 +469,7 @@
virtual status_t enableVSyncInjections(bool enable);
virtual status_t injectVSync(nsecs_t when);
virtual status_t getLayerDebugInfo(std::vector<LayerDebugInfo>* outLayers) const;
- virtual bool isColorManagementUsed() const;
+ virtual status_t getColorManagement(bool* outGetColorManagement) const;
status_t getCompositionPreference(ui::Dataspace* outDataspace, ui::PixelFormat* outPixelFormat,
ui::Dataspace* outWideColorGamutDataspace,
ui::PixelFormat* outWideColorGamutPixelFormat) const override;
@@ -575,10 +578,12 @@
// ISurfaceComposerClient::destroySurface()
status_t onLayerRemoved(const sp<Client>& client, const sp<IBinder>& handle);
+ void markLayerPendingRemovalLocked(const Mutex& /* mStateLock */, const sp<Layer>& layer);
+
// called when all clients have released all their references to
// this layer meaning it is entirely safe to destroy all
// resources associated to this layer.
- status_t onLayerDestroyed(const wp<Layer>& layer);
+ void onHandleDestroyed(const sp<Layer>& layer);
// remove a layer from SurfaceFlinger immediately
status_t removeLayer(const sp<Layer>& layer, bool topLevelOnly = false);
diff --git a/services/surfaceflinger/tests/Transaction_test.cpp b/services/surfaceflinger/tests/Transaction_test.cpp
index c814142..94b33ac 100644
--- a/services/surfaceflinger/tests/Transaction_test.cpp
+++ b/services/surfaceflinger/tests/Transaction_test.cpp
@@ -318,7 +318,7 @@
sp<ISurfaceComposer> sf(ComposerService::getComposerService());
sp<IBinder> binder = sf->getBuiltInDisplay(ISurfaceComposer::eDisplayIdMain);
- mColorManagementUsed = sf->isColorManagementUsed();
+ ASSERT_NO_FATAL_FAILURE(sf->getColorManagement(&mColorManagementUsed));
}
virtual void TearDown() {
@@ -2614,6 +2614,37 @@
}
}
+TEST_F(ChildLayerTest, ChildrenSurviveParentDestruction) {
+ sp<SurfaceControl> mGrandChild =
+ mClient->createSurface(String8("Grand Child"), 10, 10,
+ PIXEL_FORMAT_RGBA_8888, 0, mChild.get());
+ fillSurfaceRGBA8(mGrandChild, 111, 111, 111);
+
+ {
+ SCOPED_TRACE("Grandchild visible");
+ ScreenCapture::captureScreen(&mCapture);
+ mCapture->checkPixel(64, 64, 111, 111, 111);
+ }
+
+ mChild->clear();
+
+ {
+ SCOPED_TRACE("After destroying child");
+ ScreenCapture::captureScreen(&mCapture);
+ mCapture->expectFGColor(64, 64);
+ }
+
+ asTransaction([&](Transaction& t) {
+ t.reparent(mGrandChild, mFGSurfaceControl->getHandle());
+ });
+
+ {
+ SCOPED_TRACE("After reparenting grandchild");
+ ScreenCapture::captureScreen(&mCapture);
+ mCapture->checkPixel(64, 64, 111, 111, 111);
+ }
+}
+
TEST_F(ChildLayerTest, DetachChildrenSameClient) {
asTransaction([&](Transaction& t) {
t.show(mChild);
diff --git a/services/vr/bufferhubd/Android.bp b/services/vr/bufferhubd/Android.bp
index 5debd3d..b5e6bb4 100644
--- a/services/vr/bufferhubd/Android.bp
+++ b/services/vr/bufferhubd/Android.bp
@@ -15,6 +15,7 @@
sharedLibraries = [
"libbase",
"libbinder",
+ "libbufferhubservice",
"libcutils",
"libgtest_prod",
"libgui",
@@ -32,7 +33,6 @@
"buffer_client.cpp",
"buffer_hub.cpp",
"buffer_hub_binder.cpp",
- "buffer_node.cpp",
"consumer_channel.cpp",
"consumer_queue_channel.cpp",
"IBufferHub.cpp",
diff --git a/services/vr/bufferhubd/buffer_node.cpp b/services/vr/bufferhubd/buffer_node.cpp
deleted file mode 100644
index 31c6ef0..0000000
--- a/services/vr/bufferhubd/buffer_node.cpp
+++ /dev/null
@@ -1,95 +0,0 @@
-#include <errno.h>
-
-#include <private/dvr/IBufferHub.h>
-#include <private/dvr/buffer_hub_defs.h>
-#include <private/dvr/buffer_node.h>
-#include <ui/GraphicBufferAllocator.h>
-
-namespace android {
-namespace dvr {
-
-void BufferNode::InitializeMetadata() {
- // Using placement new here to reuse shared memory instead of new allocation
- // Initialize the atomic variables to zero.
- BufferHubDefs::MetadataHeader* metadata_header = metadata_.metadata_header();
- buffer_state_ = new (&metadata_header->buffer_state) std::atomic<uint64_t>(0);
- fence_state_ = new (&metadata_header->fence_state) std::atomic<uint64_t>(0);
- active_clients_bit_mask_ =
- new (&metadata_header->active_clients_bit_mask) std::atomic<uint64_t>(0);
-}
-
-// Allocates a new BufferNode.
-BufferNode::BufferNode(uint32_t width, uint32_t height, uint32_t layer_count,
- uint32_t format, uint64_t usage,
- size_t user_metadata_size) {
- uint32_t out_stride = 0;
- // graphicBufferId is not used in GraphicBufferAllocator::allocate
- int ret = GraphicBufferAllocator::get().allocate(
- width, height, format, layer_count, usage,
- const_cast<const native_handle_t**>(&buffer_handle_), &out_stride,
- /*graphicBufferId=*/0, IBufferHub::getServiceName());
-
- if (ret != OK || buffer_handle_ == nullptr) {
- ALOGE("BufferNode::BufferNode: Failed to allocate buffer: %s",
- strerror(-ret));
- return;
- }
-
- buffer_desc_.width = width;
- buffer_desc_.height = height;
- buffer_desc_.layers = layer_count;
- buffer_desc_.format = format;
- buffer_desc_.usage = usage;
- buffer_desc_.stride = out_stride;
-
- metadata_ = BufferHubMetadata::Create(user_metadata_size);
- if (!metadata_.IsValid()) {
- ALOGE("BufferNode::BufferNode: Failed to allocate metadata.");
- return;
- }
- InitializeMetadata();
-}
-
-// Free the handle
-BufferNode::~BufferNode() {
- if (buffer_handle_ != nullptr) {
- status_t ret = GraphicBufferAllocator::get().free(buffer_handle_);
- if (ret != OK) {
- ALOGE("BufferNode::~BufferNode: Failed to free handle; Got error: %d",
- ret);
- }
- }
-}
-
-uint64_t BufferNode::GetActiveClientsBitMask() const {
- return active_clients_bit_mask_->load(std::memory_order_acquire);
-}
-
-uint64_t BufferNode::AddNewActiveClientsBitToMask() {
- uint64_t current_active_clients_bit_mask = GetActiveClientsBitMask();
- uint64_t client_state_mask = 0ULL;
- uint64_t updated_active_clients_bit_mask = 0ULL;
- do {
- client_state_mask = BufferHubDefs::FindNextAvailableClientStateMask(
- current_active_clients_bit_mask);
- if (client_state_mask == 0ULL) {
- ALOGE(
- "BufferNode::AddNewActiveClientsBitToMask: reached the maximum "
- "mumber of channels per buffer node: 32.");
- errno = E2BIG;
- return 0ULL;
- }
- updated_active_clients_bit_mask =
- current_active_clients_bit_mask | client_state_mask;
- } while (!(active_clients_bit_mask_->compare_exchange_weak(
- current_active_clients_bit_mask, updated_active_clients_bit_mask,
- std::memory_order_acq_rel, std::memory_order_acquire)));
- return client_state_mask;
-}
-
-void BufferNode::RemoveClientsBitFromMask(const uint64_t& value) {
- active_clients_bit_mask_->fetch_and(~value);
-}
-
-} // namespace dvr
-} // namespace android
diff --git a/services/vr/bufferhubd/include/private/dvr/buffer_node.h b/services/vr/bufferhubd/include/private/dvr/buffer_node.h
index bc0a34e..997aeda 100644
--- a/services/vr/bufferhubd/include/private/dvr/buffer_node.h
+++ b/services/vr/bufferhubd/include/private/dvr/buffer_node.h
@@ -1,77 +1,14 @@
#ifndef ANDROID_DVR_BUFFERHUBD_BUFFER_NODE_H_
#define ANDROID_DVR_BUFFERHUBD_BUFFER_NODE_H_
+// TODO(b/118891412) Remove this file
-#include <android/hardware_buffer.h>
-#include <private/dvr/ion_buffer.h>
-#include <ui/BufferHubMetadata.h>
+#include <bufferhub/BufferNode.h>
namespace android {
namespace dvr {
-class BufferNode {
- public:
- // Allocates a new BufferNode.
- BufferNode(uint32_t width, uint32_t height, uint32_t layer_count,
- uint32_t format, uint64_t usage, size_t user_metadata_size);
-
- ~BufferNode();
-
- // Returns whether the object holds a valid metadata.
- bool IsValid() const { return metadata_.IsValid(); }
-
- size_t user_metadata_size() const { return metadata_.user_metadata_size(); }
-
- // Accessors of the buffer description and handle
- const native_handle_t* buffer_handle() const { return buffer_handle_; }
- const AHardwareBuffer_Desc& buffer_desc() const { return buffer_desc_; }
-
- // Accessors of metadata.
- const BufferHubMetadata& metadata() const { return metadata_; }
-
- // Gets the current value of active_clients_bit_mask in metadata_ with
- // std::memory_order_acquire, so that all previous releases of
- // active_clients_bit_mask from all threads will be returned here.
- uint64_t GetActiveClientsBitMask() const;
-
- // Find and add a new client_state_mask to active_clients_bit_mask in
- // metadata_.
- // Return the new client_state_mask that is added to active_clients_bit_mask.
- // Return 0ULL if there are already 32 bp clients of the buffer.
- uint64_t AddNewActiveClientsBitToMask();
-
- // Removes the value from active_clients_bit_mask in metadata_ with
- // std::memory_order_release, so that the change will be visible to any
- // acquire of active_clients_bit_mask_ in any threads after the succeed of
- // this operation.
- void RemoveClientsBitFromMask(const uint64_t& value);
-
- private:
- // Helper method for constructors to initialize atomic metadata header
- // variables in shared memory.
- void InitializeMetadata();
-
- // Gralloc buffer handles.
- native_handle_t* buffer_handle_;
- AHardwareBuffer_Desc buffer_desc_;
-
- // Metadata in shared memory.
- BufferHubMetadata metadata_;
-
- // The following variables are atomic variables in metadata_ that are visible
- // to Bn object and Bp objects. Please find more info in
- // BufferHubDefs::MetadataHeader.
-
- // buffer_state_ tracks the state of the buffer. Buffer can be in one of these
- // four states: gained, posted, acquired, released.
- std::atomic<uint64_t>* buffer_state_ = nullptr;
-
- // TODO(b/112012161): add comments to fence_state_.
- std::atomic<uint64_t>* fence_state_ = nullptr;
-
- // active_clients_bit_mask_ tracks all the bp clients of the buffer. It is the
- // union of all client_state_mask of all bp clients.
- std::atomic<uint64_t>* active_clients_bit_mask_ = nullptr;
-};
+typedef android::frameworks::bufferhub::V1_0::implementation::BufferNode
+ BufferNode;
} // namespace dvr
} // namespace android
diff --git a/services/vr/bufferhubd/producer_channel.cpp b/services/vr/bufferhubd/producer_channel.cpp
index 241eee7..397c0ae 100644
--- a/services/vr/bufferhubd/producer_channel.cpp
+++ b/services/vr/bufferhubd/producer_channel.cpp
@@ -311,8 +311,11 @@
return ErrorStatus(ENOMEM);
}
- if (!producer_owns_ && !BufferHubDefs::IsBufferReleased(
- buffer_state_->load(std::memory_order_acquire))) {
+ uint64_t current_buffer_state =
+ buffer_state_->load(std::memory_order_acquire);
+ if (!producer_owns_ &&
+ (BufferHubDefs::IsBufferPosted(current_buffer_state) ||
+ BufferHubDefs::IsBufferAcquired(current_buffer_state))) {
// Signal the new consumer when adding it to a posted producer.
if (consumer->OnProducerPosted())
pending_consumers_++;
diff --git a/services/vr/bufferhubd/tests/Android.bp b/services/vr/bufferhubd/tests/Android.bp
index c77d2d2..a611268 100644
--- a/services/vr/bufferhubd/tests/Android.bp
+++ b/services/vr/bufferhubd/tests/Android.bp
@@ -24,33 +24,3 @@
// TODO(b/117568153): Temporarily opt out using libcrt.
no_libcrt: true,
}
-
-cc_test {
- name: "buffer_node-test",
- srcs: ["buffer_node-test.cpp"],
- cflags: [
- "-DLOG_TAG=\"buffer_node-test\"",
- "-DTRACE=0",
- "-DATRACE_TAG=ATRACE_TAG_GRAPHICS",
- ],
- header_libs: [
- "libdvr_headers",
- "libnativewindow_headers",
- ],
- static_libs: [
- "libbufferhub",
- "libbufferhubd",
- "libgmock",
- ],
- shared_libs: [
- "libbase",
- "libbinder",
- "liblog",
- "libpdx_default_transport",
- "libui",
- "libutils",
- ],
- // TODO(b/117568153): Temporarily opt out using libcrt.
- no_libcrt: true,
-}
-
diff --git a/services/vr/bufferhubd/tests/buffer_node-test.cpp b/services/vr/bufferhubd/tests/buffer_node-test.cpp
deleted file mode 100644
index 30ecbec..0000000
--- a/services/vr/bufferhubd/tests/buffer_node-test.cpp
+++ /dev/null
@@ -1,105 +0,0 @@
-#include <errno.h>
-#include <gmock/gmock.h>
-#include <gtest/gtest.h>
-#include <private/dvr/buffer_node.h>
-#include <ui/GraphicBufferMapper.h>
-
-namespace android {
-namespace dvr {
-
-namespace {
-
-using testing::NotNull;
-
-const uint32_t kWidth = 640;
-const uint32_t kHeight = 480;
-const uint32_t kLayerCount = 1;
-const uint32_t kFormat = 1;
-const uint64_t kUsage = 0;
-const size_t kUserMetadataSize = 0;
-const size_t kMaxClientsCount = BufferHubDefs::kMaxNumberOfClients;
-
-class BufferNodeTest : public ::testing::Test {
- protected:
- void SetUp() override {
- buffer_node = new BufferNode(kWidth, kHeight, kLayerCount, kFormat, kUsage,
- kUserMetadataSize);
- ASSERT_TRUE(buffer_node->IsValid());
- }
-
- void TearDown() override {
- if (buffer_node != nullptr) {
- delete buffer_node;
- }
- }
-
- BufferNode* buffer_node = nullptr;
-};
-
-TEST_F(BufferNodeTest, TestCreateBufferNode) {
- EXPECT_EQ(buffer_node->user_metadata_size(), kUserMetadataSize);
- // Test the handle just allocated is good (i.e. able to be imported)
- GraphicBufferMapper& mapper = GraphicBufferMapper::get();
- const native_handle_t* outHandle;
- status_t ret = mapper.importBuffer(
- buffer_node->buffer_handle(), buffer_node->buffer_desc().width,
- buffer_node->buffer_desc().height, buffer_node->buffer_desc().layers,
- buffer_node->buffer_desc().format, buffer_node->buffer_desc().usage,
- buffer_node->buffer_desc().stride, &outHandle);
- EXPECT_EQ(ret, OK);
- EXPECT_THAT(outHandle, NotNull());
-}
-
-TEST_F(BufferNodeTest, TestAddNewActiveClientsBitToMask_twoNewClients) {
- uint64_t new_client_state_mask_1 =
- buffer_node->AddNewActiveClientsBitToMask();
- EXPECT_EQ(buffer_node->GetActiveClientsBitMask(), new_client_state_mask_1);
-
- // Request and add a new client_state_mask again.
- // Active clients bit mask should be the union of the two new
- // client_state_masks.
- uint64_t new_client_state_mask_2 =
- buffer_node->AddNewActiveClientsBitToMask();
- EXPECT_EQ(buffer_node->GetActiveClientsBitMask(),
- new_client_state_mask_1 | new_client_state_mask_2);
-}
-
-TEST_F(BufferNodeTest, TestAddNewActiveClientsBitToMask_32NewClients) {
- uint64_t new_client_state_mask = 0ULL;
- uint64_t current_mask = 0ULL;
- uint64_t expected_mask = 0ULL;
-
- for (int i = 0; i < kMaxClientsCount; ++i) {
- new_client_state_mask = buffer_node->AddNewActiveClientsBitToMask();
- EXPECT_NE(new_client_state_mask, 0);
- EXPECT_FALSE(new_client_state_mask & current_mask);
- expected_mask = current_mask | new_client_state_mask;
- current_mask = buffer_node->GetActiveClientsBitMask();
- EXPECT_EQ(current_mask, expected_mask);
- }
-
- // Method should fail upon requesting for more than maximum allowable clients.
- new_client_state_mask = buffer_node->AddNewActiveClientsBitToMask();
- EXPECT_EQ(new_client_state_mask, 0ULL);
- EXPECT_EQ(errno, E2BIG);
-}
-
-TEST_F(BufferNodeTest, TestRemoveActiveClientsBitFromMask) {
- buffer_node->AddNewActiveClientsBitToMask();
- uint64_t current_mask = buffer_node->GetActiveClientsBitMask();
- uint64_t new_client_state_mask = buffer_node->AddNewActiveClientsBitToMask();
- EXPECT_NE(buffer_node->GetActiveClientsBitMask(), current_mask);
-
- buffer_node->RemoveClientsBitFromMask(new_client_state_mask);
- EXPECT_EQ(buffer_node->GetActiveClientsBitMask(), current_mask);
-
- // Remove the test_mask again to the active client bit mask should not modify
- // the value of active clients bit mask.
- buffer_node->RemoveClientsBitFromMask(new_client_state_mask);
- EXPECT_EQ(buffer_node->GetActiveClientsBitMask(), current_mask);
-}
-
-} // namespace
-
-} // namespace dvr
-} // namespace android
diff --git a/vulkan/api/vulkan.api b/vulkan/api/vulkan.api
index e5b9b47..8f76606 100644
--- a/vulkan/api/vulkan.api
+++ b/vulkan/api/vulkan.api
@@ -28,7 +28,7 @@
// API version (major.minor.patch)
define VERSION_MAJOR 1
define VERSION_MINOR 1
-define VERSION_PATCH 90
+define VERSION_PATCH 91
// API limits
define VK_MAX_PHYSICAL_DEVICE_NAME_SIZE 256
@@ -85,9 +85,7 @@
@extension("VK_KHR_wayland_surface") define VK_KHR_WAYLAND_SURFACE_SPEC_VERSION 6
@extension("VK_KHR_wayland_surface") define VK_KHR_WAYLAND_SURFACE_NAME "VK_KHR_wayland_surface"
-// 8
-@extension("VK_KHR_mir_surface") define VK_KHR_MIR_SURFACE_SPEC_VERSION 4
-@extension("VK_KHR_mir_surface") define VK_KHR_MIR_SURFACE_NAME "VK_KHR_mir_surface"
+// 8 - VK_KHR_mir_surface removed
// 9
@extension("VK_KHR_android_surface") define VK_KHR_ANDROID_SURFACE_SPEC_VERSION 6
@@ -526,8 +524,8 @@
@extension("VK_NV_shading_rate_image") define VK_NV_SHADING_RATE_IMAGE_EXTENSION_NAME "VK_NV_shading_rate_image"
// 166
-@extension("VK_NVX_raytracing") define VK_NVX_RAYTRACING_SPEC_VERSION 1
-@extension("VK_NVX_raytracing") define VK_NVX_RAYTRACING_EXTENSION_NAME "VK_NVX_raytracing"
+@extension("VK_NV_raytracing") define VK_NV_RAYTRACING_SPEC_VERSION 2
+@extension("VK_NV_raytracing") define VK_NV_RAYTRACING_EXTENSION_NAME "VK_NV_raytracing"
// 167
@extension("VK_NV_representative_fragment_test") define VK_NV_REPRESENTATIVE_FRAGMENT_TEST_SPEC_VERSION 1
@@ -565,6 +563,10 @@
@extension("VK_AMD_shader_core_properties") define VK_AMD_SHADER_CORE_PROPERTIES_SPEC_VERSION 1
@extension("VK_AMD_shader_core_properties") define VK_AMD_SHADER_CORE_PROPERTIES_EXTENSION_NAME "VK_AMD_shader_core_properties"
+// 190
+@extension("VK_AMD_memory_overallocation_behavior") define VK_AMD_MEMORY_OVERALLOCATION_BEHAVIOR_SPEC_VERSION 1
+@extension("VK_AMD_memory_overallocation_behavior") define VK_AMD_MEMORY_OVERALLOCATION_BEHAVIOR_EXTENSION_NAME "VK_AMD_memory_overallocation_behavior"
+
// 191
@extension("VK_EXT_vertex_attribute_divisor") define VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_SPEC_VERSION 2
@extension("VK_EXT_vertex_attribute_divisor") define VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME "VK_EXT_vertex_attribute_divisor"
@@ -693,7 +695,7 @@
@extension("VK_EXT_validation_cache") @nonDispatchHandle type u64 VkValidationCacheEXT
// 166
-@extension("VK_NVX_raytracing") @nonDispatchHandle type u64 VkAccelerationStructureNVX
+@extension("VK_NV_raytracing") @nonDispatchHandle type u64 VkAccelerationStructureNV
/////////////
// Enums //
@@ -794,8 +796,8 @@
//@extension("VK_EXT_inline_uniform_block") // 139
VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT = 1000138000,
- //@extension("VK_NVX_raytracing") // 166
- VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NVX = 1000165000,
+ //@extension("VK_NV_raytracing") // 166
+ VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV = 1000165000,
}
enum VkQueryType {
@@ -806,8 +808,8 @@
//@extension("VK_EXT_transform_feedback") // 29
VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT = 1000028004,
- //@extension("VK_NVX_raytracing") // 166
- VK_QUERY_TYPE_COMPACTED_SIZE_NVX = 1000165000,
+ //@extension("VK_NV_raytracing") // 166
+ VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV = 1000165000,
}
enum VkBorderColor {
@@ -823,8 +825,8 @@
VK_PIPELINE_BIND_POINT_GRAPHICS = 0x00000000,
VK_PIPELINE_BIND_POINT_COMPUTE = 0x00000001,
- //@extension("VK_NVX_raytracing") // 166
- VK_PIPELINE_BIND_POINT_RAYTRACING_NVX = 1000165000,
+ //@extension("VK_NV_raytracing") // 166
+ VK_PIPELINE_BIND_POINT_RAY_TRACING_NV = 1000165000,
}
enum VkPrimitiveTopology {
@@ -849,6 +851,9 @@
enum VkIndexType {
VK_INDEX_TYPE_UINT16 = 0x00000000,
VK_INDEX_TYPE_UINT32 = 0x00000001,
+
+ //@extension("VK_NV_raytracing") // 166
+ VK_INDEX_TYPE_NONE_NV = 1000165000,
}
enum VkFilter {
@@ -1447,9 +1452,6 @@
//@extension("VK_KHR_wayland_surface") // 7
VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR = 1000006000,
- //@extension("VK_KHR_mir_surface") // 8
- VK_STRUCTURE_TYPE_MIR_SURFACE_CREATE_INFO_KHR = 1000007000,
-
//@extension("VK_KHR_android_surface") // 9
VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR = 1000008000,
@@ -1786,18 +1788,18 @@
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV = 1000164002,
VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV = 1000164005,
- //@extension("VK_NVX_raytracing") // 166
- VK_STRUCTURE_TYPE_RAYTRACING_PIPELINE_CREATE_INFO_NVX = 1000165000,
- VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NVX = 1000165001,
- VK_STRUCTURE_TYPE_GEOMETRY_INSTANCE_NVX = 1000165002,
- VK_STRUCTURE_TYPE_GEOMETRY_NVX = 1000165003,
- VK_STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NVX = 1000165004,
- VK_STRUCTURE_TYPE_GEOMETRY_AABB_NVX = 1000165005,
- VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NVX = 1000165006,
- VK_STRUCTURE_TYPE_DESCRIPTOR_ACCELERATION_STRUCTURE_INFO_NVX = 1000165007,
- VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NVX = 1000165008,
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAYTRACING_PROPERTIES_NVX = 1000165009,
- VK_STRUCTURE_TYPE_HIT_SHADER_MODULE_CREATE_INFO_NVX = 1000165010,
+ //@extension("VK_NV_raytracing") // 166
+ VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV = 1000165000,
+ VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV = 1000165001,
+ VK_STRUCTURE_TYPE_GEOMETRY_NV = 1000165003,
+ VK_STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NV = 1000165004,
+ VK_STRUCTURE_TYPE_GEOMETRY_AABB_NV = 1000165005,
+ VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV = 1000165006,
+ VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV = 1000165007,
+ VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV = 1000165008,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV = 1000165009,
+ VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV = 1000165011,
+ VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV = 1000165012,
//@extension("VK_NV_representative_fragment_test") // 167
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV = 1000166000,
@@ -1830,6 +1832,9 @@
//@extension("VK_AMD_shader_core_properties") // 186
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD = 1000185000,
+ //@extension("VK_AMD_memory_overallocation_behavior") // 190
+ VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD = 1000189000,
+
//@extension("VK_EXT_vertex_attribute_divisor") // 191
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT = 1000190000,
VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT = 1000190001,
@@ -2011,8 +2016,8 @@
//@extension("VK_EXT_validation_cache") // 161
VK_OBJECT_TYPE_VALIDATION_CACHE_EXT = 1000160000,
- //@extension("VK_NVX_raytracing") // 166
- VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NVX = 1000165000,
+ //@extension("VK_NV_raytracing") // 166
+ VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV = 1000165000,
}
@@ -2135,8 +2140,8 @@
//@extension("VK_KHR_sampler_ycbcr_conversion") // 157
VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR_EXT = 1000156000,
- //@extension("VK_NVX_raytracing") // 166
- VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NVX_EXT = 1000165000,
+ //@extension("VK_NV_raytracing") // 166
+ VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT = 1000165000,
}
@extension("VK_AMD_rasterization_order") // 19
@@ -2311,22 +2316,36 @@
VK_COARSE_SAMPLE_ORDER_TYPE_SAMPLE_MAJOR_NV = 3,
}
-@extension("VK_NVX_raytracing") // 166
-enum VkGeometryTypeNVX {
- VK_GEOMETRY_TYPE_TRIANGLES_NVX = 0,
- VK_GEOMETRY_TYPE_AABBS_NVX = 1,
+@extension("VK_NV_raytracing") // 166
+enum VkRayTracingShaderGroupTypeNV {
+ VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV = 0,
+ VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_NV = 1,
+ VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_NV = 2,
}
-@extension("VK_NVX_raytracing") // 166
-enum VkAccelerationStructureTypeNVX {
- VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NVX = 0,
- VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NVX = 1,
+@extension("VK_NV_raytracing") // 166
+enum VkGeometryTypeNV {
+ VK_GEOMETRY_TYPE_TRIANGLES_NV = 0,
+ VK_GEOMETRY_TYPE_AABBS_NV = 1,
}
-@extension("VK_NVX_raytracing") // 166
-enum VkCopyAccelerationStructureModeNVX {
- VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NVX = 0,
- VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NVX = 1,
+@extension("VK_NV_raytracing") // 166
+enum VkAccelerationStructureTypeNV {
+ VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV = 0,
+ VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV = 1,
+}
+
+@extension("VK_NV_raytracing") // 166
+enum VkCopyAccelerationStructureModeNV {
+ VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NV = 0,
+ VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV = 1,
+}
+
+@extension("VK_NV_raytracing") // 166
+enum VkAccelerationStructureMemoryRequirementsTypeNV {
+ VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV = 0,
+ VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV = 1,
+ VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV = 2,
}
@extension("VK_EXT_global_priority") // 175
@@ -2345,6 +2364,13 @@
VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT = 3,
}
+@extension("VK_AMD_memory_overallocation_behavior") // 190
+enum VkMemoryOverallocationBehaviorAMD {
+ VK_MEMORY_OVERALLOCATION_BEHAVIOR_DEFAULT_AMD = 0,
+ VK_MEMORY_OVERALLOCATION_BEHAVIOR_ALLOWED_AMD = 1,
+ VK_MEMORY_OVERALLOCATION_BEHAVIOR_DISALLOWED_AMD = 2,
+}
+
@extension("VK_KHR_driver_properties") // 197
enum VkDriverIdKHR {
VK_DRIVER_ID_AMD_PROPRIETARY_KHR = 1,
@@ -2433,9 +2459,9 @@
//@extension("VK_NV_shading_rate_image") // 165
VK_ACCESS_SHADING_RATE_IMAGE_READ_BIT_NV = 0x00800000,
- //@extension("VK_NVX_raytracing") // 166
- VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NVX = 0x00200000,
- VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NVX = 0x00400000,
+ //@extension("VK_NV_raytracing") // 166
+ VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NV = 0x00200000,
+ VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NV = 0x00400000,
//@extension("VK_EXT_transform_feedback") // 29
VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT = 0x02000000,
@@ -2459,8 +2485,8 @@
//@extension("VK_EXT_conditional_rendering") // 82
VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT = 0x00000200,
- //@extension("VK_NVX_raytracing") // 166
- VK_BUFFER_USAGE_RAYTRACING_BIT_NVX = 0x00000400,
+ //@extension("VK_NV_raytracing") // 166
+ VK_BUFFER_USAGE_RAY_TRACING_BIT_NV = 0x00000400,
//@extension("VK_EXT_transform_feedback") // 29
VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT = 0x00000800,
@@ -2491,13 +2517,13 @@
VK_SHADER_STAGE_ALL = 0x7FFFFFFF,
- //@extension("VK_NVX_raytracing") // 166
- VK_SHADER_STAGE_RAYGEN_BIT_NVX = 0x00000100,
- VK_SHADER_STAGE_ANY_HIT_BIT_NVX = 0x00000200,
- VK_SHADER_STAGE_CLOSEST_HIT_BIT_NVX = 0x00000400,
- VK_SHADER_STAGE_MISS_BIT_NVX = 0x00000800,
- VK_SHADER_STAGE_INTERSECTION_BIT_NVX = 0x00001000,
- VK_SHADER_STAGE_CALLABLE_BIT_NVX = 0x00002000,
+ //@extension("VK_NV_raytracing") // 166
+ VK_SHADER_STAGE_RAYGEN_BIT_NV = 0x00000100,
+ VK_SHADER_STAGE_ANY_HIT_BIT_NV = 0x00000200,
+ VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV = 0x00000400,
+ VK_SHADER_STAGE_MISS_BIT_NV = 0x00000800,
+ VK_SHADER_STAGE_INTERSECTION_BIT_NV = 0x00001000,
+ VK_SHADER_STAGE_CALLABLE_BIT_NV = 0x00002000,
//@extension("VK_NV_mesh_shader") // 203
VK_SHADER_STAGE_TASK_BIT_NV = 0x00000040,
@@ -2595,8 +2621,8 @@
VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT_KHR = 0x00000008,
VK_PIPELINE_CREATE_DISPATCH_BASE_KHR = 0x00000010,
- //@extension("VK_NVX_raytracing") // 166
- VK_PIPELINE_CREATE_DEFER_COMPILE_BIT_NVX = 0x00000020,
+ //@extension("VK_NV_raytracing") // 166
+ VK_PIPELINE_CREATE_DEFER_COMPILE_BIT_NV = 0x00000020,
}
/// Color component flags
@@ -2791,8 +2817,9 @@
//@extension("VK_NV_shading_rate_image") // 165
VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV = 0x00400000,
- //@extension("VK_NVX_raytracing") // 166
- VK_PIPELINE_STAGE_RAYTRACING_BIT_NVX = 0x00200000,
+ //@extension("VK_NV_raytracing") // 166
+ VK_PIPELINE_STAGE_RAY_TRACING_BIT_NV = 0x00200000,
+ VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV = 0x02000000,
//@extension("VK_NV_mesh_shader") // 203
VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV = 0x00080000,
@@ -3194,12 +3221,6 @@
//bitfield VkWaylandSurfaceCreateFlagBitsKHR {
//}
-@extension("VK_KHR_mir_surface") // 8
-type VkFlags VkMirSurfaceCreateFlagsKHR
-//@extension("VK_KHR_mir_surface") // 8
-//bitfield VkMirSurfaceCreateFlagBitsKHR {
-//}
-
@extension("VK_KHR_android_surface") // 9
type VkFlags VkAndroidSurfaceCreateFlagsKHR
//@extension("VK_KHR_android_surface") // 9
@@ -3484,33 +3505,33 @@
VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT = 0x00000008,
}
-@extension("VK_NVX_raytracing") // 166
-type VkFlags VkGeometryFlagsNVX
-@extension("VK_NVX_raytracing") // 166
-bitfield VkGeometryFlagBitsNVX {
- VK_GEOMETRY_OPAQUE_BIT_NVX = 0x00000001,
- VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_NVX = 0x00000002,
+@extension("VK_NV_raytracing") // 166
+type VkFlags VkGeometryFlagsNV
+@extension("VK_NV_raytracing") // 166
+bitfield VkGeometryFlagBitsNV {
+ VK_GEOMETRY_OPAQUE_BIT_NV = 0x00000001,
+ VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_NV = 0x00000002,
}
-@extension("VK_NVX_raytracing") // 166
-type VkFlags VkGeometryInstanceFlagsNVX
-@extension("VK_NVX_raytracing") // 166
-bitfield VkGeometryInstanceFlagBitsNVX {
- VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_DISABLE_BIT_NVX = 0x00000001,
- VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_FLIP_WINDING_BIT_NVX = 0x00000002,
- VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_NVX = 0x00000004,
- VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_NVX = 0x00000008,
+@extension("VK_NV_raytracing") // 166
+type VkFlags VkGeometryInstanceFlagsNV
+@extension("VK_NV_raytracing") // 166
+bitfield VkGeometryInstanceFlagBitsNV {
+ VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_DISABLE_BIT_NV = 0x00000001,
+ VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_NV = 0x00000002,
+ VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_NV = 0x00000004,
+ VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_NV = 0x00000008,
}
-@extension("VK_NVX_raytracing") // 166
-type VkFlags VkBuildAccelerationStructureFlagsNVX
-@extension("VK_NVX_raytracing") // 166
-bitfield VkBuildAccelerationStructureFlagBitsNVX {
- VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NVX = 0x00000001,
- VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NVX = 0x00000002,
- VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_NVX = 0x00000004,
- VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_NVX = 0x00000008,
- VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_NVX = 0x00000010,
+@extension("VK_NV_raytracing") // 166
+type VkFlags VkBuildAccelerationStructureFlagsNV
+@extension("VK_NV_raytracing") // 166
+bitfield VkBuildAccelerationStructureFlagBitsNV {
+ VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV = 0x00000001,
+ VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NV = 0x00000002,
+ VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_NV = 0x00000004,
+ VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_NV = 0x00000008,
+ VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_NV = 0x00000010,
}
@extension("VK_FUCHSIA_imagepipe_surface") // 215
@@ -5330,15 +5351,6 @@
platform.wl_surface* surface
}
-@extension("VK_KHR_mir_surface") // 8
-class VkMirSurfaceCreateInfoKHR {
- VkStructureType sType
- const void* pNext
- VkMirSurfaceCreateFlagsKHR flags
- platform.MirConnection* connection
- platform.MirSurface* mirSurface
-}
-
@extension("VK_KHR_android_surface") // 9
class VkAndroidSurfaceCreateInfoKHR {
VkStructureType sType
@@ -7249,22 +7261,34 @@
const VkCoarseSampleOrderCustomNV* pCustomSampleOrders
}
-@extension("VK_NVX_raytracing") // 166
-class VkRaytracingPipelineCreateInfoNVX {
- VkStructureType sType
- const void* pNext
- VkPipelineCreateFlags flags
- u32 stageCount
- const VkPipelineShaderStageCreateInfo* pStages
- const u32* pGroupNumbers
- u32 maxRecursionDepth
- VkPipelineLayout layout
- VkPipeline basePipelineHandle
- s32 basePipelineIndex
+@extension("VK_NV_raytracing") // 166
+class VkRayTracingShaderGroupCreateInfoNV {
+ VkStructureType sType
+ const void* pNext
+ VkRayTracingShaderGroupTypeNV type
+ u32 generalShader
+ u32 closestHitShader
+ u32 anyHitShader
+ u32 intersectionShader
}
-@extension("VK_NVX_raytracing") // 166
-class VkGeometryTrianglesNVX {
+@extension("VK_NV_raytracing") // 166
+class VkRayTracingPipelineCreateInfoNV {
+ VkStructureType sType
+ const void* pNext
+ VkPipelineCreateFlags flags
+ u32 stageCount
+ const VkPipelineShaderStageCreateInfo* pStages
+ u32 groupCount
+ const VkRayTracingShaderGroupCreateInfoNV* pGroups
+ u32 maxRecursionDepth
+ VkPipelineLayout layout
+ VkPipeline basePipelineHandle
+ s32 basePipelineIndex
+}
+
+@extension("VK_NV_raytracing") // 166
+class VkGeometryTrianglesNV {
VkStructureType sType
const void* pNext
VkBuffer vertexData
@@ -7280,8 +7304,8 @@
VkDeviceSize transformOffset
}
-@extension("VK_NVX_raytracing") // 166
-class VkGeometryAABBNVX {
+@extension("VK_NV_raytracing") // 166
+class VkGeometryAABBNV {
VkStructureType sType
const void* pNext
VkBuffer aabbData
@@ -7290,66 +7314,79 @@
VkDeviceSize offset
}
-@extension("VK_NVX_raytracing") // 166
-class VkGeometryDataNVX {
- VkGeometryTrianglesNVX triangles
- VkGeometryAABBNVX aabbs
+@extension("VK_NV_raytracing") // 166
+class VkGeometryDataNV {
+ VkGeometryTrianglesNV triangles
+ VkGeometryAABBNV aabbs
}
-@extension("VK_NVX_raytracing") // 166
-class VkGeometryNVX {
+@extension("VK_NV_raytracing") // 166
+class VkGeometryNV {
VkStructureType sType
const void* pNext
- VkGeometryTypeNVX geometryType
- VkGeometryDataNVX geometry
- VkGeometryFlagsNVX flags
+ VkGeometryTypeNV geometryType
+ VkGeometryDataNV geometry
+ VkGeometryFlagsNV flags
}
-@extension("VK_NVX_raytracing") // 166
-class VkAccelerationStructureCreateInfoNVX {
+@extension("VK_NV_raytracing") // 166
+class VkAccelerationStructureInfoNV {
VkStructureType sType
const void* pNext
- VkAccelerationStructureTypeNVX type
- VkBuildAccelerationStructureFlagsNVX flags
- VkDeviceSize compactedSize
+ VkAccelerationStructureTypeNV type
+ VkBuildAccelerationStructureFlagsNV flags
u32 instanceCount
u32 geometryCount
- const VkGeometryNVX* pGeometries
+ const VkGeometryNV* pGeometries
}
-@extension("VK_NVX_raytracing") // 166
-class VkBindAccelerationStructureMemoryInfoNVX {
+@extension("VK_NV_raytracing") // 166
+class VkAccelerationStructureCreateInfoNV {
VkStructureType sType
const void* pNext
- VkAccelerationStructureNVX accelerationStructure
+ VkDeviceSize compactedSize
+ VkAccelerationStructureInfoNV info
+}
+
+@extension("VK_NV_raytracing") // 166
+class VkBindAccelerationStructureMemoryInfoNV {
+ VkStructureType sType
+ const void* pNext
+ VkAccelerationStructureNV accelerationStructure
VkDeviceMemory memory
VkDeviceSize memoryOffset
u32 deviceIndexCount
const u32* pDeviceIndices
}
-@extension("VK_NVX_raytracing") // 166
-class VkDescriptorAccelerationStructureInfoNVX {
+@extension("VK_NV_raytracing") // 166
+class VkDescriptorAccelerationStructureInfoNV {
VkStructureType sType
const void* pNext
u32 accelerationStructureCount
- const VkAccelerationStructureNVX* pAccelerationStructures
+ const VkAccelerationStructureNV* pAccelerationStructures
}
-@extension("VK_NVX_raytracing") // 166
-class VkAccelerationStructureMemoryRequirementsInfoNVX {
+@extension("VK_NV_raytracing") // 166
+class VkAccelerationStructureMemoryRequirementsInfoNV {
VkStructureType sType
const void* pNext
- VkAccelerationStructureNVX accelerationStructure
+ VkAccelerationStructureMemoryRequirementsTypeNV type
+ VkAccelerationStructureNV accelerationStructure
}
-@extension("VK_NVX_raytracing") // 166
-class VkPhysicalDeviceRaytracingPropertiesNVX {
+@extension("VK_NV_raytracing") // 166
+class VkPhysicalDeviceRaytracingPropertiesNV {
VkStructureType sType
void* pNext
- u32 shaderHeaderSize
+ u32 shaderGroupHandleSize
u32 maxRecursionDepth
- u32 maxGeometryCount
+ u32 maxShaderGroupStride
+ u32 shaderGroupBaseAlignment
+ u64 maxGeometryCount
+ u64 maxInstanceCount
+ u64 maxTriangleCount
+ u32 maxDescriptorSetAccelerationStructures
}
@extension("VK_NV_representative_fragment_test") // 167
@@ -7454,6 +7491,13 @@
u32 vgprAllocationGranularity
}
+@extension("VK_AMD_memory_overallocation_behavior") // 190
+class VkDeviceMemoryOverallocationCreateInfoAMD {
+ VkStructureType sType
+ const void* pNext
+ VkMemoryOverallocationBehaviorAMD overallocationBehavior
+}
+
@extension("VK_EXT_vertex_attribute_divisor") // 191
class VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT {
VkStructureType sType
@@ -10370,25 +10414,6 @@
return ?
}
-@extension("VK_KHR_mir_surface") // 8
-cmd VkResult vkCreateMirSurfaceKHR(
- VkInstance instance,
- const VkMirSurfaceCreateInfoKHR* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkSurfaceKHR* pSurface) {
- instanceObject := GetInstance(instance)
- return ?
-}
-
-@extension("VK_KHR_mir_surface") // 8
-cmd VkBool32 vkGetPhysicalDeviceMirPresentationSupportKHR(
- VkPhysicalDevice physicalDevice,
- u32 queueFamilyIndex,
- platform.MirConnection* connection) {
- physicalDeviceObject := GetPhysicalDevice(physicalDevice)
- return ?
-}
-
@extension("VK_KHR_android_surface") // 9
cmd VkResult vkCreateAndroidSurfaceKHR(
VkInstance instance,
@@ -11448,71 +11473,60 @@
const VkCoarseSampleOrderCustomNV* pCustomSampleOrders) {
}
-@extension("VK_NVX_raytracing") // 166
-cmd VkResult vkCreateAccelerationStructureNVX(
+@extension("VK_NV_raytracing") // 166
+cmd VkResult vkCreateAccelerationStructureNV(
VkDevice device,
- const VkAccelerationStructureCreateInfoNVX* pCreateInfo,
+ const VkAccelerationStructureCreateInfoNV* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
- VkAccelerationStructureNVX* pAccelerationStructure) {
+ VkAccelerationStructureNV* pAccelerationStructure) {
return ?
}
-@extension("VK_NVX_raytracing") // 166
-cmd void vkDestroyAccelerationStructureNVX(
+@extension("VK_NV_raytracing") // 166
+cmd void vkDestroyAccelerationStructureNV(
VkDevice device,
- VkAccelerationStructureNVX accelerationStructure,
+ VkAccelerationStructureNV accelerationStructure,
const VkAllocationCallbacks* pAllocator) {
}
-@extension("VK_NVX_raytracing") // 166
-cmd void vkGetAccelerationStructureMemoryRequirementsNVX(
+@extension("VK_NV_raytracing") // 166
+cmd void vkGetAccelerationStructureMemoryRequirementsNV(
VkDevice device,
- const VkAccelerationStructureMemoryRequirementsInfoNVX* pInfo,
+ const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo,
VkMemoryRequirements2KHR* pMemoryRequirements) {
}
-@extension("VK_NVX_raytracing") // 166
-cmd void vkGetAccelerationStructureScratchMemoryRequirementsNVX(
- VkDevice device,
- const VkAccelerationStructureMemoryRequirementsInfoNVX* pInfo,
- VkMemoryRequirements2KHR* pMemoryRequirements) {
-}
-
-@extension("VK_NVX_raytracing") // 166
-cmd VkResult vkBindAccelerationStructureMemoryNVX(
+@extension("VK_NV_raytracing") // 166
+cmd VkResult vkBindAccelerationStructureMemoryNV(
VkDevice device,
u32 bindInfoCount,
- const VkBindAccelerationStructureMemoryInfoNVX* pBindInfos) {
+ const VkBindAccelerationStructureMemoryInfoNV* pBindInfos) {
return ?
}
-@extension("VK_NVX_raytracing") // 166
-cmd void vkCmdBuildAccelerationStructureNVX(
+@extension("VK_NV_raytracing") // 166
+cmd void vkCmdBuildAccelerationStructureNV(
VkCommandBuffer commandBuffer,
- VkAccelerationStructureTypeNVX type,
- u32 instanceCount,
+ const VkAccelerationStructureInfoNV* pInfo,
VkBuffer instanceData,
VkDeviceSize instanceOffset,
- u32 geometryCount,
- const VkGeometryNVX* pGeometries,
- VkBuildAccelerationStructureFlagsNVX flags,
VkBool32 update,
- VkAccelerationStructureNVX dst,
- VkAccelerationStructureNVX src,
+ VkAccelerationStructureNV dst,
+ VkAccelerationStructureNV src,
VkBuffer scratch,
VkDeviceSize scratchOffset) {
}
-@extension("VK_NVX_raytracing") // 166
-cmd void vkCmdCopyAccelerationStructureNVX(
+@extension("VK_NV_raytracing") // 166
+cmd void vkCmdCopyAccelerationStructureNV(
VkCommandBuffer commandBuffer,
- VkAccelerationStructureNVX dst,
- VkAccelerationStructureNVX src,
- VkCopyAccelerationStructureModeNVX mode) {
+ VkAccelerationStructureNV dst,
+ VkAccelerationStructureNV src,
+ VkCopyAccelerationStructureModeNV mode) {
}
-@extension("VK_NVX_raytracing") // 166
-cmd void vkCmdTraceRaysNVX(
+@extension("VK_NV_raytracing") // 166
+cmd void vkCmdTraceRaysNV(
VkCommandBuffer commandBuffer,
VkBuffer raygenShaderBindingTableBuffer,
VkDeviceSize raygenShaderBindingOffset,
@@ -11522,23 +11536,27 @@
VkBuffer hitShaderBindingTableBuffer,
VkDeviceSize hitShaderBindingOffset,
VkDeviceSize hitShaderBindingStride,
+ VkBuffer callableShaderBindingTableBuffer,
+ VkDeviceSize callableShaderBindingOffset,
+ VkDeviceSize callableShaderBindingStride,
u32 width,
- u32 height) {
+ u32 height,
+ u32 depth) {
}
-@extension("VK_NVX_raytracing") // 166
-cmd VkResult vkCreateRaytracingPipelinesNVX(
+@extension("VK_NV_raytracing") // 166
+cmd VkResult vkCreateRaytracingPipelinesNV(
VkDevice device,
VkPipelineCache pipelineCache,
u32 createInfoCount,
- const VkRaytracingPipelineCreateInfoNVX* pCreateInfos,
+ const VkRayTracingPipelineCreateInfoNV* pCreateInfos,
const VkAllocationCallbacks* pAllocator,
VkPipeline* pPipelines) {
return ?
}
-@extension("VK_NVX_raytracing") // 166
-cmd VkResult vkGetRaytracingShaderHandlesNVX(
+@extension("VK_NV_raytracing") // 166
+cmd VkResult vkGetRaytracingShaderHandlesNV(
VkDevice device,
VkPipeline pipeline,
u32 firstGroup,
@@ -11548,26 +11566,27 @@
return ?
}
-@extension("VK_NVX_raytracing") // 166
-cmd VkResult vkGetAccelerationStructureHandleNVX(
+@extension("VK_NV_raytracing") // 166
+cmd VkResult vkGetAccelerationStructureHandleNV(
VkDevice device,
- VkAccelerationStructureNVX accelerationStructure,
+ VkAccelerationStructureNV accelerationStructure,
platform.size_t dataSize,
void* pData) {
return ?
}
-@extension("VK_NVX_raytracing") // 166
-cmd void vkCmdWriteAccelerationStructurePropertiesNVX(
+@extension("VK_NV_raytracing") // 166
+cmd void vkCmdWriteAccelerationStructurePropertiesNV(
VkCommandBuffer commandBuffer,
- VkAccelerationStructureNVX accelerationStructure,
+ u32 accelerationStructureCount,
+ const VkAccelerationStructureNV* pAccelerationStructures,
VkQueryType queryType,
VkQueryPool queryPool,
- u32 query) {
+ u32 firstQuery) {
}
-@extension("VK_NVX_raytracing") // 166
-cmd VkResult vkCompileDeferredNVX(
+@extension("VK_NV_raytracing") // 166
+cmd VkResult vkCompileDeferredNV(
VkDevice device,
VkPipeline pipeline,
u32 shader) {
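
For reference, a minimal C sketch (not part of the patch) of how the widened vkCmdTraceRaysNV entry point is driven after this change: the device, command buffer, shader-binding-table buffer, offsets, strides, and the helper name trace_rays_example are placeholders assumed to be set up elsewhere, and the command is resolved through vkGetDeviceProcAddr because it is an extension entry point.

#include <vulkan/vulkan.h>

/* Sketch only: dispatch rays with the spec-version-2 signature, which adds a
 * callable shader binding table and a depth dimension. All handles and offsets
 * are assumed valid and VK_NV_ray_tracing is assumed to be enabled. */
static void trace_rays_example(VkDevice device, VkCommandBuffer cmd,
                               VkBuffer sbt,
                               VkDeviceSize raygenOffset,
                               VkDeviceSize missOffset, VkDeviceSize missStride,
                               VkDeviceSize hitOffset, VkDeviceSize hitStride,
                               VkDeviceSize callableOffset, VkDeviceSize callableStride,
                               uint32_t width, uint32_t height) {
    /* Extension commands are not exported by the loader; resolve at runtime. */
    PFN_vkCmdTraceRaysNV pfnCmdTraceRaysNV =
        (PFN_vkCmdTraceRaysNV)vkGetDeviceProcAddr(device, "vkCmdTraceRaysNV");
    if (pfnCmdTraceRaysNV == NULL)
        return;  /* VK_NV_ray_tracing not available on this device */

    pfnCmdTraceRaysNV(cmd,
                      sbt, raygenOffset,
                      sbt, missOffset, missStride,
                      sbt, hitOffset, hitStride,
                      sbt, callableOffset, callableStride,  /* new in spec version 2 */
                      width, height, /* depth, also new in spec version 2 */ 1);
}
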
diff --git a/vulkan/include/vulkan/vulkan.h b/vulkan/include/vulkan/vulkan.h
index d05c849..77da637 100644
--- a/vulkan/include/vulkan/vulkan.h
+++ b/vulkan/include/vulkan/vulkan.h
@@ -24,6 +24,10 @@
#include "vulkan_android.h"
#endif
+#ifdef VK_USE_PLATFORM_FUCHSIA
+#include <zircon/types.h>
+#include "vulkan_fuchsia.h"
+#endif
#ifdef VK_USE_PLATFORM_IOS_MVK
#include "vulkan_ios.h"
@@ -35,12 +39,6 @@
#endif
-#ifdef VK_USE_PLATFORM_MIR_KHR
-#include <mir_toolkit/client_types.h>
-#include "vulkan_mir.h"
-#endif
-
-
#ifdef VK_USE_PLATFORM_VI_NN
#include "vulkan_vi.h"
#endif
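
Similarly, a minimal C sketch (not part of the patch) of the new AMD memory overallocation control: per the extension's intended use, VkDeviceMemoryOverallocationCreateInfoAMD is chained into VkDeviceCreateInfo::pNext at device creation. The physical device, queue create info, and helper name create_device_without_overallocation are assumptions for illustration only.

#include <stddef.h>
#include <vulkan/vulkan.h>

/* Sketch only: create a device that opts out of driver memory overallocation.
 * Assumes the physical device advertises VK_AMD_memory_overallocation_behavior. */
static VkResult create_device_without_overallocation(VkPhysicalDevice physicalDevice,
                                                     const VkDeviceQueueCreateInfo* queueInfo,
                                                     VkDevice* pDevice) {
    VkDeviceMemoryOverallocationCreateInfoAMD overalloc = {
        .sType = VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD,
        .pNext = NULL,
        .overallocationBehavior = VK_MEMORY_OVERALLOCATION_BEHAVIOR_DISALLOWED_AMD,
    };
    const char* extensions[] = { VK_AMD_MEMORY_OVERALLOCATION_BEHAVIOR_EXTENSION_NAME };

    VkDeviceCreateInfo info = {
        .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
        .pNext = &overalloc,                 /* chain the AMD struct into device creation */
        .queueCreateInfoCount = 1,
        .pQueueCreateInfos = queueInfo,
        .enabledExtensionCount = 1,
        .ppEnabledExtensionNames = extensions,
    };
    return vkCreateDevice(physicalDevice, &info, NULL, pDevice);
}
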
diff --git a/vulkan/include/vulkan/vulkan_core.h b/vulkan/include/vulkan/vulkan_core.h
index ac9bb66..4cd8ed5 100644
--- a/vulkan/include/vulkan/vulkan_core.h
+++ b/vulkan/include/vulkan/vulkan_core.h
@@ -43,13 +43,12 @@
#define VK_VERSION_MINOR(version) (((uint32_t)(version) >> 12) & 0x3ff)
#define VK_VERSION_PATCH(version) ((uint32_t)(version) & 0xfff)
// Version of this file
-#define VK_HEADER_VERSION 90
+#define VK_HEADER_VERSION 91
#define VK_NULL_HANDLE 0
-
#define VK_DEFINE_HANDLE(object) typedef struct object##_T* object;
@@ -62,7 +61,6 @@
#endif
-
typedef uint32_t VkFlags;
typedef uint32_t VkBool32;
typedef uint64_t VkDeviceSize;
@@ -287,7 +285,6 @@
VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR = 1000004000,
VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR = 1000005000,
VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR = 1000006000,
- VK_STRUCTURE_TYPE_MIR_SURFACE_CREATE_INFO_KHR = 1000007000,
VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR = 1000008000,
VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR = 1000009000,
VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT = 1000011000,
@@ -419,17 +416,17 @@
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV = 1000164001,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV = 1000164002,
VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV = 1000164005,
- VK_STRUCTURE_TYPE_RAYTRACING_PIPELINE_CREATE_INFO_NVX = 1000165000,
- VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NVX = 1000165001,
- VK_STRUCTURE_TYPE_GEOMETRY_INSTANCE_NVX = 1000165002,
- VK_STRUCTURE_TYPE_GEOMETRY_NVX = 1000165003,
- VK_STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NVX = 1000165004,
- VK_STRUCTURE_TYPE_GEOMETRY_AABB_NVX = 1000165005,
- VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NVX = 1000165006,
- VK_STRUCTURE_TYPE_DESCRIPTOR_ACCELERATION_STRUCTURE_INFO_NVX = 1000165007,
- VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NVX = 1000165008,
- VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAYTRACING_PROPERTIES_NVX = 1000165009,
- VK_STRUCTURE_TYPE_HIT_SHADER_MODULE_CREATE_INFO_NVX = 1000165010,
+ VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV = 1000165000,
+ VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV = 1000165001,
+ VK_STRUCTURE_TYPE_GEOMETRY_NV = 1000165003,
+ VK_STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NV = 1000165004,
+ VK_STRUCTURE_TYPE_GEOMETRY_AABB_NV = 1000165005,
+ VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV = 1000165006,
+ VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV = 1000165007,
+ VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV = 1000165008,
+ VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV = 1000165009,
+ VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV = 1000165011,
+ VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV = 1000165012,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV = 1000166000,
VK_STRUCTURE_TYPE_PIPELINE_REPRESENTATIVE_FRAGMENT_TEST_STATE_CREATE_INFO_NV = 1000166001,
VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT = 1000174000,
@@ -440,6 +437,7 @@
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR = 1000180000,
VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_EXT = 1000184000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD = 1000185000,
+ VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD = 1000189000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT = 1000190000,
VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT = 1000190001,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT = 1000190002,
@@ -848,7 +846,7 @@
VK_QUERY_TYPE_PIPELINE_STATISTICS = 1,
VK_QUERY_TYPE_TIMESTAMP = 2,
VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT = 1000028004,
- VK_QUERY_TYPE_COMPACTED_SIZE_NVX = 1000165000,
+ VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV = 1000165000,
VK_QUERY_TYPE_BEGIN_RANGE = VK_QUERY_TYPE_OCCLUSION,
VK_QUERY_TYPE_END_RANGE = VK_QUERY_TYPE_TIMESTAMP,
VK_QUERY_TYPE_RANGE_SIZE = (VK_QUERY_TYPE_TIMESTAMP - VK_QUERY_TYPE_OCCLUSION + 1),
@@ -1178,7 +1176,7 @@
VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC = 9,
VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT = 10,
VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT = 1000138000,
- VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NVX = 1000165000,
+ VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV = 1000165000,
VK_DESCRIPTOR_TYPE_BEGIN_RANGE = VK_DESCRIPTOR_TYPE_SAMPLER,
VK_DESCRIPTOR_TYPE_END_RANGE = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT,
VK_DESCRIPTOR_TYPE_RANGE_SIZE = (VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT - VK_DESCRIPTOR_TYPE_SAMPLER + 1),
@@ -1207,7 +1205,7 @@
typedef enum VkPipelineBindPoint {
VK_PIPELINE_BIND_POINT_GRAPHICS = 0,
VK_PIPELINE_BIND_POINT_COMPUTE = 1,
- VK_PIPELINE_BIND_POINT_RAYTRACING_NVX = 1000165000,
+ VK_PIPELINE_BIND_POINT_RAY_TRACING_NV = 1000165000,
VK_PIPELINE_BIND_POINT_BEGIN_RANGE = VK_PIPELINE_BIND_POINT_GRAPHICS,
VK_PIPELINE_BIND_POINT_END_RANGE = VK_PIPELINE_BIND_POINT_COMPUTE,
VK_PIPELINE_BIND_POINT_RANGE_SIZE = (VK_PIPELINE_BIND_POINT_COMPUTE - VK_PIPELINE_BIND_POINT_GRAPHICS + 1),
@@ -1226,6 +1224,7 @@
typedef enum VkIndexType {
VK_INDEX_TYPE_UINT16 = 0,
VK_INDEX_TYPE_UINT32 = 1,
+ VK_INDEX_TYPE_NONE_NV = 1000165000,
VK_INDEX_TYPE_BEGIN_RANGE = VK_INDEX_TYPE_UINT16,
VK_INDEX_TYPE_END_RANGE = VK_INDEX_TYPE_UINT32,
VK_INDEX_TYPE_RANGE_SIZE = (VK_INDEX_TYPE_UINT32 - VK_INDEX_TYPE_UINT16 + 1),
@@ -1279,7 +1278,7 @@
VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX = 1000086001,
VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT = 1000128000,
VK_OBJECT_TYPE_VALIDATION_CACHE_EXT = 1000160000,
- VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NVX = 1000165000,
+ VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV = 1000165000,
VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR = VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE,
VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR = VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION,
VK_OBJECT_TYPE_BEGIN_RANGE = VK_OBJECT_TYPE_UNKNOWN,
@@ -1447,7 +1446,8 @@
VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT = 0x00040000,
VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX = 0x00020000,
VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV = 0x00400000,
- VK_PIPELINE_STAGE_RAYTRACING_BIT_NVX = 0x00200000,
+ VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV = 0x00200000,
+ VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV = 0x02000000,
VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV = 0x00080000,
VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV = 0x00100000,
VK_PIPELINE_STAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
@@ -1544,7 +1544,7 @@
VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT = 0x00000800,
VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT = 0x00001000,
VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT = 0x00000200,
- VK_BUFFER_USAGE_RAYTRACING_BIT_NVX = 0x00000400,
+ VK_BUFFER_USAGE_RAY_TRACING_BIT_NV = 0x00000400,
VK_BUFFER_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VkBufferUsageFlagBits;
typedef VkFlags VkBufferUsageFlags;
@@ -1559,7 +1559,7 @@
VK_PIPELINE_CREATE_DERIVATIVE_BIT = 0x00000004,
VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT = 0x00000008,
VK_PIPELINE_CREATE_DISPATCH_BASE = 0x00000010,
- VK_PIPELINE_CREATE_DEFER_COMPILE_BIT_NVX = 0x00000020,
+ VK_PIPELINE_CREATE_DEFER_COMPILE_BIT_NV = 0x00000020,
VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT_KHR = VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT,
VK_PIPELINE_CREATE_DISPATCH_BASE_KHR = VK_PIPELINE_CREATE_DISPATCH_BASE,
VK_PIPELINE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
@@ -1576,12 +1576,12 @@
VK_SHADER_STAGE_COMPUTE_BIT = 0x00000020,
VK_SHADER_STAGE_ALL_GRAPHICS = 0x0000001F,
VK_SHADER_STAGE_ALL = 0x7FFFFFFF,
- VK_SHADER_STAGE_RAYGEN_BIT_NVX = 0x00000100,
- VK_SHADER_STAGE_ANY_HIT_BIT_NVX = 0x00000200,
- VK_SHADER_STAGE_CLOSEST_HIT_BIT_NVX = 0x00000400,
- VK_SHADER_STAGE_MISS_BIT_NVX = 0x00000800,
- VK_SHADER_STAGE_INTERSECTION_BIT_NVX = 0x00001000,
- VK_SHADER_STAGE_CALLABLE_BIT_NVX = 0x00002000,
+ VK_SHADER_STAGE_RAYGEN_BIT_NV = 0x00000100,
+ VK_SHADER_STAGE_ANY_HIT_BIT_NV = 0x00000200,
+ VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV = 0x00000400,
+ VK_SHADER_STAGE_MISS_BIT_NV = 0x00000800,
+ VK_SHADER_STAGE_INTERSECTION_BIT_NV = 0x00001000,
+ VK_SHADER_STAGE_CALLABLE_BIT_NV = 0x00002000,
VK_SHADER_STAGE_TASK_BIT_NV = 0x00000040,
VK_SHADER_STAGE_MESH_BIT_NV = 0x00000080,
VK_SHADER_STAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
@@ -1673,8 +1673,8 @@
VK_ACCESS_COMMAND_PROCESS_WRITE_BIT_NVX = 0x00040000,
VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT = 0x00080000,
VK_ACCESS_SHADING_RATE_IMAGE_READ_BIT_NV = 0x00800000,
- VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NVX = 0x00200000,
- VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NVX = 0x00400000,
+ VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NV = 0x00200000,
+ VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NV = 0x00400000,
VK_ACCESS_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VkAccessFlagBits;
typedef VkFlags VkAccessFlags;
@@ -6182,7 +6182,7 @@
VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT = 33,
VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT = 1000156000,
VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT = 1000085000,
- VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NVX_EXT = 1000165000,
+ VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT = 1000165000,
VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT,
@@ -8113,81 +8113,113 @@
const VkCoarseSampleOrderCustomNV* pCustomSampleOrders);
#endif
-#define VK_NVX_raytracing 1
-VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkAccelerationStructureNVX)
+#define VK_NV_ray_tracing 1
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkAccelerationStructureNV)
-#define VK_NVX_RAYTRACING_SPEC_VERSION 1
-#define VK_NVX_RAYTRACING_EXTENSION_NAME "VK_NVX_raytracing"
+#define VK_NV_RAY_TRACING_SPEC_VERSION 2
+#define VK_NV_RAY_TRACING_EXTENSION_NAME "VK_NV_ray_tracing"
+#define VK_SHADER_UNUSED_NV (~0U)
-typedef enum VkGeometryTypeNVX {
- VK_GEOMETRY_TYPE_TRIANGLES_NVX = 0,
- VK_GEOMETRY_TYPE_AABBS_NVX = 1,
- VK_GEOMETRY_TYPE_BEGIN_RANGE_NVX = VK_GEOMETRY_TYPE_TRIANGLES_NVX,
- VK_GEOMETRY_TYPE_END_RANGE_NVX = VK_GEOMETRY_TYPE_AABBS_NVX,
- VK_GEOMETRY_TYPE_RANGE_SIZE_NVX = (VK_GEOMETRY_TYPE_AABBS_NVX - VK_GEOMETRY_TYPE_TRIANGLES_NVX + 1),
- VK_GEOMETRY_TYPE_MAX_ENUM_NVX = 0x7FFFFFFF
-} VkGeometryTypeNVX;
+typedef enum VkRayTracingShaderGroupTypeNV {
+ VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV = 0,
+ VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_NV = 1,
+ VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_NV = 2,
+ VK_RAY_TRACING_SHADER_GROUP_TYPE_BEGIN_RANGE_NV = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV,
+ VK_RAY_TRACING_SHADER_GROUP_TYPE_END_RANGE_NV = VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_NV,
+ VK_RAY_TRACING_SHADER_GROUP_TYPE_RANGE_SIZE_NV = (VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_NV - VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV + 1),
+ VK_RAY_TRACING_SHADER_GROUP_TYPE_MAX_ENUM_NV = 0x7FFFFFFF
+} VkRayTracingShaderGroupTypeNV;
-typedef enum VkAccelerationStructureTypeNVX {
- VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NVX = 0,
- VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NVX = 1,
- VK_ACCELERATION_STRUCTURE_TYPE_BEGIN_RANGE_NVX = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NVX,
- VK_ACCELERATION_STRUCTURE_TYPE_END_RANGE_NVX = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NVX,
- VK_ACCELERATION_STRUCTURE_TYPE_RANGE_SIZE_NVX = (VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NVX - VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NVX + 1),
- VK_ACCELERATION_STRUCTURE_TYPE_MAX_ENUM_NVX = 0x7FFFFFFF
-} VkAccelerationStructureTypeNVX;
+typedef enum VkGeometryTypeNV {
+ VK_GEOMETRY_TYPE_TRIANGLES_NV = 0,
+ VK_GEOMETRY_TYPE_AABBS_NV = 1,
+ VK_GEOMETRY_TYPE_BEGIN_RANGE_NV = VK_GEOMETRY_TYPE_TRIANGLES_NV,
+ VK_GEOMETRY_TYPE_END_RANGE_NV = VK_GEOMETRY_TYPE_AABBS_NV,
+ VK_GEOMETRY_TYPE_RANGE_SIZE_NV = (VK_GEOMETRY_TYPE_AABBS_NV - VK_GEOMETRY_TYPE_TRIANGLES_NV + 1),
+ VK_GEOMETRY_TYPE_MAX_ENUM_NV = 0x7FFFFFFF
+} VkGeometryTypeNV;
-typedef enum VkCopyAccelerationStructureModeNVX {
- VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NVX = 0,
- VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NVX = 1,
- VK_COPY_ACCELERATION_STRUCTURE_MODE_BEGIN_RANGE_NVX = VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NVX,
- VK_COPY_ACCELERATION_STRUCTURE_MODE_END_RANGE_NVX = VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NVX,
- VK_COPY_ACCELERATION_STRUCTURE_MODE_RANGE_SIZE_NVX = (VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NVX - VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NVX + 1),
- VK_COPY_ACCELERATION_STRUCTURE_MODE_MAX_ENUM_NVX = 0x7FFFFFFF
-} VkCopyAccelerationStructureModeNVX;
+typedef enum VkAccelerationStructureTypeNV {
+ VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV = 0,
+ VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV = 1,
+ VK_ACCELERATION_STRUCTURE_TYPE_BEGIN_RANGE_NV = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV,
+ VK_ACCELERATION_STRUCTURE_TYPE_END_RANGE_NV = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV,
+ VK_ACCELERATION_STRUCTURE_TYPE_RANGE_SIZE_NV = (VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV - VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV + 1),
+ VK_ACCELERATION_STRUCTURE_TYPE_MAX_ENUM_NV = 0x7FFFFFFF
+} VkAccelerationStructureTypeNV;
+
+typedef enum VkCopyAccelerationStructureModeNV {
+ VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NV = 0,
+ VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV = 1,
+ VK_COPY_ACCELERATION_STRUCTURE_MODE_BEGIN_RANGE_NV = VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NV,
+ VK_COPY_ACCELERATION_STRUCTURE_MODE_END_RANGE_NV = VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV,
+ VK_COPY_ACCELERATION_STRUCTURE_MODE_RANGE_SIZE_NV = (VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV - VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NV + 1),
+ VK_COPY_ACCELERATION_STRUCTURE_MODE_MAX_ENUM_NV = 0x7FFFFFFF
+} VkCopyAccelerationStructureModeNV;
+
+typedef enum VkAccelerationStructureMemoryRequirementsTypeNV {
+ VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV = 0,
+ VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV = 1,
+ VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV = 2,
+ VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BEGIN_RANGE_NV = VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV,
+ VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_END_RANGE_NV = VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV,
+ VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_RANGE_SIZE_NV = (VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV - VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV + 1),
+ VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_MAX_ENUM_NV = 0x7FFFFFFF
+} VkAccelerationStructureMemoryRequirementsTypeNV;
-typedef enum VkGeometryFlagBitsNVX {
- VK_GEOMETRY_OPAQUE_BIT_NVX = 0x00000001,
- VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_NVX = 0x00000002,
- VK_GEOMETRY_FLAG_BITS_MAX_ENUM_NVX = 0x7FFFFFFF
-} VkGeometryFlagBitsNVX;
-typedef VkFlags VkGeometryFlagsNVX;
+typedef enum VkGeometryFlagBitsNV {
+ VK_GEOMETRY_OPAQUE_BIT_NV = 0x00000001,
+ VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_NV = 0x00000002,
+ VK_GEOMETRY_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF
+} VkGeometryFlagBitsNV;
+typedef VkFlags VkGeometryFlagsNV;
-typedef enum VkGeometryInstanceFlagBitsNVX {
- VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_DISABLE_BIT_NVX = 0x00000001,
- VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_FLIP_WINDING_BIT_NVX = 0x00000002,
- VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_NVX = 0x00000004,
- VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_NVX = 0x00000008,
- VK_GEOMETRY_INSTANCE_FLAG_BITS_MAX_ENUM_NVX = 0x7FFFFFFF
-} VkGeometryInstanceFlagBitsNVX;
-typedef VkFlags VkGeometryInstanceFlagsNVX;
+typedef enum VkGeometryInstanceFlagBitsNV {
+ VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_DISABLE_BIT_NV = 0x00000001,
+ VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_NV = 0x00000002,
+ VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_NV = 0x00000004,
+ VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_NV = 0x00000008,
+ VK_GEOMETRY_INSTANCE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF
+} VkGeometryInstanceFlagBitsNV;
+typedef VkFlags VkGeometryInstanceFlagsNV;
-typedef enum VkBuildAccelerationStructureFlagBitsNVX {
- VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NVX = 0x00000001,
- VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NVX = 0x00000002,
- VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_NVX = 0x00000004,
- VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_NVX = 0x00000008,
- VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_NVX = 0x00000010,
- VK_BUILD_ACCELERATION_STRUCTURE_FLAG_BITS_MAX_ENUM_NVX = 0x7FFFFFFF
-} VkBuildAccelerationStructureFlagBitsNVX;
-typedef VkFlags VkBuildAccelerationStructureFlagsNVX;
+typedef enum VkBuildAccelerationStructureFlagBitsNV {
+ VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV = 0x00000001,
+ VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NV = 0x00000002,
+ VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_NV = 0x00000004,
+ VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_NV = 0x00000008,
+ VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_NV = 0x00000010,
+ VK_BUILD_ACCELERATION_STRUCTURE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF
+} VkBuildAccelerationStructureFlagBitsNV;
+typedef VkFlags VkBuildAccelerationStructureFlagsNV;
-typedef struct VkRaytracingPipelineCreateInfoNVX {
- VkStructureType sType;
- const void* pNext;
- VkPipelineCreateFlags flags;
- uint32_t stageCount;
- const VkPipelineShaderStageCreateInfo* pStages;
- const uint32_t* pGroupNumbers;
- uint32_t maxRecursionDepth;
- VkPipelineLayout layout;
- VkPipeline basePipelineHandle;
- int32_t basePipelineIndex;
-} VkRaytracingPipelineCreateInfoNVX;
+typedef struct VkRayTracingShaderGroupCreateInfoNV {
+ VkStructureType sType;
+ const void* pNext;
+ VkRayTracingShaderGroupTypeNV type;
+ uint32_t generalShader;
+ uint32_t closestHitShader;
+ uint32_t anyHitShader;
+ uint32_t intersectionShader;
+} VkRayTracingShaderGroupCreateInfoNV;
-typedef struct VkGeometryTrianglesNVX {
+typedef struct VkRayTracingPipelineCreateInfoNV {
+ VkStructureType sType;
+ const void* pNext;
+ VkPipelineCreateFlags flags;
+ uint32_t stageCount;
+ const VkPipelineShaderStageCreateInfo* pStages;
+ uint32_t groupCount;
+ const VkRayTracingShaderGroupCreateInfoNV* pGroups;
+ uint32_t maxRecursionDepth;
+ VkPipelineLayout layout;
+ VkPipeline basePipelineHandle;
+ int32_t basePipelineIndex;
+} VkRayTracingPipelineCreateInfoNV;
+
+typedef struct VkGeometryTrianglesNV {
VkStructureType sType;
const void* pNext;
VkBuffer vertexData;
@@ -8201,136 +8233,138 @@
VkIndexType indexType;
VkBuffer transformData;
VkDeviceSize transformOffset;
-} VkGeometryTrianglesNVX;
+} VkGeometryTrianglesNV;
-typedef struct VkGeometryAABBNVX {
+typedef struct VkGeometryAABBNV {
VkStructureType sType;
const void* pNext;
VkBuffer aabbData;
uint32_t numAABBs;
uint32_t stride;
VkDeviceSize offset;
-} VkGeometryAABBNVX;
+} VkGeometryAABBNV;
-typedef struct VkGeometryDataNVX {
- VkGeometryTrianglesNVX triangles;
- VkGeometryAABBNVX aabbs;
-} VkGeometryDataNVX;
+typedef struct VkGeometryDataNV {
+ VkGeometryTrianglesNV triangles;
+ VkGeometryAABBNV aabbs;
+} VkGeometryDataNV;
-typedef struct VkGeometryNVX {
- VkStructureType sType;
- const void* pNext;
- VkGeometryTypeNVX geometryType;
- VkGeometryDataNVX geometry;
- VkGeometryFlagsNVX flags;
-} VkGeometryNVX;
+typedef struct VkGeometryNV {
+ VkStructureType sType;
+ const void* pNext;
+ VkGeometryTypeNV geometryType;
+ VkGeometryDataNV geometry;
+ VkGeometryFlagsNV flags;
+} VkGeometryNV;
-typedef struct VkAccelerationStructureCreateInfoNVX {
- VkStructureType sType;
- const void* pNext;
- VkAccelerationStructureTypeNVX type;
- VkBuildAccelerationStructureFlagsNVX flags;
- VkDeviceSize compactedSize;
- uint32_t instanceCount;
- uint32_t geometryCount;
- const VkGeometryNVX* pGeometries;
-} VkAccelerationStructureCreateInfoNVX;
+typedef struct VkAccelerationStructureInfoNV {
+ VkStructureType sType;
+ const void* pNext;
+ VkAccelerationStructureTypeNV type;
+ VkBuildAccelerationStructureFlagsNV flags;
+ uint32_t instanceCount;
+ uint32_t geometryCount;
+ const VkGeometryNV* pGeometries;
+} VkAccelerationStructureInfoNV;
-typedef struct VkBindAccelerationStructureMemoryInfoNVX {
- VkStructureType sType;
- const void* pNext;
- VkAccelerationStructureNVX accelerationStructure;
- VkDeviceMemory memory;
- VkDeviceSize memoryOffset;
- uint32_t deviceIndexCount;
- const uint32_t* pDeviceIndices;
-} VkBindAccelerationStructureMemoryInfoNVX;
+typedef struct VkAccelerationStructureCreateInfoNV {
+ VkStructureType sType;
+ const void* pNext;
+ VkDeviceSize compactedSize;
+ VkAccelerationStructureInfoNV info;
+} VkAccelerationStructureCreateInfoNV;
-typedef struct VkDescriptorAccelerationStructureInfoNVX {
- VkStructureType sType;
- const void* pNext;
- uint32_t accelerationStructureCount;
- const VkAccelerationStructureNVX* pAccelerationStructures;
-} VkDescriptorAccelerationStructureInfoNVX;
+typedef struct VkBindAccelerationStructureMemoryInfoNV {
+ VkStructureType sType;
+ const void* pNext;
+ VkAccelerationStructureNV accelerationStructure;
+ VkDeviceMemory memory;
+ VkDeviceSize memoryOffset;
+ uint32_t deviceIndexCount;
+ const uint32_t* pDeviceIndices;
+} VkBindAccelerationStructureMemoryInfoNV;
-typedef struct VkAccelerationStructureMemoryRequirementsInfoNVX {
- VkStructureType sType;
- const void* pNext;
- VkAccelerationStructureNVX accelerationStructure;
-} VkAccelerationStructureMemoryRequirementsInfoNVX;
+typedef struct VkWriteDescriptorSetAccelerationStructureNV {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t accelerationStructureCount;
+ const VkAccelerationStructureNV* pAccelerationStructures;
+} VkWriteDescriptorSetAccelerationStructureNV;
-typedef struct VkPhysicalDeviceRaytracingPropertiesNVX {
+typedef struct VkAccelerationStructureMemoryRequirementsInfoNV {
+ VkStructureType sType;
+ const void* pNext;
+ VkAccelerationStructureMemoryRequirementsTypeNV type;
+ VkAccelerationStructureNV accelerationStructure;
+} VkAccelerationStructureMemoryRequirementsInfoNV;
+
+typedef struct VkPhysicalDeviceRayTracingPropertiesNV {
VkStructureType sType;
void* pNext;
- uint32_t shaderHeaderSize;
+ uint32_t shaderGroupHandleSize;
uint32_t maxRecursionDepth;
- uint32_t maxGeometryCount;
-} VkPhysicalDeviceRaytracingPropertiesNVX;
+ uint32_t maxShaderGroupStride;
+ uint32_t shaderGroupBaseAlignment;
+ uint64_t maxGeometryCount;
+ uint64_t maxInstanceCount;
+ uint64_t maxTriangleCount;
+ uint32_t maxDescriptorSetAccelerationStructures;
+} VkPhysicalDeviceRayTracingPropertiesNV;
-typedef VkResult (VKAPI_PTR *PFN_vkCreateAccelerationStructureNVX)(VkDevice device, const VkAccelerationStructureCreateInfoNVX* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureNVX* pAccelerationStructure);
-typedef void (VKAPI_PTR *PFN_vkDestroyAccelerationStructureNVX)(VkDevice device, VkAccelerationStructureNVX accelerationStructure, const VkAllocationCallbacks* pAllocator);
-typedef void (VKAPI_PTR *PFN_vkGetAccelerationStructureMemoryRequirementsNVX)(VkDevice device, const VkAccelerationStructureMemoryRequirementsInfoNVX* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements);
-typedef void (VKAPI_PTR *PFN_vkGetAccelerationStructureScratchMemoryRequirementsNVX)(VkDevice device, const VkAccelerationStructureMemoryRequirementsInfoNVX* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements);
-typedef VkResult (VKAPI_PTR *PFN_vkBindAccelerationStructureMemoryNVX)(VkDevice device, uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoNVX* pBindInfos);
-typedef void (VKAPI_PTR *PFN_vkCmdBuildAccelerationStructureNVX)(VkCommandBuffer commandBuffer, VkAccelerationStructureTypeNVX type, uint32_t instanceCount, VkBuffer instanceData, VkDeviceSize instanceOffset, uint32_t geometryCount, const VkGeometryNVX* pGeometries, VkBuildAccelerationStructureFlagsNVX flags, VkBool32 update, VkAccelerationStructureNVX dst, VkAccelerationStructureNVX src, VkBuffer scratch, VkDeviceSize scratchOffset);
-typedef void (VKAPI_PTR *PFN_vkCmdCopyAccelerationStructureNVX)(VkCommandBuffer commandBuffer, VkAccelerationStructureNVX dst, VkAccelerationStructureNVX src, VkCopyAccelerationStructureModeNVX mode);
-typedef void (VKAPI_PTR *PFN_vkCmdTraceRaysNVX)(VkCommandBuffer commandBuffer, VkBuffer raygenShaderBindingTableBuffer, VkDeviceSize raygenShaderBindingOffset, VkBuffer missShaderBindingTableBuffer, VkDeviceSize missShaderBindingOffset, VkDeviceSize missShaderBindingStride, VkBuffer hitShaderBindingTableBuffer, VkDeviceSize hitShaderBindingOffset, VkDeviceSize hitShaderBindingStride, uint32_t width, uint32_t height);
-typedef VkResult (VKAPI_PTR *PFN_vkCreateRaytracingPipelinesNVX)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRaytracingPipelineCreateInfoNVX* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines);
-typedef VkResult (VKAPI_PTR *PFN_vkGetRaytracingShaderHandlesNVX)(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData);
-typedef VkResult (VKAPI_PTR *PFN_vkGetAccelerationStructureHandleNVX)(VkDevice device, VkAccelerationStructureNVX accelerationStructure, size_t dataSize, void* pData);
-typedef void (VKAPI_PTR *PFN_vkCmdWriteAccelerationStructurePropertiesNVX)(VkCommandBuffer commandBuffer, VkAccelerationStructureNVX accelerationStructure, VkQueryType queryType, VkQueryPool queryPool, uint32_t query);
-typedef VkResult (VKAPI_PTR *PFN_vkCompileDeferredNVX)(VkDevice device, VkPipeline pipeline, uint32_t shader);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateAccelerationStructureNV)(VkDevice device, const VkAccelerationStructureCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureNV* pAccelerationStructure);
+typedef void (VKAPI_PTR *PFN_vkDestroyAccelerationStructureNV)(VkDevice device, VkAccelerationStructureNV accelerationStructure, const VkAllocationCallbacks* pAllocator);
+typedef void (VKAPI_PTR *PFN_vkGetAccelerationStructureMemoryRequirementsNV)(VkDevice device, const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements);
+typedef VkResult (VKAPI_PTR *PFN_vkBindAccelerationStructureMemoryNV)(VkDevice device, uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoNV* pBindInfos);
+typedef void (VKAPI_PTR *PFN_vkCmdBuildAccelerationStructureNV)(VkCommandBuffer commandBuffer, const VkAccelerationStructureInfoNV* pInfo, VkBuffer instanceData, VkDeviceSize instanceOffset, VkBool32 update, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkBuffer scratch, VkDeviceSize scratchOffset);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyAccelerationStructureNV)(VkCommandBuffer commandBuffer, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkCopyAccelerationStructureModeNV mode);
+typedef void (VKAPI_PTR *PFN_vkCmdTraceRaysNV)(VkCommandBuffer commandBuffer, VkBuffer raygenShaderBindingTableBuffer, VkDeviceSize raygenShaderBindingOffset, VkBuffer missShaderBindingTableBuffer, VkDeviceSize missShaderBindingOffset, VkDeviceSize missShaderBindingStride, VkBuffer hitShaderBindingTableBuffer, VkDeviceSize hitShaderBindingOffset, VkDeviceSize hitShaderBindingStride, VkBuffer callableShaderBindingTableBuffer, VkDeviceSize callableShaderBindingOffset, VkDeviceSize callableShaderBindingStride, uint32_t width, uint32_t height, uint32_t depth);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateRayTracingPipelinesNV)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoNV* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines);
+typedef VkResult (VKAPI_PTR *PFN_vkGetRayTracingShaderGroupHandlesNV)(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData);
+typedef VkResult (VKAPI_PTR *PFN_vkGetAccelerationStructureHandleNV)(VkDevice device, VkAccelerationStructureNV accelerationStructure, size_t dataSize, void* pData);
+typedef void (VKAPI_PTR *PFN_vkCmdWriteAccelerationStructuresPropertiesNV)(VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureNV* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery);
+typedef VkResult (VKAPI_PTR *PFN_vkCompileDeferredNV)(VkDevice device, VkPipeline pipeline, uint32_t shader);
#ifndef VK_NO_PROTOTYPES
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateAccelerationStructureNVX(
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateAccelerationStructureNV(
VkDevice device,
- const VkAccelerationStructureCreateInfoNVX* pCreateInfo,
+ const VkAccelerationStructureCreateInfoNV* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
- VkAccelerationStructureNVX* pAccelerationStructure);
+ VkAccelerationStructureNV* pAccelerationStructure);
-VKAPI_ATTR void VKAPI_CALL vkDestroyAccelerationStructureNVX(
+VKAPI_ATTR void VKAPI_CALL vkDestroyAccelerationStructureNV(
VkDevice device,
- VkAccelerationStructureNVX accelerationStructure,
+ VkAccelerationStructureNV accelerationStructure,
const VkAllocationCallbacks* pAllocator);
-VKAPI_ATTR void VKAPI_CALL vkGetAccelerationStructureMemoryRequirementsNVX(
+VKAPI_ATTR void VKAPI_CALL vkGetAccelerationStructureMemoryRequirementsNV(
VkDevice device,
- const VkAccelerationStructureMemoryRequirementsInfoNVX* pInfo,
+ const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo,
VkMemoryRequirements2KHR* pMemoryRequirements);
-VKAPI_ATTR void VKAPI_CALL vkGetAccelerationStructureScratchMemoryRequirementsNVX(
- VkDevice device,
- const VkAccelerationStructureMemoryRequirementsInfoNVX* pInfo,
- VkMemoryRequirements2KHR* pMemoryRequirements);
-
-VKAPI_ATTR VkResult VKAPI_CALL vkBindAccelerationStructureMemoryNVX(
+VKAPI_ATTR VkResult VKAPI_CALL vkBindAccelerationStructureMemoryNV(
VkDevice device,
uint32_t bindInfoCount,
- const VkBindAccelerationStructureMemoryInfoNVX* pBindInfos);
+ const VkBindAccelerationStructureMemoryInfoNV* pBindInfos);
-VKAPI_ATTR void VKAPI_CALL vkCmdBuildAccelerationStructureNVX(
+VKAPI_ATTR void VKAPI_CALL vkCmdBuildAccelerationStructureNV(
VkCommandBuffer commandBuffer,
- VkAccelerationStructureTypeNVX type,
- uint32_t instanceCount,
+ const VkAccelerationStructureInfoNV* pInfo,
VkBuffer instanceData,
VkDeviceSize instanceOffset,
- uint32_t geometryCount,
- const VkGeometryNVX* pGeometries,
- VkBuildAccelerationStructureFlagsNVX flags,
VkBool32 update,
- VkAccelerationStructureNVX dst,
- VkAccelerationStructureNVX src,
+ VkAccelerationStructureNV dst,
+ VkAccelerationStructureNV src,
VkBuffer scratch,
VkDeviceSize scratchOffset);
-VKAPI_ATTR void VKAPI_CALL vkCmdCopyAccelerationStructureNVX(
+VKAPI_ATTR void VKAPI_CALL vkCmdCopyAccelerationStructureNV(
VkCommandBuffer commandBuffer,
- VkAccelerationStructureNVX dst,
- VkAccelerationStructureNVX src,
- VkCopyAccelerationStructureModeNVX mode);
+ VkAccelerationStructureNV dst,
+ VkAccelerationStructureNV src,
+ VkCopyAccelerationStructureModeNV mode);
-VKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysNVX(
+VKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysNV(
VkCommandBuffer commandBuffer,
VkBuffer raygenShaderBindingTableBuffer,
VkDeviceSize raygenShaderBindingOffset,
@@ -8340,18 +8374,22 @@
VkBuffer hitShaderBindingTableBuffer,
VkDeviceSize hitShaderBindingOffset,
VkDeviceSize hitShaderBindingStride,
+ VkBuffer callableShaderBindingTableBuffer,
+ VkDeviceSize callableShaderBindingOffset,
+ VkDeviceSize callableShaderBindingStride,
uint32_t width,
- uint32_t height);
+ uint32_t height,
+ uint32_t depth);
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateRaytracingPipelinesNVX(
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateRayTracingPipelinesNV(
VkDevice device,
VkPipelineCache pipelineCache,
uint32_t createInfoCount,
- const VkRaytracingPipelineCreateInfoNVX* pCreateInfos,
+ const VkRayTracingPipelineCreateInfoNV* pCreateInfos,
const VkAllocationCallbacks* pAllocator,
VkPipeline* pPipelines);
-VKAPI_ATTR VkResult VKAPI_CALL vkGetRaytracingShaderHandlesNVX(
+VKAPI_ATTR VkResult VKAPI_CALL vkGetRayTracingShaderGroupHandlesNV(
VkDevice device,
VkPipeline pipeline,
uint32_t firstGroup,
@@ -8359,20 +8397,21 @@
size_t dataSize,
void* pData);
-VKAPI_ATTR VkResult VKAPI_CALL vkGetAccelerationStructureHandleNVX(
+VKAPI_ATTR VkResult VKAPI_CALL vkGetAccelerationStructureHandleNV(
VkDevice device,
- VkAccelerationStructureNVX accelerationStructure,
+ VkAccelerationStructureNV accelerationStructure,
size_t dataSize,
void* pData);
-VKAPI_ATTR void VKAPI_CALL vkCmdWriteAccelerationStructurePropertiesNVX(
+VKAPI_ATTR void VKAPI_CALL vkCmdWriteAccelerationStructuresPropertiesNV(
VkCommandBuffer commandBuffer,
- VkAccelerationStructureNVX accelerationStructure,
+ uint32_t accelerationStructureCount,
+ const VkAccelerationStructureNV* pAccelerationStructures,
VkQueryType queryType,
VkQueryPool queryPool,
- uint32_t query);
+ uint32_t firstQuery);
-VKAPI_ATTR VkResult VKAPI_CALL vkCompileDeferredNVX(
+VKAPI_ATTR VkResult VKAPI_CALL vkCompileDeferredNV(
VkDevice device,
VkPipeline pipeline,
uint32_t shader);
@@ -8534,6 +8573,29 @@
+#define VK_AMD_memory_overallocation_behavior 1
+#define VK_AMD_MEMORY_OVERALLOCATION_BEHAVIOR_SPEC_VERSION 1
+#define VK_AMD_MEMORY_OVERALLOCATION_BEHAVIOR_EXTENSION_NAME "VK_AMD_memory_overallocation_behavior"
+
+
+typedef enum VkMemoryOverallocationBehaviorAMD {
+ VK_MEMORY_OVERALLOCATION_BEHAVIOR_DEFAULT_AMD = 0,
+ VK_MEMORY_OVERALLOCATION_BEHAVIOR_ALLOWED_AMD = 1,
+ VK_MEMORY_OVERALLOCATION_BEHAVIOR_DISALLOWED_AMD = 2,
+ VK_MEMORY_OVERALLOCATION_BEHAVIOR_BEGIN_RANGE_AMD = VK_MEMORY_OVERALLOCATION_BEHAVIOR_DEFAULT_AMD,
+ VK_MEMORY_OVERALLOCATION_BEHAVIOR_END_RANGE_AMD = VK_MEMORY_OVERALLOCATION_BEHAVIOR_DISALLOWED_AMD,
+ VK_MEMORY_OVERALLOCATION_BEHAVIOR_RANGE_SIZE_AMD = (VK_MEMORY_OVERALLOCATION_BEHAVIOR_DISALLOWED_AMD - VK_MEMORY_OVERALLOCATION_BEHAVIOR_DEFAULT_AMD + 1),
+ VK_MEMORY_OVERALLOCATION_BEHAVIOR_MAX_ENUM_AMD = 0x7FFFFFFF
+} VkMemoryOverallocationBehaviorAMD;
+
+typedef struct VkDeviceMemoryOverallocationCreateInfoAMD {
+ VkStructureType sType;
+ const void* pNext;
+ VkMemoryOverallocationBehaviorAMD overallocationBehavior;
+} VkDeviceMemoryOverallocationCreateInfoAMD;
+
+
+
#define VK_EXT_vertex_attribute_divisor 1
#define VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_SPEC_VERSION 3
#define VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME "VK_EXT_vertex_attribute_divisor"