/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "VulkanManager.h"

#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <GrBackendSemaphore.h>
#include <GrBackendSurface.h>
#include <GrDirectContext.h>
#include <GrTypes.h>
#include <android/sync.h>
#include <ui/FatVector.h>
#include <vk/GrVkExtensions.h>
#include <vk/GrVkTypes.h>

#include <cstring>

#include <gui/TraceUtils.h>
#include "Properties.h"
#include "RenderThread.h"
#include "pipeline/skia/ShaderCache.h"
#include "renderstate/RenderState.h"

#undef LOG_TAG
#define LOG_TAG "VulkanManager"

namespace android {
namespace uirenderer {
namespace renderthread {

static std::array<std::string_view, 18> sEnableExtensions{
        VK_KHR_BIND_MEMORY_2_EXTENSION_NAME,
        VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME,
        VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME,
        VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME,
        VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME,
        VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
        VK_KHR_MAINTENANCE1_EXTENSION_NAME,
        VK_KHR_MAINTENANCE2_EXTENSION_NAME,
        VK_KHR_MAINTENANCE3_EXTENSION_NAME,
        VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME,
        VK_KHR_SURFACE_EXTENSION_NAME,
        VK_KHR_SWAPCHAIN_EXTENSION_NAME,
        VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME,
        VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME,
        VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME,
        VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME,
        VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME,
        VK_KHR_ANDROID_SURFACE_EXTENSION_NAME,
};

static bool shouldEnableExtension(const std::string_view& extension) {
    for (const auto& it : sEnableExtensions) {
        if (it == extension) {
            return true;
        }
    }
    return false;
}

static void free_features_extensions_structs(const VkPhysicalDeviceFeatures2& features) {
    // All Vulkan structs that could be part of the features chain will start with the
    // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
    // so we can get access to the pNext for the next struct.
    struct CommonVulkanHeader {
        VkStructureType sType;
        void* pNext;
    };

    void* pNext = features.pNext;
    while (pNext) {
        void* current = pNext;
        pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
        free(current);
    }
}

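// Proc loader handed to Skia. Device-level lookups go through vkGetDeviceProcAddr, except for
// vkQueueSubmit and vkQueueWaitIdle, which are routed to VulkanManager's intercepted wrappers
// (presumably so that access to the shared graphics queue can be serialized).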
GrVkGetProc VulkanManager::sSkiaGetProp = [](const char* proc_name, VkInstance instance,
                                             VkDevice device) {
    if (device != VK_NULL_HANDLE) {
        if (strcmp("vkQueueSubmit", proc_name) == 0) {
            return (PFN_vkVoidFunction)VulkanManager::interceptedVkQueueSubmit;
        } else if (strcmp("vkQueueWaitIdle", proc_name) == 0) {
            return (PFN_vkVoidFunction)VulkanManager::interceptedVkQueueWaitIdle;
        }
        return vkGetDeviceProcAddr(device, proc_name);
    }
    return vkGetInstanceProcAddr(instance, proc_name);
};

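// Resolve a Vulkan entry point "vkF" into the matching mF member, using the global loader, the
// instance, or the device as appropriate.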
#define GET_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(VK_NULL_HANDLE, "vk" #F)
#define GET_INST_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(mInstance, "vk" #F)
#define GET_DEV_PROC(F) m##F = (PFN_vk##F)vkGetDeviceProcAddr(mDevice, "vk" #F)

sp<VulkanManager> VulkanManager::getInstance() {
    // Cache a weak pointer to the context so that a second thread can share the same Vulkan state.
    static wp<VulkanManager> sWeakInstance = nullptr;
    static std::mutex sLock;

    std::lock_guard _lock{sLock};
    sp<VulkanManager> vulkanManager = sWeakInstance.promote();
    if (!vulkanManager.get()) {
        vulkanManager = new VulkanManager();
        sWeakInstance = vulkanManager;
    }

    return vulkanManager;
}

VulkanManager::~VulkanManager() {
    if (mDevice != VK_NULL_HANDLE) {
        mDeviceWaitIdle(mDevice);
        mDestroyDevice(mDevice, nullptr);
    }

    if (mInstance != VK_NULL_HANDLE) {
        mDestroyInstance(mInstance, nullptr);
    }

    mGraphicsQueue = VK_NULL_HANDLE;
    mDevice = VK_NULL_HANDLE;
    mPhysicalDevice = VK_NULL_HANDLE;
    mInstance = VK_NULL_HANDLE;
    mInstanceExtensionsOwner.clear();
    mInstanceExtensions.clear();
    mDeviceExtensionsOwner.clear();
    mDeviceExtensions.clear();
    free_features_extensions_structs(mPhysicalDeviceFeatures2);
    mPhysicalDeviceFeatures2 = {};
}

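// Creates the VkInstance and VkDevice, enabling only the extensions listed in sEnableExtensions,
// selecting the graphics queue family, and chaining the feature structs that are later handed to
// Skia via GrVkBackendContext.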
void VulkanManager::setupDevice(GrVkExtensions& grExtensions, VkPhysicalDeviceFeatures2& features) {
    VkResult err;

    constexpr VkApplicationInfo app_info = {
            VK_STRUCTURE_TYPE_APPLICATION_INFO,  // sType
            nullptr,                             // pNext
            "android framework",                 // pApplicationName
            0,                                   // applicationVersion
            "android framework",                 // pEngineName
            0,                                   // engineVersion
            mAPIVersion,                         // apiVersion
    };

    {
        GET_PROC(EnumerateInstanceExtensionProperties);

        uint32_t extensionCount = 0;
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        mInstanceExtensionsOwner.resize(extensionCount);
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount,
                                                    mInstanceExtensionsOwner.data());
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        bool hasKHRSurfaceExtension = false;
        bool hasKHRAndroidSurfaceExtension = false;
        for (const VkExtensionProperties& extension : mInstanceExtensionsOwner) {
            if (!shouldEnableExtension(extension.extensionName)) {
                ALOGV("Not enabling instance extension %s", extension.extensionName);
                continue;
            }
            ALOGV("Enabling instance extension %s", extension.extensionName);
            mInstanceExtensions.push_back(extension.extensionName);
            if (!strcmp(extension.extensionName, VK_KHR_SURFACE_EXTENSION_NAME)) {
                hasKHRSurfaceExtension = true;
            }
            if (!strcmp(extension.extensionName, VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
                hasKHRAndroidSurfaceExtension = true;
            }
        }
        LOG_ALWAYS_FATAL_IF(!hasKHRSurfaceExtension || !hasKHRAndroidSurfaceExtension);
    }

    const VkInstanceCreateInfo instance_create = {
            VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,  // sType
            nullptr,                                 // pNext
            0,                                       // flags
            &app_info,                               // pApplicationInfo
            0,                                       // enabledLayerNameCount
            nullptr,                                 // ppEnabledLayerNames
            (uint32_t)mInstanceExtensions.size(),    // enabledExtensionNameCount
            mInstanceExtensions.data(),              // ppEnabledExtensionNames
    };

    GET_PROC(CreateInstance);
    err = mCreateInstance(&instance_create, nullptr, &mInstance);
    LOG_ALWAYS_FATAL_IF(err < 0);

    GET_INST_PROC(CreateDevice);
    GET_INST_PROC(DestroyInstance);
    GET_INST_PROC(EnumerateDeviceExtensionProperties);
    GET_INST_PROC(EnumeratePhysicalDevices);
    GET_INST_PROC(GetPhysicalDeviceFeatures2);
    GET_INST_PROC(GetPhysicalDeviceImageFormatProperties2);
    GET_INST_PROC(GetPhysicalDeviceProperties);
    GET_INST_PROC(GetPhysicalDeviceQueueFamilyProperties);

    uint32_t gpuCount;
    LOG_ALWAYS_FATAL_IF(mEnumeratePhysicalDevices(mInstance, &gpuCount, nullptr));
    LOG_ALWAYS_FATAL_IF(!gpuCount);
    // Just return the first physical device instead of the whole array, since there should only
    // be one device on Android.
    gpuCount = 1;
    err = mEnumeratePhysicalDevices(mInstance, &gpuCount, &mPhysicalDevice);
    // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
    LOG_ALWAYS_FATAL_IF(err && VK_INCOMPLETE != err);

    VkPhysicalDeviceProperties physDeviceProperties;
    mGetPhysicalDeviceProperties(mPhysicalDevice, &physDeviceProperties);
    LOG_ALWAYS_FATAL_IF(physDeviceProperties.apiVersion < VK_MAKE_VERSION(1, 1, 0));
    mDriverVersion = physDeviceProperties.driverVersion;

    // Query to get the initial queue props size.
    uint32_t queueCount;
    mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, nullptr);
    LOG_ALWAYS_FATAL_IF(!queueCount);

    // Now get the actual queue props.
    std::unique_ptr<VkQueueFamilyProperties[]> queueProps(new VkQueueFamilyProperties[queueCount]);
    mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, queueProps.get());

    // Iterate to find the graphics queue.
    mGraphicsQueueIndex = queueCount;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            mGraphicsQueueIndex = i;
            break;
        }
    }
    LOG_ALWAYS_FATAL_IF(mGraphicsQueueIndex == queueCount);

    {
        uint32_t extensionCount = 0;
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                                                  nullptr);
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        mDeviceExtensionsOwner.resize(extensionCount);
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                                                  mDeviceExtensionsOwner.data());
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        bool hasKHRSwapchainExtension = false;
        for (const VkExtensionProperties& extension : mDeviceExtensionsOwner) {
            if (!shouldEnableExtension(extension.extensionName)) {
                ALOGV("Not enabling device extension %s", extension.extensionName);
                continue;
            }
            ALOGV("Enabling device extension %s", extension.extensionName);
            mDeviceExtensions.push_back(extension.extensionName);
            if (!strcmp(extension.extensionName, VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
                hasKHRSwapchainExtension = true;
            }
        }
        LOG_ALWAYS_FATAL_IF(!hasKHRSwapchainExtension);
    }

    grExtensions.init(sSkiaGetProp, mInstance, mPhysicalDevice, mInstanceExtensions.size(),
                      mInstanceExtensions.data(), mDeviceExtensions.size(),
                      mDeviceExtensions.data());

    LOG_ALWAYS_FATAL_IF(!grExtensions.hasExtension(VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME, 1));

    memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
    features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    features.pNext = nullptr;

    // Setup all extension feature structs we may want to use.
    void** tailPNext = &features.pNext;

    if (grExtensions.hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend;
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*)malloc(
                sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        LOG_ALWAYS_FATAL_IF(!blend);
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature;
    ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*)malloc(
            sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
    LOG_ALWAYS_FATAL_IF(!ycbcrFeature);
    ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
    ycbcrFeature->pNext = nullptr;
    *tailPNext = ycbcrFeature;
    tailPNext = &ycbcrFeature->pNext;

    // Query to get the physical device features.
    mGetPhysicalDeviceFeatures2(mPhysicalDevice, &features);
    // This looks like it would slow things down, and we can't depend on it on all platforms.
    features.features.robustBufferAccess = VK_FALSE;

    float queuePriorities[1] = {0.0};

    void* queueNextPtr = nullptr;

    VkDeviceQueueGlobalPriorityCreateInfoEXT queuePriorityCreateInfo;

    if (Properties::contextPriority != 0 &&
        grExtensions.hasExtension(VK_EXT_GLOBAL_PRIORITY_EXTENSION_NAME, 2)) {
        memset(&queuePriorityCreateInfo, 0, sizeof(VkDeviceQueueGlobalPriorityCreateInfoEXT));
        queuePriorityCreateInfo.sType =
                VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT;
        queuePriorityCreateInfo.pNext = nullptr;
        switch (Properties::contextPriority) {
            case EGL_CONTEXT_PRIORITY_LOW_IMG:
                queuePriorityCreateInfo.globalPriority = VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT;
                break;
            case EGL_CONTEXT_PRIORITY_MEDIUM_IMG:
                queuePriorityCreateInfo.globalPriority = VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT;
                break;
            case EGL_CONTEXT_PRIORITY_HIGH_IMG:
                queuePriorityCreateInfo.globalPriority = VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT;
                break;
            default:
                LOG_ALWAYS_FATAL("Unsupported context priority");
        }
        queueNextPtr = &queuePriorityCreateInfo;
    }

    const VkDeviceQueueCreateInfo queueInfo = {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,  // sType
            queueNextPtr,                                // pNext
            0,                                           // VkDeviceQueueCreateFlags
            mGraphicsQueueIndex,                         // queueFamilyIndex
            1,                                           // queueCount
            queuePriorities,                             // pQueuePriorities
    };

    const VkDeviceCreateInfo deviceInfo = {
            VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,  // sType
            &features,                             // pNext
            0,                                     // VkDeviceCreateFlags
            1,                                     // queueCreateInfoCount
            &queueInfo,                            // pQueueCreateInfos
            0,                                     // layerCount
            nullptr,                               // ppEnabledLayerNames
            (uint32_t)mDeviceExtensions.size(),    // extensionCount
            mDeviceExtensions.data(),              // ppEnabledExtensionNames
            nullptr,                               // ppEnabledFeatures
    };

    LOG_ALWAYS_FATAL_IF(mCreateDevice(mPhysicalDevice, &deviceInfo, nullptr, &mDevice));

    GET_DEV_PROC(AllocateCommandBuffers);
    GET_DEV_PROC(BeginCommandBuffer);
    GET_DEV_PROC(CmdPipelineBarrier);
    GET_DEV_PROC(CreateCommandPool);
    GET_DEV_PROC(CreateFence);
    GET_DEV_PROC(CreateSemaphore);
    GET_DEV_PROC(DestroyCommandPool);
    GET_DEV_PROC(DestroyDevice);
    GET_DEV_PROC(DestroyFence);
    GET_DEV_PROC(DestroySemaphore);
    GET_DEV_PROC(DeviceWaitIdle);
    GET_DEV_PROC(EndCommandBuffer);
    GET_DEV_PROC(FreeCommandBuffers);
    GET_DEV_PROC(GetDeviceQueue);
    GET_DEV_PROC(GetSemaphoreFdKHR);
    GET_DEV_PROC(ImportSemaphoreFdKHR);
    GET_DEV_PROC(QueueSubmit);
    GET_DEV_PROC(QueueWaitIdle);
    GET_DEV_PROC(ResetCommandBuffer);
    GET_DEV_PROC(ResetFences);
    GET_DEV_PROC(WaitForFences);
    GET_DEV_PROC(FrameBoundaryANDROID);
}

void VulkanManager::initialize() {
    std::lock_guard _lock{mInitializeLock};

    if (mDevice != VK_NULL_HANDLE) {
        return;
    }

    GET_PROC(EnumerateInstanceVersion);
    uint32_t instanceVersion;
    LOG_ALWAYS_FATAL_IF(mEnumerateInstanceVersion(&instanceVersion));
    LOG_ALWAYS_FATAL_IF(instanceVersion < VK_MAKE_VERSION(1, 1, 0));

    this->setupDevice(mExtensions, mPhysicalDeviceFeatures2);

    mGetDeviceQueue(mDevice, mGraphicsQueueIndex, 0, &mGraphicsQueue);

    if (Properties::enablePartialUpdates && Properties::useBufferAge) {
        mSwapBehavior = SwapBehavior::BufferAge;
    }
}

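// Packages the instance/device state into a GrVkBackendContext and asks Skia for a GrDirectContext
// that drives rendering through sSkiaGetProp.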
sk_sp<GrDirectContext> VulkanManager::createContext(const GrContextOptions& options,
                                                    ContextType contextType) {
    GrVkBackendContext backendContext;
    backendContext.fInstance = mInstance;
    backendContext.fPhysicalDevice = mPhysicalDevice;
    backendContext.fDevice = mDevice;
    backendContext.fQueue = mGraphicsQueue;
    backendContext.fGraphicsQueueIndex = mGraphicsQueueIndex;
    backendContext.fMaxAPIVersion = mAPIVersion;
    backendContext.fVkExtensions = &mExtensions;
    backendContext.fDeviceFeatures2 = &mPhysicalDeviceFeatures2;
    backendContext.fGetProc = sSkiaGetProp;

    return GrDirectContext::MakeVulkan(backendContext, options);
}

VkFunctorInitParams VulkanManager::getVkFunctorInitParams() const {
    return VkFunctorInitParams{
            .instance = mInstance,
            .physical_device = mPhysicalDevice,
            .device = mDevice,
            .queue = mGraphicsQueue,
            .graphics_queue_index = mGraphicsQueueIndex,
            .api_version = mAPIVersion,
            .enabled_instance_extension_names = mInstanceExtensions.data(),
            .enabled_instance_extension_names_length =
                    static_cast<uint32_t>(mInstanceExtensions.size()),
            .enabled_device_extension_names = mDeviceExtensions.data(),
            .enabled_device_extension_names_length =
                    static_cast<uint32_t>(mDeviceExtensions.size()),
            .device_features_2 = &mPhysicalDeviceFeatures2,
    };
}

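// Dequeues the next buffer from the surface. If the buffer's dequeue fence has not signaled yet,
// the fence is imported as a temporary VkSemaphore that the SkSurface waits on before rendering
// into the buffer.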
Frame VulkanManager::dequeueNextBuffer(VulkanSurface* surface) {
    VulkanSurface::NativeBufferInfo* bufferInfo = surface->dequeueNativeBuffer();

    if (bufferInfo == nullptr) {
        ALOGE("VulkanSurface::dequeueNativeBuffer called with an invalid surface!");
        return Frame(-1, -1, 0);
    }

    LOG_ALWAYS_FATAL_IF(!bufferInfo->dequeued);

    if (bufferInfo->dequeue_fence != -1) {
        struct sync_file_info* finfo = sync_file_info(bufferInfo->dequeue_fence);
        bool isSignalPending = false;
        if (finfo != NULL) {
            isSignalPending = finfo->status != 1;
            sync_file_info_free(finfo);
        }
        if (isSignalPending) {
            int fence_clone = dup(bufferInfo->dequeue_fence);
            if (fence_clone == -1) {
                ALOGE("dup(fence) failed, stalling until signalled: %s (%d)", strerror(errno),
                      errno);
                sync_wait(bufferInfo->dequeue_fence, -1 /* forever */);
            } else {
                VkSemaphoreCreateInfo semaphoreInfo;
                semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
                semaphoreInfo.pNext = nullptr;
                semaphoreInfo.flags = 0;
                VkSemaphore semaphore;
                VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
                if (err != VK_SUCCESS) {
                    ALOGE("Failed to create import semaphore, err: %d", err);
                    close(fence_clone);
                    sync_wait(bufferInfo->dequeue_fence, -1 /* forever */);
                } else {
                    VkImportSemaphoreFdInfoKHR importInfo;
                    importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
                    importInfo.pNext = nullptr;
                    importInfo.semaphore = semaphore;
                    importInfo.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
                    importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
                    importInfo.fd = fence_clone;

                    err = mImportSemaphoreFdKHR(mDevice, &importInfo);
                    if (err != VK_SUCCESS) {
                        ALOGE("Failed to import semaphore, err: %d", err);
                        mDestroySemaphore(mDevice, semaphore, nullptr);
                        close(fence_clone);
                        sync_wait(bufferInfo->dequeue_fence, -1 /* forever */);
                    } else {
                        GrBackendSemaphore backendSemaphore;
                        backendSemaphore.initVulkan(semaphore);
                        // Skia will take ownership of the VkSemaphore and delete it once the wait
                        // has finished. The VkSemaphore also owns the imported fd, so it will
                        // close the fd when it is deleted.
                        bufferInfo->skSurface->wait(1, &backendSemaphore);
                        // The following flush blocks the GPU immediately instead of waiting for
                        // other drawing ops. It seems dequeue_fence is not respected otherwise.
                        // TODO: remove the flush after finding why backendSemaphore is not working.
                        bufferInfo->skSurface->flushAndSubmit();
                    }
                }
            }
        }
    }

    int bufferAge = (mSwapBehavior == SwapBehavior::Discard) ? 0 : surface->getCurrentBuffersAge();
    return Frame(surface->logicalWidth(), surface->logicalHeight(), bufferAge);
}

struct DestroySemaphoreInfo {
    PFN_vkDestroySemaphore mDestroyFunction;
    VkDevice mDevice;
    VkSemaphore mSemaphore;
    // We need to make sure we don't delete the VkSemaphore until it is done being used by both Skia
    // (including by the GPU) and inside the VulkanManager. So we always start with two refs, one
    // owned by Skia and one owned by the VulkanManager. The refs are decremented each time
    // destroy_semaphore is called with this object. Skia will call destroy_semaphore once it is
    // done with the semaphore and the GPU has finished work on the semaphore. The VulkanManager
    // calls destroy_semaphore after sending the semaphore to Skia and exporting it if need be.
    int mRefs = 2;

    DestroySemaphoreInfo(PFN_vkDestroySemaphore destroyFunction, VkDevice device,
                         VkSemaphore semaphore)
            : mDestroyFunction(destroyFunction), mDevice(device), mSemaphore(semaphore) {}
};

static void destroy_semaphore(void* context) {
    DestroySemaphoreInfo* info = reinterpret_cast<DestroySemaphoreInfo*>(context);
    --info->mRefs;
    if (!info->mRefs) {
        info->mDestroyFunction(info->mDevice, info->mSemaphore, nullptr);
        delete info;
    }
}

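// Flushes the frame's remaining work to the GPU with an exportable semaphore attached to the
// flush. The semaphore is stashed in mSwapSemaphore so that swapBuffers() can export it as the
// present fence fd.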
nsecs_t VulkanManager::finishFrame(SkSurface* surface) {
    ATRACE_NAME("Vulkan finish frame");
    ALOGE_IF(mSwapSemaphore != VK_NULL_HANDLE || mDestroySemaphoreContext != nullptr,
             "finishFrame already has an outstanding semaphore");

    VkExportSemaphoreCreateInfo exportInfo;
    exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
    exportInfo.pNext = nullptr;
    exportInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = &exportInfo;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
    ALOGE_IF(VK_SUCCESS != err, "VulkanManager::makeSwapSemaphore(): Failed to create semaphore");

    GrBackendSemaphore backendSemaphore;
    backendSemaphore.initVulkan(semaphore);

    GrFlushInfo flushInfo;
    if (err == VK_SUCCESS) {
        mDestroySemaphoreContext = new DestroySemaphoreInfo(mDestroySemaphore, mDevice, semaphore);
        flushInfo.fNumSemaphores = 1;
        flushInfo.fSignalSemaphores = &backendSemaphore;
        flushInfo.fFinishedProc = destroy_semaphore;
        flushInfo.fFinishedContext = mDestroySemaphoreContext;
    } else {
        semaphore = VK_NULL_HANDLE;
    }
    GrSemaphoresSubmitted submitted =
            surface->flush(SkSurface::BackendSurfaceAccess::kPresent, flushInfo);
    GrDirectContext* context = GrAsDirectContext(surface->recordingContext());
    ALOGE_IF(!context, "Surface is not backed by gpu");
    context->submit();
    const nsecs_t submissionTime = systemTime();
    if (semaphore != VK_NULL_HANDLE) {
        if (submitted == GrSemaphoresSubmitted::kYes) {
            mSwapSemaphore = semaphore;
            if (mFrameBoundaryANDROID) {
                // retrieve VkImage used as render target
                VkImage image = VK_NULL_HANDLE;
                GrBackendRenderTarget backendRenderTarget =
                        surface->getBackendRenderTarget(SkSurface::kFlushRead_BackendHandleAccess);
                if (backendRenderTarget.isValid()) {
                    GrVkImageInfo info;
                    if (backendRenderTarget.getVkImageInfo(&info)) {
                        image = info.fImage;
                    } else {
                        ALOGE("Frame boundary: backend is not vulkan");
                    }
                } else {
                    ALOGE("Frame boundary: invalid backend render target");
                }
                // frameBoundaryANDROID needs to know about mSwapSemaphore, but
                // it won't wait on it.
                mFrameBoundaryANDROID(mDevice, mSwapSemaphore, image);
            }
        } else {
            destroy_semaphore(mDestroySemaphoreContext);
            mDestroySemaphoreContext = nullptr;
        }
    }
    skiapipeline::ShaderCache::get().onVkFrameFlushed(context);

    return submissionTime;
}

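// Exports mSwapSemaphore as a sync fd and hands it to the surface as the present fence. If the
// semaphore submission failed, falls back to waiting for the graphics queue to go idle.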
void VulkanManager::swapBuffers(VulkanSurface* surface, const SkRect& dirtyRect) {
    if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
        ATRACE_NAME("Finishing GPU work");
        mDeviceWaitIdle(mDevice);
    }

    int fenceFd = -1;
    if (mSwapSemaphore != VK_NULL_HANDLE) {
        VkSemaphoreGetFdInfoKHR getFdInfo;
        getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
        getFdInfo.pNext = nullptr;
        getFdInfo.semaphore = mSwapSemaphore;
        getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

        VkResult err = mGetSemaphoreFdKHR(mDevice, &getFdInfo, &fenceFd);
        ALOGE_IF(VK_SUCCESS != err, "VulkanManager::swapBuffers(): Failed to get semaphore Fd");
    } else {
        ALOGE("VulkanManager::swapBuffers(): Semaphore submission failed");

        std::lock_guard<std::mutex> lock(mGraphicsQueueMutex);
        mQueueWaitIdle(mGraphicsQueue);
    }
    if (mDestroySemaphoreContext) {
        destroy_semaphore(mDestroySemaphoreContext);
    }

    surface->presentCurrentBuffer(dirtyRect, fenceFd);
    mSwapSemaphore = VK_NULL_HANDLE;
    mDestroySemaphoreContext = nullptr;
}

void VulkanManager::destroySurface(VulkanSurface* surface) {
    // Make sure all submit commands have finished before starting to destroy objects.
    if (VK_NULL_HANDLE != mGraphicsQueue) {
        std::lock_guard<std::mutex> lock(mGraphicsQueueMutex);
        mQueueWaitIdle(mGraphicsQueue);
    }
    mDeviceWaitIdle(mDevice);

    delete surface;
}

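// Wraps an ANativeWindow in a VulkanSurface tied to this manager's context. Requires that
// initialize() has already been called.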
VulkanSurface* VulkanManager::createSurface(ANativeWindow* window, ColorMode colorMode,
                                            sk_sp<SkColorSpace> surfaceColorSpace,
                                            SkColorType surfaceColorType,
                                            GrDirectContext* grContext, uint32_t extraBuffers) {
    LOG_ALWAYS_FATAL_IF(!hasVkContext(), "Not initialized");
    if (!window) {
        return nullptr;
    }

    return VulkanSurface::Create(window, colorMode, surfaceColorType, surfaceColorSpace, grContext,
                                 *this, extraBuffers);
}

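// Imports a SYNC_FD fence into a temporary VkSemaphore and makes the GrDirectContext wait on it
// before executing further GPU work.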
status_t VulkanManager::fenceWait(int fence, GrDirectContext* grContext) {
    if (!hasVkContext()) {
        ALOGE("VulkanManager::fenceWait: VkDevice not initialized");
        return INVALID_OPERATION;
    }

    // Block GPU on the fence.
    int fenceFd = ::dup(fence);
    if (fenceFd == -1) {
        ALOGE("VulkanManager::fenceWait: error dup'ing fence fd: %d", errno);
        return -errno;
    }

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
    if (VK_SUCCESS != err) {
        close(fenceFd);
        ALOGE("Failed to create import semaphore, err: %d", err);
        return UNKNOWN_ERROR;
    }
    VkImportSemaphoreFdInfoKHR importInfo;
    importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
    importInfo.pNext = nullptr;
    importInfo.semaphore = semaphore;
    importInfo.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
    importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
    importInfo.fd = fenceFd;

    err = mImportSemaphoreFdKHR(mDevice, &importInfo);
    if (VK_SUCCESS != err) {
        mDestroySemaphore(mDevice, semaphore, nullptr);
        close(fenceFd);
        ALOGE("Failed to import semaphore, err: %d", err);
        return UNKNOWN_ERROR;
    }

    GrBackendSemaphore beSemaphore;
    beSemaphore.initVulkan(semaphore);

    // Skia will take ownership of the VkSemaphore and delete it once the wait has finished. The
    // VkSemaphore also owns the imported fd, so it will close the fd when it is deleted.
    grContext->wait(1, &beSemaphore);
    grContext->flushAndSubmit();

    return OK;
}

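// Flushes the context with a signal semaphore attached and exports that semaphore as a sync fd
// that the caller can use as a release fence.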
status_t VulkanManager::createReleaseFence(int* nativeFence, GrDirectContext* grContext) {
    *nativeFence = -1;
    if (!hasVkContext()) {
        ALOGE("VulkanManager::createReleaseFence: VkDevice not initialized");
        return INVALID_OPERATION;
    }

    VkExportSemaphoreCreateInfo exportInfo;
    exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
    exportInfo.pNext = nullptr;
    exportInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = &exportInfo;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
    if (VK_SUCCESS != err) {
        ALOGE("VulkanManager::createReleaseFence: Failed to create semaphore");
        return INVALID_OPERATION;
    }

    GrBackendSemaphore backendSemaphore;
    backendSemaphore.initVulkan(semaphore);

    DestroySemaphoreInfo* destroyInfo =
            new DestroySemaphoreInfo(mDestroySemaphore, mDevice, semaphore);
    // Even if Skia fails to submit the semaphore, it will still call the destroy_semaphore
    // callback, which removes its ref to the semaphore. The VulkanManager must still release its
    // own ref when it is done with the semaphore.
    GrFlushInfo flushInfo;
    flushInfo.fNumSemaphores = 1;
    flushInfo.fSignalSemaphores = &backendSemaphore;
    flushInfo.fFinishedProc = destroy_semaphore;
    flushInfo.fFinishedContext = destroyInfo;
    GrSemaphoresSubmitted submitted = grContext->flush(flushInfo);
    grContext->submit();

    if (submitted == GrSemaphoresSubmitted::kNo) {
        ALOGE("VulkanManager::createReleaseFence: Failed to submit semaphore");
        destroy_semaphore(destroyInfo);
        return INVALID_OPERATION;
    }

    VkSemaphoreGetFdInfoKHR getFdInfo;
    getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
    getFdInfo.pNext = nullptr;
    getFdInfo.semaphore = semaphore;
    getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    int fenceFd = 0;

    err = mGetSemaphoreFdKHR(mDevice, &getFdInfo, &fenceFd);
    destroy_semaphore(destroyInfo);
    if (VK_SUCCESS != err) {
        ALOGE("VulkanManager::createReleaseFence: Failed to get semaphore Fd");
        return INVALID_OPERATION;
    }
    *nativeFence = fenceFd;

    return OK;
}

} /* namespace renderthread */
} /* namespace uirenderer */
} /* namespace android */