diff options
| author | Jeremy Hayes <jeremy@lunarg.com> | 2016-04-13 16:20:24 -0600 |
|---|---|---|
| committer | Jeremy Hayes <jeremy@lunarg.com> | 2016-04-18 11:44:21 -0600 |
| commit | 6998a9bdd4e212d198240c5f97e27ab17eb04226 (patch) | |
| tree | ed3369ff23ae6658a1eff935635fb8d0ad065bcf | |
| parent | 98fa3530d2cfc00a4fe212a11590964c415a11e4 (diff) | |
| download | usermoji-6998a9bdd4e212d198240c5f97e27ab17eb04226.tar.xz | |
layers: core_validation - use std mutex
Change-Id: I88af1e86c0ef93dbeab303977b8190ad1968c00d
| -rw-r--r-- | layers/core_validation.cpp | 507 |
1 file changed, 232 insertions(+), 275 deletions(-)
diff --git a/layers/core_validation.cpp b/layers/core_validation.cpp index b890bb79..24d415e8 100644 --- a/layers/core_validation.cpp +++ b/layers/core_validation.cpp @@ -43,6 +43,7 @@ #include <iostream> #include <list> #include <map> +#include <mutex> #include <set> #include <stdio.h> #include <stdlib.h> @@ -239,8 +240,7 @@ struct shader_module { }; // TODO : This can be much smarter, using separate locks for separate global data -static int globalLockInitialized = 0; -static loader_platform_thread_mutex globalLock; +static std::mutex global_lock; #if MTMERGESOURCE // MTMERGESOURCE - start of direct pull static VkDeviceMemory *get_object_mem_binding(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) { @@ -4543,10 +4543,6 @@ static void init_core_validation(layer_data *instance_data, const VkAllocationCa layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation"); - if (!globalLockInitialized) { - loader_platform_thread_create_mutex(&globalLock); - globalLockInitialized = 1; - } } VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL @@ -4591,7 +4587,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table; pTable->DestroyInstance(instance, pAllocator); - loader_platform_thread_lock_mutex(&globalLock); + std::lock_guard<std::mutex> lock(global_lock); // Clean up logging callback, if any while (my_data->logging_callback.size() > 0) { VkDebugReportCallbackEXT callback = my_data->logging_callback.back(); @@ -4602,12 +4598,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance layer_debug_report_destroy_instance(my_data->report_data); delete my_data->instance_dispatch_table; layer_data_map.erase(key); - loader_platform_thread_unlock_mutex(&globalLock); - if (layer_data_map.empty()) { - // Release mutex when destroying last instance. 
- loader_platform_thread_delete_mutex(&globalLock); - globalLockInitialized = 0; - } } static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) { @@ -4651,7 +4641,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice g return result; } - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map); layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map); @@ -4677,7 +4667,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice g } // Store physical device mem limits into device layer_data struct my_instance_data->instance_dispatch_table->GetPhysicalDeviceMemoryProperties(gpu, &my_device_data->phys_dev_mem_props); - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); ValidateLayerOrdering(*pCreateInfo); @@ -4691,7 +4681,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, cons dispatch_key key = get_dispatch_key(device); layer_data *dev_data = get_my_data_ptr(key, layer_data_map); // Free all the memory - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); deletePipelines(dev_data); deleteRenderPasses(dev_data); deleteCommandBuffers(dev_data); @@ -4705,10 +4695,10 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, cons dev_data->bufferMap.clear(); // Queues persist until device is destroyed dev_data->queueMap.clear(); - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); #if MTMERGESOURCE bool skipCall = false; - loader_platform_thread_lock_mutex(&globalLock); + lock.lock(); log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "Printing List details prior to vkDestroyDevice()"); 
log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, @@ -4732,7 +4722,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, cons } } layer_debug_report_destroy_device(device); - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); #if DISPATCH_MAP_DEBUG fprintf(stderr, "Device: %p, key: %p\n", device, key); @@ -5159,7 +5149,7 @@ vkQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, GLOBAL_CB_NODE *pCBNode = NULL; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map); VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); // First verify that fence is not in use if ((fence != VK_NULL_HANDLE) && (submitCount != 0) && dev_data->fenceMap[fence].in_use.load()) { skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, @@ -5236,7 +5226,7 @@ vkQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, } // Update cmdBuffer-related data structs and mark fence in-use trackCommandBuffers(dev_data, queue, submitCount, pSubmits, fence); - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) result = dev_data->device_dispatch_table->QueueSubmit(queue, submitCount, pSubmits, fence); @@ -5249,10 +5239,9 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory(VkDevice device, layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); VkResult result = my_data->device_dispatch_table->AllocateMemory(device, pAllocateInfo, pAllocator, pMemory); // TODO : Track allocations and overall size here - loader_platform_thread_lock_mutex(&globalLock); + std::lock_guard<std::mutex> lock(global_lock); add_mem_obj_info(my_data, device, *pMemory, pAllocateInfo); print_mem_list(my_data); - 
loader_platform_thread_unlock_mutex(&globalLock); return result; } @@ -5267,11 +5256,11 @@ vkFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *p // buffers (on host or device) for anything other than destroying those objects will result in // undefined behavior. - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); freeMemObjInfo(my_data, device, mem, false); print_mem_list(my_data); printCBList(my_data); - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); my_data->device_dispatch_table->FreeMemory(device, mem, pAllocator); } @@ -5437,18 +5426,18 @@ vkWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, Vk bool skip_call = false; #if MTMERGESOURCE // Verify fence status of submitted fences - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); for (uint32_t i = 0; i < fenceCount; i++) { skip_call |= verifyFenceStatus(device, pFences[i], "vkWaitForFences"); } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (skip_call) return VK_ERROR_VALIDATION_FAILED_EXT; #endif VkResult result = dev_data->device_dispatch_table->WaitForFences(device, fenceCount, pFences, waitAll, timeout); if (result == VK_SUCCESS) { - loader_platform_thread_lock_mutex(&globalLock); + lock.lock(); // When we know that all fences are complete we can clean/remove their CBs if (waitAll || fenceCount == 1) { for (uint32_t i = 0; i < fenceCount; ++i) { @@ -5466,7 +5455,7 @@ vkWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, Vk // NOTE : Alternate case not handled here is when some fences have completed. In // this case for app to guarantee which fences completed it will have to call // vkGetFenceStatus() at which point we'll clean/remove their CBs if complete. 
- loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); } if (skip_call) return VK_ERROR_VALIDATION_FAILED_EXT; @@ -5478,15 +5467,15 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus(VkDevice device, bool skipCall = false; VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; #if MTMERGESOURCE - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); skipCall = verifyFenceStatus(device, fence, "vkGetFenceStatus"); - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (skipCall) return result; #endif result = dev_data->device_dispatch_table->GetFenceStatus(device, fence); bool skip_call = false; - loader_platform_thread_lock_mutex(&globalLock); + lock.lock(); if (result == VK_SUCCESS) { #if MTMERGESOURCE update_fence_tracking(dev_data, fence); @@ -5498,7 +5487,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus(VkDevice device, } decrementResources(dev_data, 1, &fence); } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (skip_call) return VK_ERROR_VALIDATION_FAILED_EXT; return result; @@ -5508,7 +5497,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue(VkDevice device, uin VkQueue *pQueue) { layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); dev_data->device_dispatch_table->GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue); - loader_platform_thread_lock_mutex(&globalLock); + std::lock_guard<std::mutex> lock(global_lock); // Add queue to tracking set only if it is new auto result = dev_data->queues.emplace(*pQueue); @@ -5520,15 +5509,13 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue(VkDevice device, uin pQNode->lastSubmittedId = 0; #endif } - - loader_platform_thread_unlock_mutex(&globalLock); } VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle(VkQueue queue) { layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map); 
decrementResources(dev_data, queue); bool skip_call = false; - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); // Iterate over local set since we erase set members as we go in for loop auto local_cb_set = dev_data->queueMap[queue].inFlightCmdBuffers; for (auto cmdBuffer : local_cb_set) { @@ -5536,15 +5523,15 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle(VkQueue queue) { removeInFlightCmdBuffer(dev_data, cmdBuffer, queue); } dev_data->queueMap[queue].inFlightCmdBuffers.clear(); - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (skip_call) return VK_ERROR_VALIDATION_FAILED_EXT; VkResult result = dev_data->device_dispatch_table->QueueWaitIdle(queue); #if MTMERGESOURCE if (VK_SUCCESS == result) { - loader_platform_thread_lock_mutex(&globalLock); + lock.lock(); retire_queue_fences(dev_data, queue); - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); } #endif return result; @@ -5553,7 +5540,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle(VkQueue queue) { VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle(VkDevice device) { bool skip_call = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); for (auto queue : dev_data->queues) { decrementResources(dev_data, queue); if (dev_data->queueMap.find(queue) != dev_data->queueMap.end()) { @@ -5565,15 +5552,15 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle(VkDevice device) skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer); } dev_data->globalInFlightCmdBuffers.clear(); - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (skip_call) return VK_ERROR_VALIDATION_FAILED_EXT; VkResult result = dev_data->device_dispatch_table->DeviceWaitIdle(device); #if MTMERGESOURCE if (VK_SUCCESS == result) { - 
loader_platform_thread_lock_mutex(&globalLock); + lock.lock(); retire_device_fences(dev_data, device); - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); } #endif return result; @@ -5582,7 +5569,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle(VkDevice device) VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) { layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); bool skipCall = false; - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); auto fence_pair = dev_data->fenceMap.find(fence); if (fence_pair != dev_data->fenceMap.end()) { if (fence_pair->second.in_use.load()) { @@ -5592,7 +5579,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyFence(VkDevice device, VkFen } dev_data->fenceMap.erase(fence_pair); } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->DestroyFence(device, fence, pAllocator); @@ -5602,7 +5589,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) { layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); dev_data->device_dispatch_table->DestroySemaphore(device, semaphore, pAllocator); - loader_platform_thread_lock_mutex(&globalLock); + std::lock_guard<std::mutex> lock(global_lock); auto item = dev_data->semaphoreMap.find(semaphore); if (item != dev_data->semaphoreMap.end()) { if (item->second.in_use.load()) { @@ -5612,14 +5599,13 @@ vkDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCal } dev_data->semaphoreMap.erase(semaphore); } - loader_platform_thread_unlock_mutex(&globalLock); // TODO : Clean up any internal data structures using this obj. 
} VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) { layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); bool skip_call = false; - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); auto event_data = dev_data->eventMap.find(event); if (event_data != dev_data->eventMap.end()) { if (event_data->second.in_use.load()) { @@ -5630,7 +5616,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyEvent(VkDevice device, VkEve } dev_data->eventMap.erase(event_data); } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skip_call) dev_data->device_dispatch_table->DestroyEvent(device, event, pAllocator); // TODO : Clean up any internal data structures using this obj. @@ -5649,7 +5635,7 @@ VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults(VkDevice device, VkQueryPoo layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); unordered_map<QueryObject, vector<VkCommandBuffer>> queriesInFlight; GLOBAL_CB_NODE *pCB = nullptr; - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) { pCB = getCBNode(dev_data, cmdBuffer); for (auto queryStatePair : pCB->queryToStateMap) { @@ -5710,7 +5696,7 @@ VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults(VkDevice device, VkQueryPoo } } } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (skip_call) return VK_ERROR_VALIDATION_FAILED_EXT; return dev_data->device_dispatch_table->GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, @@ -5738,26 +5724,24 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) { layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); bool skipCall = 
false; - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); if (!validateIdleBuffer(dev_data, buffer) && !skipCall) { - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); dev_data->device_dispatch_table->DestroyBuffer(device, buffer, pAllocator); - loader_platform_thread_lock_mutex(&globalLock); + lock.lock(); } dev_data->bufferMap.erase(buffer); - loader_platform_thread_unlock_mutex(&globalLock); } VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) { layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); dev_data->device_dispatch_table->DestroyBufferView(device, bufferView, pAllocator); - loader_platform_thread_lock_mutex(&globalLock); + std::lock_guard<std::mutex> lock(global_lock); auto item = dev_data->bufferViewMap.find(bufferView); if (item != dev_data->bufferViewMap.end()) { dev_data->bufferViewMap.erase(item); } - loader_platform_thread_unlock_mutex(&globalLock); } VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) { @@ -5766,7 +5750,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyImage(VkDevice device, VkIma if (!skipCall) dev_data->device_dispatch_table->DestroyImage(device, image, pAllocator); - loader_platform_thread_lock_mutex(&globalLock); + std::lock_guard<std::mutex> lock(global_lock); const auto& entry = dev_data->imageMap.find(image); if (entry != dev_data->imageMap.end()) { // Clear any memory mapping for this image @@ -5784,7 +5768,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyImage(VkDevice device, VkIma } dev_data->imageSubresourceMap.erase(subEntry); } - loader_platform_thread_unlock_mutex(&globalLock); } #if MTMERGESOURCE static bool print_memory_range_error(layer_data *dev_data, const uint64_t object_handle, const uint64_t other_handle, @@ -5832,7 +5815,7 @@ 
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) { layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); // Track objects tied to memory uint64_t buffer_handle = (uint64_t)(buffer); bool skipCall = @@ -5891,7 +5874,7 @@ vkBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDevic } } print_mem_list(dev_data); - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) { result = dev_data->device_dispatch_table->BindBufferMemory(device, buffer, mem, memoryOffset); } @@ -5925,11 +5908,9 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) { layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); - + std::unique_lock<std::mutex> lock(global_lock); my_data->shaderModuleMap.erase(shaderModule); - - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); my_data->device_dispatch_table->DestroyShaderModule(device, shaderModule, pAllocator); } @@ -6006,7 +5987,7 @@ vkFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t comman layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); bool skip_call = false; - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); for (uint32_t i = 0; i < commandBufferCount; i++) { auto cb_pair = dev_data->commandBufferMap.find(pCommandBuffers[i]); skip_call |= checkAndClearCommandBufferInFlight(dev_data, cb_pair->second, "free"); @@ -6024,7 +6005,7 @@ vkFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t comman #if MTMERGESOURCE 
printCBList(dev_data); #endif - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skip_call) dev_data->device_dispatch_table->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers); @@ -6038,10 +6019,9 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(VkDevice devi VkResult result = dev_data->device_dispatch_table->CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool); if (VK_SUCCESS == result) { - loader_platform_thread_lock_mutex(&globalLock); + std::lock_guard<std::mutex> lock(global_lock); dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags; dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex; - loader_platform_thread_unlock_mutex(&globalLock); } return result; } @@ -6052,9 +6032,8 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool(VkDevice device layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); VkResult result = dev_data->device_dispatch_table->CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool); if (result == VK_SUCCESS) { - loader_platform_thread_lock_mutex(&globalLock); + std::lock_guard<std::mutex> lock(global_lock); dev_data->queryPoolMap[*pQueryPool].createInfo = *pCreateInfo; - loader_platform_thread_unlock_mutex(&globalLock); } return result; } @@ -6064,7 +6043,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) { layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); bool skipCall = false; - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); // Verify that command buffers in pool are complete (not in-flight) VkBool32 result = checkAndClearCommandBuffersInFlight(dev_data, commandPool, "destroy command pool with"); // Must remove cmdpool from cmdpoolmap, after removing all cmdbuffers in its 
list from the commandPoolMap @@ -6081,7 +6060,7 @@ vkDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocat } dev_data->commandPoolMap.erase(commandPool); - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (result) return; @@ -6104,13 +6083,12 @@ vkResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolRese // Reset all of the CBs allocated from this pool if (VK_SUCCESS == result) { - loader_platform_thread_lock_mutex(&globalLock); + std::lock_guard<std::mutex> lock(global_lock); auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin(); while (it != dev_data->commandPoolMap[commandPool].commandBuffers.end()) { resetCB(dev_data, (*it)); ++it; } - loader_platform_thread_unlock_mutex(&globalLock); } return result; } @@ -6119,7 +6097,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetFences(VkDevice device, ui layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; bool skipCall = false; - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); for (uint32_t i = 0; i < fenceCount; ++i) { #if MTMERGESOURCE // Reset fence state in fenceCreateInfo structure @@ -6145,7 +6123,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetFences(VkDevice device, ui "Fence %#" PRIx64 " is in use by a command buffer.", reinterpret_cast<const uint64_t &>(pFences[i])); } } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) result = dev_data->device_dispatch_table->ResetFences(device, fenceCount, pFences); return result; @@ -6154,7 +6132,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetFences(VkDevice device, ui VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) { layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); 
- loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); auto fbNode = dev_data->frameBufferMap.find(framebuffer); if (fbNode != dev_data->frameBufferMap.end()) { for (auto cb : fbNode->second.referencingCmdBuffers) { @@ -6168,7 +6146,7 @@ vkDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocat delete [] fbNode->second.createInfo.pAttachments; dev_data->frameBufferMap.erase(fbNode); } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); dev_data->device_dispatch_table->DestroyFramebuffer(device, framebuffer, pAllocator); } @@ -6176,9 +6154,8 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) { layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); dev_data->device_dispatch_table->DestroyRenderPass(device, renderPass, pAllocator); - loader_platform_thread_lock_mutex(&globalLock); + std::lock_guard<std::mutex> lock(global_lock); dev_data->renderPassMap.erase(renderPass); - loader_platform_thread_unlock_mutex(&globalLock); } VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo, @@ -6188,11 +6165,10 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer(VkDevice device, c VkResult result = dev_data->device_dispatch_table->CreateBuffer(device, pCreateInfo, pAllocator, pBuffer); if (VK_SUCCESS == result) { - loader_platform_thread_lock_mutex(&globalLock); + std::lock_guard<std::mutex> lock(global_lock); // TODO : This doesn't create deep copy of pQueueFamilyIndices so need to fix that if/when we want that data to be valid dev_data->bufferMap[*pBuffer].createInfo = *pCreateInfo; dev_data->bufferMap[*pBuffer].in_use.store(0); - loader_platform_thread_unlock_mutex(&globalLock); } return result; } @@ -6202,7 +6178,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView(VkDevice 
devic layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); VkResult result = dev_data->device_dispatch_table->CreateBufferView(device, pCreateInfo, pAllocator, pView); if (VK_SUCCESS == result) { - loader_platform_thread_lock_mutex(&globalLock); + std::lock_guard<std::mutex> lock(global_lock); dev_data->bufferViewMap[*pView] = VkBufferViewCreateInfo(*pCreateInfo); #if MTMERGESOURCE // In order to create a valid buffer view, the buffer must have been created with at least one of the @@ -6211,7 +6187,6 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView(VkDevice devic VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, false, "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT"); #endif - loader_platform_thread_unlock_mutex(&globalLock); } return result; } @@ -6223,7 +6198,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage(VkDevice device, co VkResult result = dev_data->device_dispatch_table->CreateImage(device, pCreateInfo, pAllocator, pImage); if (VK_SUCCESS == result) { - loader_platform_thread_lock_mutex(&globalLock); + std::lock_guard<std::mutex> lock(global_lock); IMAGE_LAYOUT_NODE image_node; image_node.layout = pCreateInfo->initialLayout; image_node.format = pCreateInfo->format; @@ -6231,13 +6206,12 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage(VkDevice device, co ImageSubresourcePair subpair = {*pImage, false, VkImageSubresource()}; dev_data->imageSubresourceMap[*pImage].push_back(subpair); dev_data->imageLayoutMap[subpair] = image_node; - loader_platform_thread_unlock_mutex(&globalLock); } return result; } static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) { - /* expects globalLock to be held by caller */ + /* expects global_lock to be held by caller */ auto image_node_it = dev_data->imageMap.find(image); if (image_node_it != dev_data->imageMap.end()) { @@ -6259,7 
+6233,7 @@ static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourc // values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS. static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range, VkImage image) { - /* expects globalLock to be held by caller */ + /* expects global_lock to be held by caller */ *levels = range.levelCount; *layers = range.layerCount; @@ -6279,7 +6253,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(VkDevice device layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); VkResult result = dev_data->device_dispatch_table->CreateImageView(device, pCreateInfo, pAllocator, pView); if (VK_SUCCESS == result) { - loader_platform_thread_lock_mutex(&globalLock); + std::lock_guard<std::mutex> lock(global_lock); VkImageViewCreateInfo localCI = VkImageViewCreateInfo(*pCreateInfo); ResolveRemainingLevelsLayers(dev_data, &localCI.subresourceRange, pCreateInfo->image); dev_data->imageViewMap[*pView] = localCI; @@ -6290,7 +6264,6 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(VkDevice device VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, false, "vkCreateImageView()", "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT]_BIT"); #endif - loader_platform_thread_unlock_mutex(&globalLock); } return result; } @@ -6300,7 +6273,7 @@ vkCreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAll layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); VkResult result = dev_data->device_dispatch_table->CreateFence(device, pCreateInfo, pAllocator, pFence); if (VK_SUCCESS == result) { - loader_platform_thread_lock_mutex(&globalLock); + std::lock_guard<std::mutex> lock(global_lock); FENCE_NODE *pFN = &dev_data->fenceMap[*pFence]; #if MTMERGESOURCE memset(pFN, 0, sizeof(MT_FENCE_INFO)); @@ -6310,7 +6283,6 @@ vkCreateFence(VkDevice 
device, const VkFenceCreateInfo *pCreateInfo, const VkAll } #endif pFN->in_use.store(0); - loader_platform_thread_unlock_mutex(&globalLock); } return result; } @@ -6380,7 +6352,7 @@ vkCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); uint32_t i = 0; - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); for (i = 0; i < count; i++) { pPipeNode[i] = new PIPELINE_NODE; @@ -6389,20 +6361,20 @@ vkCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32 } if (!skipCall) { - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); result = dev_data->device_dispatch_table->CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines); - loader_platform_thread_lock_mutex(&globalLock); + lock.lock(); for (i = 0; i < count; i++) { pPipeNode[i]->pipeline = pPipelines[i]; dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i]; } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); } else { for (i = 0; i < count; i++) { delete pPipeNode[i]; } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); return VK_ERROR_VALIDATION_FAILED_EXT; } return result; @@ -6420,7 +6392,7 @@ vkCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); uint32_t i = 0; - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); for (i = 0; i < count; i++) { // TODO: Verify compute stage bits @@ -6434,21 +6406,21 @@ vkCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_ } if (!skipCall) { - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); result = dev_data->device_dispatch_table->CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator, 
pPipelines); - loader_platform_thread_lock_mutex(&globalLock); + lock.lock(); for (i = 0; i < count; i++) { pPipeNode[i]->pipeline = pPipelines[i]; dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i]; } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); } else { for (i = 0; i < count; i++) { // Clean up any locally allocated data structures delete pPipeNode[i]; } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); return VK_ERROR_VALIDATION_FAILED_EXT; } return result; @@ -6459,9 +6431,8 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler(VkDevice device, layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); VkResult result = dev_data->device_dispatch_table->CreateSampler(device, pCreateInfo, pAllocator, pSampler); if (VK_SUCCESS == result) { - loader_platform_thread_lock_mutex(&globalLock); + std::lock_guard<std::mutex> lock(global_lock); dev_data->sampleMap[*pSampler] = unique_ptr<SAMPLER_NODE>(new SAMPLER_NODE(pSampler, pCreateInfo)); - loader_platform_thread_unlock_mutex(&globalLock); } return result; } @@ -6531,9 +6502,8 @@ vkCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateIn pNewNode->endIndex = 0; } // Put new node at Head of global Layer list - loader_platform_thread_lock_mutex(&globalLock); + std::lock_guard<std::mutex> lock(global_lock); dev_data->descriptorSetLayoutMap[*pSetLayout] = pNewNode; - loader_platform_thread_unlock_mutex(&globalLock); } return result; } @@ -6569,7 +6539,7 @@ VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout(VkDevice device, const VkP } VkResult result = dev_data->device_dispatch_table->CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout); if (VK_SUCCESS == result) { - loader_platform_thread_lock_mutex(&globalLock); + std::lock_guard<std::mutex> lock(global_lock); // TODOSC : Merge capture of the setLayouts per pipeline PIPELINE_LAYOUT_NODE &plNode = 
dev_data->pipelineLayoutMap[*pPipelineLayout]; plNode.descriptorSetLayouts.resize(pCreateInfo->setLayoutCount); @@ -6580,7 +6550,6 @@ VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout(VkDevice device, const VkP for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) { plNode.pushConstantRanges[i] = pCreateInfo->pPushConstantRanges[i]; } - loader_platform_thread_unlock_mutex(&globalLock); } return result; } @@ -6603,9 +6572,8 @@ vkCreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreat "Out of memory while attempting to allocate DESCRIPTOR_POOL_NODE in vkCreateDescriptorPool()")) return VK_ERROR_VALIDATION_FAILED_EXT; } else { - loader_platform_thread_lock_mutex(&globalLock); + std::lock_guard<std::mutex> lock(global_lock); dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode; - loader_platform_thread_unlock_mutex(&globalLock); } } else { // Need to do anything if pool create fails? @@ -6618,9 +6586,8 @@ vkResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescri layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); VkResult result = dev_data->device_dispatch_table->ResetDescriptorPool(device, descriptorPool, flags); if (VK_SUCCESS == result) { - loader_platform_thread_lock_mutex(&globalLock); + std::lock_guard<std::mutex> lock(global_lock); clearDescriptorPool(dev_data, device, descriptorPool, flags); - loader_platform_thread_unlock_mutex(&globalLock); } return result; } @@ -6630,7 +6597,7 @@ vkAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAl bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); // Verify that requested descriptorSets are available in pool DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool); if (!pPoolNode) { @@ -6642,12 +6609,12 @@ 
vkAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAl skipCall |= validate_descriptor_availability_in_pool(dev_data, pPoolNode, pAllocateInfo->descriptorSetCount, pAllocateInfo->pSetLayouts); } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (skipCall) return VK_ERROR_VALIDATION_FAILED_EXT; VkResult result = dev_data->device_dispatch_table->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets); if (VK_SUCCESS == result) { - loader_platform_thread_lock_mutex(&globalLock); + lock.lock(); DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool); if (pPoolNode) { if (pAllocateInfo->descriptorSetCount == 0) { @@ -6666,7 +6633,7 @@ vkAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAl VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", "Out of memory while attempting to allocate SET_NODE in vkAllocateDescriptorSets()")) { - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); return VK_ERROR_VALIDATION_FAILED_EXT; } } else { @@ -6685,7 +6652,7 @@ vkAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAl "Unable to find set layout node for layout %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call", (uint64_t)pAllocateInfo->pSetLayouts[i])) { - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); return VK_ERROR_VALIDATION_FAILED_EXT; } } @@ -6700,7 +6667,7 @@ vkAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAl } } } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); } return result; } @@ -6710,7 +6677,7 @@ vkFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); // Make sure that no sets being destroyed are in-flight - 
loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); for (uint32_t i = 0; i < count; ++i) skipCall |= validateIdleDescriptorSet(dev_data, pDescriptorSets[i], "vkFreeDescriptorSets"); DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, descriptorPool); @@ -6721,12 +6688,12 @@ vkFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t "It is invalid to call vkFreeDescriptorSets() with a pool created without setting " "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT."); } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (skipCall) return VK_ERROR_VALIDATION_FAILED_EXT; VkResult result = dev_data->device_dispatch_table->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets); if (VK_SUCCESS == result) { - loader_platform_thread_lock_mutex(&globalLock); + lock.lock(); // Update available descriptor sets in pool pPoolNode->availableSets += count; @@ -6743,7 +6710,7 @@ vkFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t pPoolNode->availableDescriptorTypeCount[typeIndex] += poolSizeCount; } } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); } // TODO : Any other clean-up or book-keeping to do here? 
return result; @@ -6754,9 +6721,9 @@ vkUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkW uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) { // dsUpdate will return true only if a bailout error occurs, so we want to call down tree when update returns false layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); bool rtn = dsUpdate(dev_data, device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies); - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!rtn) { dev_data->device_dispatch_table->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies); @@ -6768,7 +6735,7 @@ vkAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCr layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); VkResult result = dev_data->device_dispatch_table->AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer); if (VK_SUCCESS == result) { - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); auto const &cp_it = dev_data->commandPoolMap.find(pCreateInfo->commandPool); if (cp_it != dev_data->commandPoolMap.end()) { for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) { @@ -6785,7 +6752,7 @@ vkAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCr #if MTMERGESOURCE printCBList(dev_data); #endif - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); } return result; } @@ -6794,7 +6761,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - 
loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); // Validate command buffer level GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); if (pCB) { @@ -6915,7 +6882,7 @@ vkBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginIn (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "In vkBeginCommandBuffer() and unable to find CommandBuffer Node for CB %p!", (void *)commandBuffer); } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (skipCall) { return VK_ERROR_VALIDATION_FAILED_EXT; } @@ -6928,7 +6895,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer(VkCommandBuffe bool skipCall = false; VkResult result = VK_SUCCESS; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); if (pCB) { if (pCB->state != CB_RECORDING) { @@ -6942,9 +6909,9 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer(VkCommandBuffe } } if (!skipCall) { - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); result = dev_data->device_dispatch_table->EndCommandBuffer(commandBuffer); - loader_platform_thread_lock_mutex(&globalLock); + lock.lock(); if (VK_SUCCESS == result) { pCB->state = CB_RECORDED; // Reset CB status flags @@ -6954,7 +6921,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer(VkCommandBuffe } else { result = VK_ERROR_VALIDATION_FAILED_EXT; } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); return result; } @@ -6962,7 +6929,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) { bool skip_call = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - 
loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); VkCommandPool cmdPool = pCB->createInfo.commandPool; if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) { @@ -6973,14 +6940,14 @@ vkResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags fl (uint64_t)commandBuffer, (uint64_t)cmdPool); } skip_call |= checkAndClearCommandBufferInFlight(dev_data, pCB, "reset"); - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (skip_call) return VK_ERROR_VALIDATION_FAILED_EXT; VkResult result = dev_data->device_dispatch_table->ResetCommandBuffer(commandBuffer, flags); if (VK_SUCCESS == result) { - loader_platform_thread_lock_mutex(&globalLock); + lock.lock(); resetCB(dev_data, commandBuffer); - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); } return result; } @@ -6992,7 +6959,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); if (pCB) { skipCall |= addCmd(dev_data, pCB, CMD_BINDPIPELINE, "vkCmdBindPipeline()"); @@ -7016,7 +6983,7 @@ vkCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBin "Attempt to bind Pipeline %#" PRIxLEAST64 " that doesn't exist!", (uint64_t)(pipeline)); } } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline); } @@ -7025,7 +6992,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetViewport(VkCommandBuffer commandBuffer, 
uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); if (pCB) { skipCall |= addCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()"); @@ -7033,7 +7000,7 @@ vkCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t pCB->viewports.resize(viewportCount); memcpy(pCB->viewports.data(), pViewports, viewportCount * sizeof(VkViewport)); } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports); } @@ -7042,7 +7009,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); if (pCB) { skipCall |= addCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()"); @@ -7050,7 +7017,7 @@ vkCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t s pCB->scissors.resize(scissorCount); memcpy(pCB->scissors.data(), pScissors, scissorCount * sizeof(VkRect2D)); } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors); } @@ -7058,13 +7025,13 @@ vkCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t s VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth(VkCommandBuffer commandBuffer, 
float lineWidth) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); if (pCB) { skipCall |= addCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()"); pCB->status |= CBSTATUS_LINE_WIDTH_SET; } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdSetLineWidth(commandBuffer, lineWidth); } @@ -7073,13 +7040,13 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); if (pCB) { skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()"); pCB->status |= CBSTATUS_DEPTH_BIAS_SET; } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor); @@ -7088,13 +7055,13 @@ vkCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); if (pCB) { skipCall |= addCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()"); 
pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET; } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdSetBlendConstants(commandBuffer, blendConstants); } @@ -7103,13 +7070,13 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); if (pCB) { skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()"); pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET; } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds); } @@ -7118,13 +7085,13 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); if (pCB) { skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()"); pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET; } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask); } @@ -7133,13 +7100,13 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) { bool skipCall = false; layer_data *dev_data = 
get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); if (pCB) { skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()"); pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET; } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask); } @@ -7148,13 +7115,13 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); if (pCB) { skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()"); pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET; } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdSetStencilReference(commandBuffer, faceMask, reference); } @@ -7165,7 +7132,7 @@ vkCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipel const uint32_t *pDynamicOffsets) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); if (pCB) { if (pCB->state == CB_RECORDING) { @@ -7315,7 +7282,7 @@ vkCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipel skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()"); } } - 
loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount, pDescriptorSets, dynamicOffsetCount, pDynamicOffsets); @@ -7325,7 +7292,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); #if MTMERGESOURCE VkDeviceMemory mem; skipCall = @@ -7360,7 +7327,7 @@ vkCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSiz } pCB->status |= CBSTATUS_INDEX_BUFFER_BOUND; } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType); } @@ -7382,7 +7349,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers(VkCommandBuffe const VkDeviceSize *pOffsets) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); #if MTMERGESOURCE for (uint32_t i = 0; i < bindingCount; ++i) { VkDeviceMemory mem; @@ -7402,12 +7369,12 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers(VkCommandBuffe } else { skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffer()"); } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets); } -/* expects globalLock to be held by caller */ +/* expects global_lock to be held by caller */ static bool 
markStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) { bool skip_call = false; @@ -7442,7 +7409,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDraw(VkCommandBuffer commandBuff uint32_t firstVertex, uint32_t firstInstance) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); if (pCB) { skipCall |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()"); @@ -7459,7 +7426,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDraw(VkCommandBuffer commandBuff } skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDraw"); } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance); } @@ -7469,7 +7436,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed(VkCommandBuffer comm uint32_t firstInstance) { layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); bool skipCall = false; - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); if (pCB) { skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXED, "vkCmdDrawIndexed()"); @@ -7486,7 +7453,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed(VkCommandBuffer comm } skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexed"); } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance); @@ -7496,7 +7463,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t 
count, uint32_t stride) { layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); bool skipCall = false; - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); #if MTMERGESOURCE VkDeviceMemory mem; // MTMTODO : merge with code below @@ -7520,7 +7487,7 @@ vkCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize o } skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndirect"); } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdDrawIndirect(commandBuffer, buffer, offset, count, stride); } @@ -7529,7 +7496,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); #if MTMERGESOURCE VkDeviceMemory mem; // MTMTODO : merge with code below @@ -7554,7 +7521,7 @@ vkCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDevic } skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexedIndirect"); } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride); } @@ -7562,7 +7529,7 @@ vkCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDevic VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); GLOBAL_CB_NODE *pCB = getCBNode(dev_data, 
commandBuffer); if (pCB) { // TODO : Re-enable validate_and_update_draw_state() when it supports compute shaders @@ -7573,7 +7540,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDispatch(VkCommandBuffer command skipCall |= addCmd(dev_data, pCB, CMD_DISPATCH, "vkCmdDispatch()"); skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatch"); } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdDispatch(commandBuffer, x, y, z); } @@ -7582,7 +7549,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); #if MTMERGESOURCE VkDeviceMemory mem; skipCall = @@ -7599,7 +7566,7 @@ vkCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSi skipCall |= addCmd(dev_data, pCB, CMD_DISPATCHINDIRECT, "vkCmdDispatchIndirect()"); skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatchIndirect"); } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdDispatchIndirect(commandBuffer, buffer, offset); } @@ -7608,7 +7575,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer(VkCommandBuffer comma uint32_t regionCount, const VkBufferCopy *pRegions) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); #if MTMERGESOURCE VkDeviceMemory mem; auto cb_data = dev_data->commandBufferMap.find(commandBuffer); @@ -7640,7 +7607,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer(VkCommandBuffer comma skipCall |= addCmd(dev_data, pCB, CMD_COPYBUFFER, "vkCmdCopyBuffer()"); 
skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyBuffer"); } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions); } @@ -7727,7 +7694,7 @@ vkCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout sr VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); #if MTMERGESOURCE VkDeviceMemory mem; auto cb_data = dev_data->commandBufferMap.find(commandBuffer); @@ -7762,7 +7729,7 @@ vkCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout sr skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].dstSubresource, dstImageLayout); } } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions); @@ -7773,7 +7740,7 @@ vkCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout sr VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); #if MTMERGESOURCE VkDeviceMemory mem; auto cb_data = dev_data->commandBufferMap.find(commandBuffer); @@ -7804,7 +7771,7 @@ vkCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout sr skipCall |= addCmd(dev_data, pCB, CMD_BLITIMAGE, "vkCmdBlitImage()"); skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBlitImage"); } - loader_platform_thread_unlock_mutex(&globalLock); + 
lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter); @@ -7815,7 +7782,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage(VkCommandBuffe uint32_t regionCount, const VkBufferImageCopy *pRegions) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); #if MTMERGESOURCE VkDeviceMemory mem; auto cb_data = dev_data->commandBufferMap.find(commandBuffer); @@ -7849,7 +7816,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage(VkCommandBuffe skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].imageSubresource, dstImageLayout); } } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions); @@ -7860,7 +7827,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer(VkCommandBuffe uint32_t regionCount, const VkBufferImageCopy *pRegions) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); #if MTMERGESOURCE VkDeviceMemory mem; auto cb_data = dev_data->commandBufferMap.find(commandBuffer); @@ -7896,7 +7863,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer(VkCommandBuffe skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].imageSubresource, srcImageLayout); } } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions); @@ -7906,7 +7873,7 
@@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(VkCommandBuffer com VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); #if MTMERGESOURCE VkDeviceMemory mem; auto cb_data = dev_data->commandBufferMap.find(commandBuffer); @@ -7929,7 +7896,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(VkCommandBuffer com skipCall |= addCmd(dev_data, pCB, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()"); skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyUpdateBuffer"); } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData); } @@ -7938,7 +7905,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); #if MTMERGESOURCE VkDeviceMemory mem; auto cb_data = dev_data->commandBufferMap.find(commandBuffer); @@ -7961,7 +7928,7 @@ vkCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize skipCall |= addCmd(dev_data, pCB, CMD_FILLBUFFER, "vkCmdFillBuffer()"); skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyFillBuffer"); } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data); } @@ -7971,7 +7938,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(VkCommandBuffer const VkClearRect *pRects) { bool skipCall = false; 
layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); if (pCB) { skipCall |= addCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()"); @@ -8031,7 +7998,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(VkCommandBuffer } } } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects); } @@ -8041,7 +8008,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(VkCommandBuffer uint32_t rangeCount, const VkImageSubresourceRange *pRanges) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); #if MTMERGESOURCE // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state VkDeviceMemory mem; @@ -8061,7 +8028,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(VkCommandBuffer skipCall |= addCmd(dev_data, pCB, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()"); skipCall |= insideRenderPass(dev_data, pCB, "vkCmdClearColorImage"); } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges); } @@ -8072,7 +8039,7 @@ vkCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImag const VkImageSubresourceRange *pRanges) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); #if MTMERGESOURCE // TODO : Verify memory is in 
VK_IMAGE_STATE_CLEAR state VkDeviceMemory mem; @@ -8092,7 +8059,7 @@ vkCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImag skipCall |= addCmd(dev_data, pCB, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()"); skipCall |= insideRenderPass(dev_data, pCB, "vkCmdClearDepthStencilImage"); } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges); @@ -8103,7 +8070,7 @@ vkCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); #if MTMERGESOURCE auto cb_data = dev_data->commandBufferMap.find(commandBuffer); VkDeviceMemory mem; @@ -8129,7 +8096,7 @@ vkCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout skipCall |= addCmd(dev_data, pCB, CMD_RESOLVEIMAGE, "vkCmdResolveImage()"); skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResolveImage"); } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions); @@ -8152,7 +8119,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); if (pCB) { skipCall |= addCmd(dev_data, pCB, CMD_SETEVENT, 
"vkCmdSetEvent()"); @@ -8162,7 +8129,7 @@ vkCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask); pCB->eventUpdates.push_back(eventUpdate); } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdSetEvent(commandBuffer, event, stageMask); } @@ -8171,7 +8138,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); if (pCB) { skipCall |= addCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()"); @@ -8181,7 +8148,7 @@ vkCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFla std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0)); pCB->eventUpdates.push_back(eventUpdate); } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdResetEvent(commandBuffer, event, stageMask); } @@ -8560,7 +8527,7 @@ vkCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEven uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); if (pCB) { auto firstEventIndex = pCB->events.size(); @@ -8581,7 +8548,7 @@ vkCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEven ValidateBarriers("vkCmdWaitEvents", 
commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, @@ -8595,7 +8562,7 @@ vkCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStag uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); if (pCB) { skipCall |= addCmd(dev_data, pCB, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()"); @@ -8604,7 +8571,7 @@ vkCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStag ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, @@ -8615,7 +8582,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); if (pCB) { QueryObject query = {queryPool, 
slot}; @@ -8625,7 +8592,7 @@ vkCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t s } skipCall |= addCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()"); } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdBeginQuery(commandBuffer, queryPool, slot, flags); } @@ -8633,7 +8600,7 @@ vkCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t s VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); if (pCB) { QueryObject query = {queryPool, slot}; @@ -8652,7 +8619,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery(VkCommandBuffer command skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()"); } } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdEndQuery(commandBuffer, queryPool, slot); } @@ -8661,7 +8628,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); if (pCB) { for (uint32_t i = 0; i < queryCount; i++) { @@ -8676,7 +8643,7 @@ vkCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32 } skipCall |= insideRenderPass(dev_data, pCB, "vkCmdQueryPool"); } - loader_platform_thread_unlock_mutex(&globalLock); + 
lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount); } @@ -8686,7 +8653,7 @@ vkCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); #if MTMERGESOURCE VkDeviceMemory mem; @@ -8722,7 +8689,7 @@ vkCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, } skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyQueryPoolResults"); } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset, stride, flags); @@ -8733,7 +8700,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants(VkCommandBuffer co const void *pValues) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); if (pCB) { if (pCB->state == CB_RECORDING) { @@ -8746,7 +8713,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants(VkCommandBuffer co skipCall |= validatePushConstantSize(dev_data, offset, size, "vkCmdPushConstants()"); } // TODO : Add warning if push constant update doesn't align with range - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues); } @@ -8755,7 +8722,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void 
VKAPI_CALL vkCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); if (pCB) { QueryObject query = {queryPool, slot}; @@ -8766,7 +8733,7 @@ vkCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipel skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()"); } } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot); } @@ -8778,7 +8745,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer(VkDevice devi VkResult result = dev_data->device_dispatch_table->CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer); if (VK_SUCCESS == result) { // Shadow create info and store in map - loader_platform_thread_lock_mutex(&globalLock); + std::lock_guard<std::mutex> lock(global_lock); auto & fbNode = dev_data->frameBufferMap[*pFramebuffer]; fbNode.createInfo = *pCreateInfo; @@ -8801,8 +8768,6 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer(VkDevice devi fb_info.image = view_data->second.image; fbNode.attachments.push_back(fb_info); } - - loader_platform_thread_unlock_mutex(&globalLock); } return result; } @@ -9137,9 +9102,8 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule(VkDevice dev VkResult res = my_data->device_dispatch_table->CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule); if (res == VK_SUCCESS) { - loader_platform_thread_lock_mutex(&globalLock); + std::lock_guard<std::mutex> lock(global_lock); my_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new 
shader_module(pCreateInfo)); - loader_platform_thread_unlock_mutex(&globalLock); } return res; } @@ -9149,7 +9113,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(VkDevice devic VkRenderPass *pRenderPass) { bool skip_call = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); // Create DAG std::vector<bool> has_self_dependency(pCreateInfo->subpassCount); std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount); @@ -9157,13 +9121,13 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(VkDevice devic // Validate skip_call |= ValidateLayouts(dev_data, device, pCreateInfo); if (skip_call) { - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); return VK_ERROR_VALIDATION_FAILED_EXT; } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); VkResult result = dev_data->device_dispatch_table->CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass); if (VK_SUCCESS == result) { - loader_platform_thread_lock_mutex(&globalLock); + lock.lock(); // TODOSC : Merge in tracking of renderpass from shader_checker // Shadow create info and store in map VkRenderPassCreateInfo *localRPCI = new VkRenderPassCreateInfo(*pCreateInfo); @@ -9300,7 +9264,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(VkDevice devic } } #endif - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); } return result; } @@ -9460,7 +9424,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); GLOBAL_CB_NODE *pCB = getCBNode(dev_data, 
commandBuffer); if (pCB) { if (pRenderPassBegin && pRenderPassBegin->renderPass) { @@ -9539,7 +9503,7 @@ vkCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot use a NULL RenderPass object in vkCmdBeginRenderPass()"); } } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) { dev_data->device_dispatch_table->CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents); } @@ -9548,7 +9512,7 @@ vkCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); if (pCB) { skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass"); @@ -9562,7 +9526,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass(VkCommandBuffer comm } skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass"); } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdNextSubpass(commandBuffer, contents); } @@ -9570,7 +9534,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass(VkCommandBuffer comm VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass(VkCommandBuffer commandBuffer) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); #if MTMERGESOURCE auto cb_data = dev_data->commandBufferMap.find(commandBuffer); if (cb_data != dev_data->commandBufferMap.end()) { @@ -9609,7 +9573,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL 
vkCmdEndRenderPass(VkCommandBuffer co pCB->activeRenderPass = 0; pCB->activeSubpass = 0; } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdEndRenderPass(commandBuffer); } @@ -9837,7 +9801,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) { bool skipCall = false; layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer); if (pCB) { GLOBAL_CB_NODE *pSubCB = NULL; @@ -9938,7 +9902,7 @@ vkCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteComands"); skipCall |= addCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteComands()"); } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers); } @@ -9969,7 +9933,7 @@ vkMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSi bool skip_call = false; VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); #if MTMERGESOURCE DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem); if (pMemObj) { @@ -9986,14 +9950,14 @@ vkMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSi storeMemRanges(dev_data, mem, offset, size); #endif skip_call |= ValidateMapImageLayouts(device, mem); - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skip_call) { result = dev_data->device_dispatch_table->MapMemory(device, mem, offset, size, flags, ppData); #if MTMERGESOURCE - 
loader_platform_thread_lock_mutex(&globalLock); + lock.lock(); initializeAndTrackMemory(dev_data, mem, size, ppData); - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); #endif } return result; @@ -10004,9 +9968,9 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkUnmapMemory(VkDevice device, VkDevi layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); bool skipCall = false; - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); skipCall |= deleteMemRanges(my_data, mem); - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) { my_data->device_dispatch_table->UnmapMemory(device, mem); } @@ -10081,10 +10045,10 @@ vkFlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappe bool skipCall = false; layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); skipCall |= validateAndCopyNoncoherentMemoryToDriver(my_data, memRangeCount, pMemRanges); skipCall |= validateMemoryIsMapped(my_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges); - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) { result = my_data->device_dispatch_table->FlushMappedMemoryRanges(device, memRangeCount, pMemRanges); } @@ -10097,9 +10061,9 @@ vkInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const Vk bool skipCall = false; layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); skipCall |= validateMemoryIsMapped(my_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges); - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) { result = my_data->device_dispatch_table->InvalidateMappedMemoryRanges(device, memRangeCount, 
pMemRanges); } @@ -10111,29 +10075,29 @@ VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory(VkDevice device, VkImage image, layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; bool skipCall = false; - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); auto image_node = dev_data->imageMap.find(image); if (image_node != dev_data->imageMap.end()) { // Track objects tied to memory uint64_t image_handle = reinterpret_cast<uint64_t&>(image); skipCall = set_mem_binding(dev_data, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory"); VkMemoryRequirements memRequirements; - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); dev_data->device_dispatch_table->GetImageMemoryRequirements(device, image, &memRequirements); - loader_platform_thread_lock_mutex(&globalLock); + lock.lock(); skipCall |= validate_buffer_image_aliasing(dev_data, image_handle, mem, memoryOffset, memRequirements, dev_data->memObjMap[mem].imageRanges, dev_data->memObjMap[mem].bufferRanges, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT); print_mem_list(dev_data); - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) { result = dev_data->device_dispatch_table->BindImageMemory(device, image, mem, memoryOffset); - loader_platform_thread_lock_mutex(&globalLock); + lock.lock(); dev_data->memObjMap[mem].image = image; image_node->second.mem = mem; image_node->second.memOffset = memoryOffset; image_node->second.memSize = memRequirements.size; - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); } } else { log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, @@ -10148,7 +10112,7 @@ VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent(VkDevice device, VkEvent event) { bool skip_call = false; VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; layer_data *dev_data = 
get_my_data_ptr(get_dispatch_key(device), layer_data_map); - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); auto event_node = dev_data->eventMap.find(event); if (event_node != dev_data->eventMap.end()) { event_node->second.needsSignaled = false; @@ -10160,7 +10124,7 @@ VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent(VkDevice device, VkEvent event) { reinterpret_cast<const uint64_t &>(event)); } } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297) @@ -10180,7 +10144,7 @@ vkQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map); VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; bool skip_call = false; - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); // First verify that fence is not in use if ((fence != VK_NULL_HANDLE) && (bindInfoCount != 0) && dev_data->fenceMap[fence].in_use.load()) { skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, @@ -10247,7 +10211,7 @@ vkQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo } } print_mem_list(dev_data); - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skip_call) return dev_data->device_dispatch_table->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence); @@ -10260,12 +10224,11 @@ VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore(VkDevice device, const VkSemaph layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); VkResult result = 
dev_data->device_dispatch_table->CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore); if (result == VK_SUCCESS) { - loader_platform_thread_lock_mutex(&globalLock); + std::lock_guard<std::mutex> lock(global_lock); SEMAPHORE_NODE* sNode = &dev_data->semaphoreMap[*pSemaphore]; sNode->signaled = false; sNode->queue = VK_NULL_HANDLE; sNode->in_use.store(0); - loader_platform_thread_unlock_mutex(&globalLock); } return result; } @@ -10275,11 +10238,10 @@ vkCreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAll layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); VkResult result = dev_data->device_dispatch_table->CreateEvent(device, pCreateInfo, pAllocator, pEvent); if (result == VK_SUCCESS) { - loader_platform_thread_lock_mutex(&globalLock); + std::lock_guard<std::mutex> lock(global_lock); dev_data->eventMap[*pEvent].needsSignaled = false; dev_data->eventMap[*pEvent].in_use.store(0); dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0); - loader_platform_thread_unlock_mutex(&globalLock); } return result; } @@ -10292,9 +10254,8 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR(VkDevice dev if (VK_SUCCESS == result) { SWAPCHAIN_NODE *psc_node = new SWAPCHAIN_NODE(pCreateInfo); - loader_platform_thread_lock_mutex(&globalLock); + std::lock_guard<std::mutex> lock(global_lock); dev_data->device_extensions.swapchainMap[*pSwapchain] = psc_node; - loader_platform_thread_unlock_mutex(&globalLock); } return result; @@ -10305,7 +10266,7 @@ vkDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocat layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); bool skipCall = false; - loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); auto swapchain_data = dev_data->device_extensions.swapchainMap.find(swapchain); if (swapchain_data != dev_data->device_extensions.swapchainMap.end()) { if 
(swapchain_data->second->images.size() > 0) { @@ -10328,7 +10289,7 @@ vkDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocat delete swapchain_data->second; dev_data->device_extensions.swapchainMap.erase(swapchain); } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) dev_data->device_dispatch_table->DestroySwapchainKHR(device, swapchain, pAllocator); } @@ -10342,7 +10303,7 @@ vkGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCo // This should never happen and is checked by param checker. if (!pCount) return result; - loader_platform_thread_lock_mutex(&globalLock); + std::lock_guard<std::mutex> lock(global_lock); const size_t count = *pCount; auto swapchain_node = dev_data->device_extensions.swapchainMap[swapchain]; if (!swapchain_node->images.empty()) { @@ -10374,7 +10335,6 @@ vkGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCo dev_data->imageLayoutMap[subpair] = image_layout_node; dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain; } - loader_platform_thread_unlock_mutex(&globalLock); } return result; } @@ -10385,7 +10345,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(VkQueue queue, bool skip_call = false; if (pPresentInfo) { - loader_platform_thread_lock_mutex(&globalLock); + std::lock_guard<std::mutex> lock(global_lock); for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) { const VkSemaphore &semaphore = pPresentInfo->pWaitSemaphores[i]; if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) { @@ -10426,7 +10386,6 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(VkQueue queue, } } } - loader_platform_thread_unlock_mutex(&globalLock); } if (!skip_call) @@ -10441,7 +10400,7 @@ VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(VkDevice device, VkSwapchai VkResult result = VK_ERROR_VALIDATION_FAILED_EXT; bool skipCall = false; - 
loader_platform_thread_lock_mutex(&globalLock); + std::unique_lock<std::mutex> lock(global_lock); if (semaphore != VK_NULL_HANDLE && dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) { if (dev_data->semaphoreMap[semaphore].signaled) { @@ -10455,7 +10414,7 @@ VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(VkDevice device, VkSwapchai if (fence_data != dev_data->fenceMap.end()) { fence_data->second.swapchain = swapchain; } - loader_platform_thread_unlock_mutex(&globalLock); + lock.unlock(); if (!skipCall) { result = @@ -10472,9 +10431,8 @@ vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackC VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table; VkResult res = pTable->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback); if (VK_SUCCESS == res) { - loader_platform_thread_lock_mutex(&globalLock); + std::lock_guard<std::mutex> lock(global_lock); res = layer_create_msg_callback(my_data->report_data, pCreateInfo, pAllocator, pMsgCallback); - loader_platform_thread_unlock_mutex(&globalLock); } return res; } @@ -10485,9 +10443,8 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(VkIns layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map); VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table; pTable->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator); - loader_platform_thread_lock_mutex(&globalLock); + std::lock_guard<std::mutex> lock(global_lock); layer_destroy_msg_callback(my_data->report_data, msgCallback, pAllocator); - loader_platform_thread_unlock_mutex(&globalLock); } VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL |
