diff options
| -rw-r--r-- | demos/cube.c | 24 | ||||
| -rw-r--r-- | demos/tri.c | 20 | ||||
| -rw-r--r-- | icd/nulldrv/nulldrv.c | 9 | ||||
| -rw-r--r-- | include/vkLayer.h | 6 | ||||
| -rw-r--r-- | include/vulkan.h | 20 | ||||
| -rw-r--r-- | layers/mem_tracker.cpp | 72 | ||||
| -rw-r--r-- | layers/param_checker.cpp | 14 | ||||
| -rwxr-xr-x | vulkan.py | 15 |
8 files changed, 108 insertions, 72 deletions
diff --git a/demos/cube.c b/demos/cube.c index dab663ca..637cd312 100644 --- a/demos/cube.c +++ b/demos/cube.c @@ -641,7 +641,7 @@ static void demo_prepare_depth(struct demo *demo) assert(!err); /* bind memory */ - err = vkBindObjectMemory(demo->depth.image, i, + err = vkQueueBindObjectMemory(demo->queue, demo->depth.image, i, demo->depth.mem[i], 0); assert(!err); } @@ -874,7 +874,7 @@ static void demo_prepare_texture_image(struct demo *demo, assert(!err); /* bind memory */ - err = vkBindObjectMemory(tex_obj->image, j, tex_obj->mem[j], 0); + err = vkQueueBindObjectMemory(demo->queue, tex_obj->image, j, tex_obj->mem[j], 0); assert(!err); } free(mem_reqs); @@ -917,11 +917,11 @@ static void demo_prepare_texture_image(struct demo *demo, /* setting the image layout does not reference the actual memory so no need to add a mem ref */ } -static void demo_destroy_texture_image(struct texture_object *tex_objs) +static void demo_destroy_texture_image(struct demo *demo, struct texture_object *tex_objs) { /* clean up staging resources */ for (uint32_t j = 0; j < tex_objs->num_mem; j ++) { - vkBindObjectMemory(tex_objs->image, j, VK_NULL_HANDLE, 0); + vkQueueBindObjectMemory(demo->queue, tex_objs->image, j, VK_NULL_HANDLE, 0); vkFreeMemory(tex_objs->mem[j]); } @@ -988,7 +988,7 @@ static void demo_prepare_textures(struct demo *demo) demo_flush_init_cmd(demo); - demo_destroy_texture_image(&staging_texture); + demo_destroy_texture_image(demo, &staging_texture); demo_remove_mem_refs(demo, staging_texture.num_mem, staging_texture.mem); } else { /* Can't support VK_FMT_B8G8R8A8_UNORM !? 
*/ @@ -1108,7 +1108,7 @@ void demo_prepare_cube_data_buffer(struct demo *demo) err = vkUnmapMemory(demo->uniform_data.mem[i]); assert(!err); - err = vkBindObjectMemory(demo->uniform_data.buf, i, + err = vkQueueBindObjectMemory(demo->queue, demo->uniform_data.buf, i, demo->uniform_data.mem[i], 0); assert(!err); } @@ -1752,8 +1752,12 @@ static void demo_init_vk(struct demo *demo) queue_count = (uint32_t)(data_size / sizeof(VkPhysicalGpuQueueProperties)); assert(queue_count >= 1); + // Graphics queue and MemMgr queue can be separate. + // TODO: Add support for separate queues, including synchronization, + // and appropriate tracking for QueueSubmit and QueueBindObjectMemory for (i = 0; i < queue_count; i++) { - if (demo->queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) + if ((demo->queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) && + (demo->queue_props[i].queueFlags & VK_QUEUE_MEMMGR_BIT) ) break; } assert(i < queue_count); @@ -1833,7 +1837,7 @@ static void demo_cleanup(struct demo *demo) for (i = 0; i < DEMO_TEXTURE_COUNT; i++) { vkDestroyObject(demo->textures[i].view); - vkBindObjectMemory(demo->textures[i].image, 0, VK_NULL_HANDLE, 0); + vkQueueBindObjectMemory(demo->queue, demo->textures[i].image, 0, VK_NULL_HANDLE, 0); vkDestroyObject(demo->textures[i].image); demo_remove_mem_refs(demo, demo->textures[i].num_mem, demo->textures[i].mem); for (j = 0; j < demo->textures[i].num_mem; j++) @@ -1842,7 +1846,7 @@ static void demo_cleanup(struct demo *demo) } vkDestroyObject(demo->depth.view); - vkBindObjectMemory(demo->depth.image, 0, VK_NULL_HANDLE, 0); + vkQueueBindObjectMemory(demo->queue, demo->depth.image, 0, VK_NULL_HANDLE, 0); vkDestroyObject(demo->depth.image); demo_remove_mem_refs(demo, demo->depth.num_mem, demo->depth.mem); for (j = 0; j < demo->depth.num_mem; j++) { @@ -1850,7 +1854,7 @@ static void demo_cleanup(struct demo *demo) } vkDestroyObject(demo->uniform_data.view); - vkBindObjectMemory(demo->uniform_data.buf, 0, VK_NULL_HANDLE, 0); + 
vkQueueBindObjectMemory(demo->queue, demo->uniform_data.buf, 0, VK_NULL_HANDLE, 0); vkDestroyObject(demo->uniform_data.buf); demo_remove_mem_refs(demo, demo->uniform_data.num_mem, demo->uniform_data.mem); for (j = 0; j < demo->uniform_data.num_mem; j++) diff --git a/demos/tri.c b/demos/tri.c index 8d212098..f1156d28 100644 --- a/demos/tri.c +++ b/demos/tri.c @@ -450,7 +450,7 @@ static void demo_prepare_depth(struct demo *demo) assert(!err); /* bind memory */ - err = vkBindObjectMemory(demo->depth.image, i, + err = vkQueueBindObjectMemory(demo->queue, demo->depth.image, i, demo->depth.mem[i], 0); assert(!err); } @@ -532,7 +532,7 @@ static void demo_prepare_texture_image(struct demo *demo, assert(!err); /* bind memory */ - err = vkBindObjectMemory(tex_obj->image, j, tex_obj->mem[j], 0); + err = vkQueueBindObjectMemory(demo->queue, tex_obj->image, j, tex_obj->mem[j], 0); assert(!err); } free(mem_reqs); @@ -578,11 +578,11 @@ static void demo_prepare_texture_image(struct demo *demo, /* setting the image layout does not reference the actual memory so no need to add a mem ref */ } -static void demo_destroy_texture_image(struct texture_object *tex_obj) +static void demo_destroy_texture_image(struct demo *demo, struct texture_object *tex_obj) { /* clean up staging resources */ for (uint32_t j = 0; j < tex_obj->num_mem; j ++) { - vkBindObjectMemory(tex_obj->image, j, VK_NULL_HANDLE, 0); + vkQueueBindObjectMemory(demo->queue, tex_obj->image, j, VK_NULL_HANDLE, 0); vkFreeMemory(tex_obj->mem[j]); } @@ -651,7 +651,7 @@ static void demo_prepare_textures(struct demo *demo) demo_flush_init_cmd(demo); - demo_destroy_texture_image(&staging_texture); + demo_destroy_texture_image(demo, &staging_texture); demo_remove_mem_refs(demo, staging_texture.num_mem, staging_texture.mem); } else { /* Can't support VK_FMT_B8G8R8A8_UNORM !? 
*/ @@ -760,7 +760,7 @@ static void demo_prepare_vertices(struct demo *demo) err = vkUnmapMemory(demo->vertices.mem[i]); assert(!err); - err = vkBindObjectMemory(demo->vertices.buf, i, demo->vertices.mem[i], 0); + err = vkQueueBindObjectMemory(demo->queue, demo->vertices.buf, i, demo->vertices.mem[i], 0); assert(!err); } @@ -1296,6 +1296,8 @@ static void demo_init_vk(struct demo *demo) for (i = 0; i < queue_count; i++) { if (demo->queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) break; + if (demo->queue_props[i].queueFlags & VK_QUEUE_MEMMGR_BIT) + break; } assert(i < queue_count); demo->graphics_queue_node_index = i; @@ -1362,7 +1364,7 @@ static void demo_cleanup(struct demo *demo) vkDestroyObject(demo->desc_layout_chain); vkDestroyObject(demo->desc_layout); - vkBindObjectMemory(demo->vertices.buf, 0, VK_NULL_HANDLE, 0); + vkQueueBindObjectMemory(demo->queue, demo->vertices.buf, 0, VK_NULL_HANDLE, 0); vkDestroyObject(demo->vertices.buf); demo_remove_mem_refs(demo, demo->vertices.num_mem, demo->vertices.mem); for (j = 0; j < demo->vertices.num_mem; j++) @@ -1370,7 +1372,7 @@ static void demo_cleanup(struct demo *demo) for (i = 0; i < DEMO_TEXTURE_COUNT; i++) { vkDestroyObject(demo->textures[i].view); - vkBindObjectMemory(demo->textures[i].image, 0, VK_NULL_HANDLE, 0); + vkQueueBindObjectMemory(demo->queue, demo->textures[i].image, 0, VK_NULL_HANDLE, 0); vkDestroyObject(demo->textures[i].image); demo_remove_mem_refs(demo, demo->textures[i].num_mem, demo->textures[i].mem); for (j = 0; j < demo->textures[i].num_mem; j++) @@ -1380,7 +1382,7 @@ static void demo_cleanup(struct demo *demo) } vkDestroyObject(demo->depth.view); - vkBindObjectMemory(demo->depth.image, 0, VK_NULL_HANDLE, 0); + vkQueueBindObjectMemory(demo->queue, demo->depth.image, 0, VK_NULL_HANDLE, 0); demo_remove_mem_refs(demo, demo->depth.num_mem, demo->depth.mem); vkDestroyObject(demo->depth.image); for (j = 0; j < demo->depth.num_mem; j++) diff --git a/icd/nulldrv/nulldrv.c b/icd/nulldrv/nulldrv.c index 
e80ec02a..38102c17 100644 --- a/icd/nulldrv/nulldrv.c +++ b/icd/nulldrv/nulldrv.c @@ -1579,7 +1579,8 @@ ICD_EXPORT VkResult VKAPI vkGetObjectInfo( return base->get_info(base, infoType, pDataSize, pData); } -ICD_EXPORT VkResult VKAPI vkBindObjectMemory( +ICD_EXPORT VkResult VKAPI vkQueueBindObjectMemory( + VkQueue queue, VkObject object, uint32_t allocationIdx, VkGpuMemory mem_, @@ -1589,7 +1590,8 @@ ICD_EXPORT VkResult VKAPI vkBindObjectMemory( return VK_SUCCESS; } -ICD_EXPORT VkResult VKAPI vkBindObjectMemoryRange( +ICD_EXPORT VkResult VKAPI vkQueueBindObjectMemoryRange( + VkQueue queue, VkObject object, uint32_t allocationIdx, VkGpuSize rangeOffset, @@ -1601,7 +1603,8 @@ ICD_EXPORT VkResult VKAPI vkBindObjectMemoryRange( return VK_SUCCESS; } -ICD_EXPORT VkResult VKAPI vkBindImageMemoryRange( +ICD_EXPORT VkResult VKAPI vkQueueBindImageMemoryRange( + VkQueue queue, VkImage image, uint32_t allocationIdx, const VkImageMemoryBindInfo* bindInfo, diff --git a/include/vkLayer.h b/include/vkLayer.h index e58ccdcd..c15237eb 100644 --- a/include/vkLayer.h +++ b/include/vkLayer.h @@ -56,9 +56,9 @@ typedef struct VkLayerDispatchTable_ PFN_vkOpenPeerImage OpenPeerImage; PFN_vkDestroyObject DestroyObject; PFN_vkGetObjectInfo GetObjectInfo; - PFN_vkBindObjectMemory BindObjectMemory; - PFN_vkBindObjectMemoryRange BindObjectMemoryRange; - PFN_vkBindImageMemoryRange BindImageMemoryRange; + PFN_vkQueueBindObjectMemory QueueBindObjectMemory; + PFN_vkQueueBindObjectMemoryRange QueueBindObjectMemoryRange; + PFN_vkQueueBindImageMemoryRange QueueBindImageMemoryRange; PFN_vkCreateFence CreateFence; PFN_vkGetFenceStatus GetFenceStatus; PFN_vkResetFences ResetFences; diff --git a/include/vulkan.h b/include/vulkan.h index 8d0d5d5b..2c538572 100644 --- a/include/vulkan.h +++ b/include/vulkan.h @@ -1075,6 +1075,7 @@ typedef enum VkQueueFlags_ VK_QUEUE_GRAPHICS_BIT = 0x00000001, // Queue supports graphics operations VK_QUEUE_COMPUTE_BIT = 0x00000002, // Queue supports compute operations 
VK_QUEUE_DMA_BIT = 0x00000004, // Queue supports DMA operations + VK_QUEUE_MEMMGR_BIT = 0x00000008, // Queue supports memory management operations VK_QUEUE_EXTENDED_BIT = 0x40000000, // Extended queue VK_MAX_ENUM(VkQueueFlags) } VkQueueFlags; @@ -1427,7 +1428,7 @@ typedef struct VkMemoryRequirements_ { VkGpuSize size; // Specified in bytes VkGpuSize alignment; // Specified in bytes - VkGpuSize granularity; // Granularity on which vkBindObjectMemoryRange can bind sub-ranges of memory specified in bytes (usually the page size) + VkGpuSize granularity; // Granularity on which vkQueueBindObjectMemoryRange can bind sub-ranges of memory specified in bytes (usually the page size) VkFlags memProps; // VkMemoryPropertyFlags } VkMemoryRequirements; @@ -2213,9 +2214,9 @@ typedef VkResult (VKAPI *PFN_vkOpenPeerMemory)(VkDevice device, const VkPeerMemo typedef VkResult (VKAPI *PFN_vkOpenPeerImage)(VkDevice device, const VkPeerImageOpenInfo* pOpenInfo, VkImage* pImage, VkGpuMemory* pMem); typedef VkResult (VKAPI *PFN_vkDestroyObject)(VkObject object); typedef VkResult (VKAPI *PFN_vkGetObjectInfo)(VkBaseObject object, VkObjectInfoType infoType, size_t* pDataSize, void* pData); -typedef VkResult (VKAPI *PFN_vkBindObjectMemory)(VkObject object, uint32_t allocationIdx, VkGpuMemory mem, VkGpuSize offset); -typedef VkResult (VKAPI *PFN_vkBindObjectMemoryRange)(VkObject object, uint32_t allocationIdx, VkGpuSize rangeOffset,VkGpuSize rangeSize, VkGpuMemory mem, VkGpuSize memOffset); -typedef VkResult (VKAPI *PFN_vkBindImageMemoryRange)(VkImage image, uint32_t allocationIdx, const VkImageMemoryBindInfo* bindInfo, VkGpuMemory mem, VkGpuSize memOffset); +typedef VkResult (VKAPI *PFN_vkQueueBindObjectMemory)(VkQueue queue, VkObject object, uint32_t allocationIdx, VkGpuMemory mem, VkGpuSize offset); +typedef VkResult (VKAPI *PFN_vkQueueBindObjectMemoryRange)(VkQueue queue, VkObject object, uint32_t allocationIdx, VkGpuSize rangeOffset,VkGpuSize rangeSize, VkGpuMemory mem, VkGpuSize 
memOffset); +typedef VkResult (VKAPI *PFN_vkQueueBindImageMemoryRange)(VkQueue queue, VkImage image, uint32_t allocationIdx, const VkImageMemoryBindInfo* bindInfo, VkGpuMemory mem, VkGpuSize memOffset); typedef VkResult (VKAPI *PFN_vkCreateFence)(VkDevice device, const VkFenceCreateInfo* pCreateInfo, VkFence* pFence); typedef VkResult (VKAPI *PFN_vkResetFences)(VkDevice device, uint32_t fenceCount, VkFence* pFences); typedef VkResult (VKAPI *PFN_vkGetFenceStatus)(VkFence fence); @@ -2453,13 +2454,17 @@ VkResult VKAPI vkGetObjectInfo( size_t* pDataSize, void* pData); -VkResult VKAPI vkBindObjectMemory( +// Memory management API functions + +VkResult VKAPI vkQueueBindObjectMemory( + VkQueue queue, VkObject object, uint32_t allocationIdx, VkGpuMemory mem, VkGpuSize memOffset); -VkResult VKAPI vkBindObjectMemoryRange( +VkResult VKAPI vkQueueBindObjectMemoryRange( + VkQueue queue, VkObject object, uint32_t allocationIdx, VkGpuSize rangeOffset, @@ -2467,7 +2472,8 @@ VkResult VKAPI vkBindObjectMemoryRange( VkGpuMemory mem, VkGpuSize memOffset); -VkResult VKAPI vkBindImageMemoryRange( +VkResult VKAPI vkQueueBindImageMemoryRange( + VkQueue queue, VkImage image, uint32_t allocationIdx, const VkImageMemoryBindInfo* bindInfo, diff --git a/layers/mem_tracker.cpp b/layers/mem_tracker.cpp index c5f83d23..d67887e0 100644 --- a/layers/mem_tracker.cpp +++ b/layers/mem_tracker.cpp @@ -490,36 +490,50 @@ static bool32_t deleteCBInfoList() } // For given MemObjInfo, report Obj & CB bindings -static void reportMemReferences(const MT_MEM_OBJ_INFO* pMemObjInfo) +static void reportMemReferencesAndCleanUp(MT_MEM_OBJ_INFO* pMemObjInfo) { - uint32_t refCount = 0; // Count found references + uint32_t cmdBufRefCount = pMemObjInfo->pCmdBufferBindings.size(); + uint32_t objRefCount = pMemObjInfo->pObjBindings.size(); - for (list<VkCmdBuffer>::const_iterator it = pMemObjInfo->pCmdBufferBindings.begin(); it != pMemObjInfo->pCmdBufferBindings.end(); ++it) { - refCount++; + if 
((pMemObjInfo->pCmdBufferBindings.size() + pMemObjInfo->pObjBindings.size()) != 0) { char str[1024]; - sprintf(str, "Command Buffer %p has reference to mem obj %p", (*it), pMemObjInfo->mem); - layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, (*it), 0, MEMTRACK_NONE, "MEM", str); + sprintf(str, "Attempting to free memory object %p which still contains %d references", pMemObjInfo->mem, (cmdBufRefCount + objRefCount)); + layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, pMemObjInfo->mem, 0, MEMTRACK_INTERNAL_ERROR, "MEM", str); } - for (list<VkObject>::const_iterator it = pMemObjInfo->pObjBindings.begin(); it != pMemObjInfo->pObjBindings.end(); ++it) { - char str[1024]; - sprintf(str, "VK Object %p has reference to mem obj %p", (*it), pMemObjInfo->mem); - layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, (*it), 0, MEMTRACK_NONE, "MEM", str); + + if (cmdBufRefCount > 0) { + for (list<VkCmdBuffer>::const_iterator it = pMemObjInfo->pCmdBufferBindings.begin(); it != pMemObjInfo->pCmdBufferBindings.end(); ++it) { + char str[1024]; + sprintf(str, "Command Buffer %p still has a reference to mem obj %p", (*it), pMemObjInfo->mem); + layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, (*it), 0, MEMTRACK_NONE, "MEM", str); + } + // Clear the list of hanging references + pMemObjInfo->pCmdBufferBindings.clear(); } - if (refCount != pMemObjInfo->refCount) { - char str[1024]; - sprintf(str, "Refcount of %u for Mem Obj %p does't match reported refs of %u", pMemObjInfo->refCount, pMemObjInfo->mem, refCount); - layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, pMemObjInfo->mem, 0, MEMTRACK_INTERNAL_ERROR, "MEM", str); + + if (objRefCount > 0) { + for (list<VkObject>::const_iterator it = pMemObjInfo->pObjBindings.begin(); it != pMemObjInfo->pObjBindings.end(); ++it) { + char str[1024]; + sprintf(str, "VK Object %p still has a reference to mem obj %p", (*it), pMemObjInfo->mem); + layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, (*it), 0, MEMTRACK_NONE, "MEM", str); + 
} + // Clear the list of hanging references + pMemObjInfo->pObjBindings.clear(); } } static void deleteMemObjInfo(VkGpuMemory mem) { - MT_MEM_OBJ_INFO* pDelInfo = memObjMap[mem]; if (memObjMap.find(mem) != memObjMap.end()) { MT_MEM_OBJ_INFO* pDelInfo = memObjMap[mem]; delete pDelInfo; memObjMap.erase(mem); } + else { + char str[1024]; + sprintf(str, "Request to delete memory object %p not present in memory Object Map", mem); + layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, mem, 0, MEMTRACK_INVALID_MEM_OBJ, "MEM", str); + } } // Check if fence for given CB is completed @@ -582,7 +596,7 @@ static bool32_t freeMemObjInfo(VkGpuMemory mem, bool internal) char str[1024]; sprintf(str, "Freeing mem obj %p while it still has references", (void*)mem); layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, mem, 0, MEMTRACK_FREED_MEM_REF, "MEM", str); - reportMemReferences(pInfo); + reportMemReferencesAndCleanUp(pInfo); result = VK_FALSE; } // Delete mem obj info @@ -610,12 +624,16 @@ static bool32_t clearObjectBinding(VkObject object) sprintf(str, "Attempting to clear mem binding on obj %p but it has no binding.", (void*)object); layerCbMsg(VK_DBG_MSG_WARNING, VK_VALIDATION_LEVEL_0, object, 0, MEMTRACK_MEM_OBJ_CLEAR_EMPTY_BINDINGS, "MEM", str); } else { + // This obj is bound to a memory object. Remove the reference to this object in that memory object's list, decrement the memObj's refcount + // and set the objects memory binding pointer to NULL. 
for (list<VkObject>::iterator it = pObjInfo->pMemObjInfo->pObjBindings.begin(); it != pObjInfo->pMemObjInfo->pObjBindings.end(); ++it) { - pObjInfo->pMemObjInfo->refCount--; - pObjInfo->pMemObjInfo = NULL; - it = pObjInfo->pMemObjInfo->pObjBindings.erase(it); - result = VK_TRUE; - break; + if ((*it) == object) { + pObjInfo->pMemObjInfo->refCount--; + pObjInfo->pMemObjInfo->pObjBindings.erase(it); + pObjInfo->pMemObjInfo = NULL; + result = VK_TRUE; + break; + } } if (result == VK_FALSE) { char str[1024]; @@ -1168,7 +1186,7 @@ VK_LAYER_EXPORT VkResult VKAPI vkDestroyObject(VkObject object) } else { char str[1024]; - sprintf(str, "Destroying obj %p that is still bound to memory object %p\nYou should first clear binding by calling vkBindObjectMemory(%p, 0, VK_NULL_HANDLE, 0)", object, (void*)pDelInfo->pMemObjInfo->mem, object); + sprintf(str, "Destroying obj %p that is still bound to memory object %p\nYou should first clear binding by calling vkQueueBindObjectMemory(queue, %p, 0, VK_NULL_HANDLE, 0)", object, (void*)pDelInfo->pMemObjInfo->mem, object); layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, object, 0, MEMTRACK_DESTROY_OBJECT_ERROR, "MEM", str); // From the spec : If an object has previous memory binding, it is required to unbind memory from an API object before it is destroyed. clearObjectBinding(object); @@ -1186,15 +1204,15 @@ VK_LAYER_EXPORT VkResult VKAPI vkDestroyObject(VkObject object) VK_LAYER_EXPORT VkResult VKAPI vkGetObjectInfo(VkBaseObject object, VkObjectInfoType infoType, size_t* pDataSize, void* pData) { // TODO : What to track here? - // Could potentially save returned mem requirements and validate values passed into BindObjectMemory for this object + // Could potentially save returned mem requirements and validate values passed into QueueBindObjectMemory for this object // From spec : The only objects that are guaranteed to have no external memory requirements are devices, queues, command buffers, shaders and memory objects. 
VkResult result = nextTable.GetObjectInfo(object, infoType, pDataSize, pData); return result; } -VK_LAYER_EXPORT VkResult VKAPI vkBindObjectMemory(VkObject object, uint32_t allocationIdx, VkGpuMemory mem, VkGpuSize offset) +VK_LAYER_EXPORT VkResult VKAPI vkQueueBindObjectMemory(VkQueue queue, VkObject object, uint32_t allocationIdx, VkGpuMemory mem, VkGpuSize offset) { - VkResult result = nextTable.BindObjectMemory(object, allocationIdx, mem, offset); + VkResult result = nextTable.QueueBindObjectMemory(queue, object, allocationIdx, mem, offset); loader_platform_thread_lock_mutex(&globalLock); // Track objects tied to memory if (VK_FALSE == updateObjectBinding(object, mem)) { @@ -2013,8 +2031,8 @@ VK_LAYER_EXPORT void* VKAPI vkGetProcAddr(VkPhysicalGpu gpu, const char* funcNam return (void*) vkDestroyObject; if (!strcmp(funcName, "vkGetObjectInfo")) return (void*) vkGetObjectInfo; - if (!strcmp(funcName, "vkBindObjectMemory")) - return (void*) vkBindObjectMemory; + if (!strcmp(funcName, "vkQueueBindObjectMemory")) + return (void*) vkQueueBindObjectMemory; if (!strcmp(funcName, "vkCreateFence")) return (void*) vkCreateFence; if (!strcmp(funcName, "vkGetFenceStatus")) diff --git a/layers/param_checker.cpp b/layers/param_checker.cpp index 2fe5ee5e..008aa7be 100644 --- a/layers/param_checker.cpp +++ b/layers/param_checker.cpp @@ -514,32 +514,32 @@ VK_LAYER_EXPORT VkResult VKAPI vkGetObjectInfo(VkBaseObject object, VkObjectInfo return result; } -VK_LAYER_EXPORT VkResult VKAPI vkBindObjectMemory(VkObject object, uint32_t allocationIdx, VkGpuMemory mem, VkGpuSize offset) +VK_LAYER_EXPORT VkResult VKAPI vkQueueBindObjectMemory(VkQueue queue, VkObject object, uint32_t allocationIdx, VkGpuMemory mem, VkGpuSize offset) { - VkResult result = nextTable.BindObjectMemory(object, allocationIdx, mem, offset); + VkResult result = nextTable.QueueBindObjectMemory(queue, object, allocationIdx, mem, offset); return result; } -VK_LAYER_EXPORT VkResult VKAPI 
vkBindObjectMemoryRange(VkObject object, uint32_t allocationIdx, VkGpuSize rangeOffset, VkGpuSize rangeSize, VkGpuMemory mem, VkGpuSize memOffset) +VK_LAYER_EXPORT VkResult VKAPI vkQueueBindObjectMemoryRange(VkQueue queue, VkObject object, uint32_t allocationIdx, VkGpuSize rangeOffset, VkGpuSize rangeSize, VkGpuMemory mem, VkGpuSize memOffset) { - VkResult result = nextTable.BindObjectMemoryRange(object, allocationIdx, rangeOffset, rangeSize, mem, memOffset); + VkResult result = nextTable.QueueBindObjectMemoryRange(queue, object, allocationIdx, rangeOffset, rangeSize, mem, memOffset); return result; } -VK_LAYER_EXPORT VkResult VKAPI vkBindImageMemoryRange(VkImage image, uint32_t allocationIdx, const VkImageMemoryBindInfo* bindInfo, VkGpuMemory mem, VkGpuSize memOffset) +VK_LAYER_EXPORT VkResult VKAPI vkQueueBindImageMemoryRange(VkQueue queue, VkImage image, uint32_t allocationIdx, const VkImageMemoryBindInfo* bindInfo, VkGpuMemory mem, VkGpuSize memOffset) { char str[1024]; if (!bindInfo) { - sprintf(str, "Struct ptr parameter bindInfo to function BindImageMemoryRange is NULL."); + sprintf(str, "Struct ptr parameter bindInfo to function QueueBindImageMemoryRange is NULL."); layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str); } else if (!vk_validate_vkimagememorybindinfo(bindInfo)) { sprintf(str, "Parameter bindInfo to function BindImageMemoryRange contains an invalid value."); layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str); } - VkResult result = nextTable.BindImageMemoryRange(image, allocationIdx, bindInfo, mem, memOffset); + VkResult result = nextTable.QueueBindImageMemoryRange(queue, image, allocationIdx, bindInfo, mem, memOffset); return result; } @@ -354,22 +354,25 @@ core = Extension( Param("size_t*", "pDataSize"), Param("void*", "pData")]), - Proto("VkResult", "BindObjectMemory", - [Param("VkObject", "object"), + Proto("VkResult", "QueueBindObjectMemory", + [Param("VkQueue", "queue"), + 
Param("VkObject", "object"), Param("uint32_t", "allocationIdx"), Param("VkGpuMemory", "mem"), Param("VkGpuSize", "offset")]), - Proto("VkResult", "BindObjectMemoryRange", - [Param("VkObject", "object"), + Proto("VkResult", "QueueBindObjectMemoryRange", + [Param("VkQueue", "queue"), + Param("VkObject", "object"), Param("uint32_t", "allocationIdx"), Param("VkGpuSize", "rangeOffset"), Param("VkGpuSize", "rangeSize"), Param("VkGpuMemory", "mem"), Param("VkGpuSize", "memOffset")]), - Proto("VkResult", "BindImageMemoryRange", - [Param("VkImage", "image"), + Proto("VkResult", "QueueBindImageMemoryRange", + [Param("VkQueue", "queue"), + Param("VkImage", "image"), Param("uint32_t", "allocationIdx"), Param("const VkImageMemoryBindInfo*", "bindInfo"), Param("VkGpuMemory", "mem"), |
