aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorCourtney Goeltzenleuchter <courtney@LunarG.com>2015-04-10 08:34:15 -0600
committerChia-I Wu <olv@lunarg.com>2015-04-16 17:48:19 +0800
commit78351a69dfc6e3aca0490c4de282dc84d66db382 (patch)
tree622ee0ae1f7182bcc4ba00ba92ca796a02074477
parent1e1bc3c8a23a245aca2cf3b6779e88afa2112e16 (diff)
downloadusermoji-78351a69dfc6e3aca0490c4de282dc84d66db382.tar.xz
vulkan: beautification changes
TODO: verify vk-layer-generate.py TODO: need to rename object tracker generator v2: fix "python3 vulkan.py" fix dead cod in tri introduced by rebase beautify wsi_null.c (olv)
-rw-r--r--demos/cube.c320
-rw-r--r--demos/tri.c306
-rw-r--r--demos/vulkaninfo.c74
-rw-r--r--docs/vk_ds.dot20
-rw-r--r--docs/vk_full_pipeline_ds.dot72
-rw-r--r--docs/vk_graphics_pipeline.dot52
-rw-r--r--icd/README.md2
-rw-r--r--icd/common/icd-format.c20
-rw-r--r--icd/common/icd-format.h30
-rw-r--r--icd/common/icd-instance.c20
-rw-r--r--icd/common/icd-instance.h18
-rw-r--r--icd/nulldrv/nulldrv.c954
-rw-r--r--icd/nulldrv/nulldrv.h14
-rw-r--r--include/vkDbg.h56
-rw-r--r--include/vkLayer.h260
-rw-r--r--include/vkWsiX11Ext.h42
-rw-r--r--include/vk_platform.h6
-rw-r--r--include/vulkan.h2378
-rw-r--r--layers/basic.cpp36
-rw-r--r--layers/draw_state.cpp694
-rw-r--r--layers/draw_state.h106
-rw-r--r--layers/glave_snapshot.c424
-rw-r--r--layers/glave_snapshot.h14
-rw-r--r--layers/layers_msg.h4
-rw-r--r--layers/mem_tracker.cpp526
-rw-r--r--layers/mem_tracker.h44
-rw-r--r--layers/multi.cpp62
-rw-r--r--layers/object_track.h4
-rw-r--r--layers/param_checker.cpp560
-rw-r--r--loader/loader.c86
-rw-r--r--loader/loader.h6
-rwxr-xr-xvk-generate.py10
-rwxr-xr-xvk-layer-generate.py76
-rwxr-xr-xvk_helper.py110
-rwxr-xr-xvulkan.py966
35 files changed, 4184 insertions, 4188 deletions
diff --git a/demos/cube.c b/demos/cube.c
index 80ed290d..5faae4bb 100644
--- a/demos/cube.c
+++ b/demos/cube.c
@@ -22,14 +22,14 @@
* structure to track all objects related to a texture.
*/
struct texture_object {
- VK_SAMPLER sampler;
+ VkSampler sampler;
- VK_IMAGE image;
- VK_IMAGE_LAYOUT imageLayout;
+ VkImage image;
+ VkImageLayout imageLayout;
uint32_t num_mem;
- VK_GPU_MEMORY *mem;
- VK_IMAGE_VIEW view;
+ VkGpuMemory *mem;
+ VkImageView view;
int32_t tex_width, tex_height;
};
@@ -202,55 +202,55 @@ struct demo {
xcb_screen_t *screen;
bool use_staging_buffer;
- VK_INSTANCE inst;
- VK_PHYSICAL_GPU gpu;
- VK_DEVICE device;
- VK_QUEUE queue;
+ VkInstance inst;
+ VkPhysicalGpu gpu;
+ VkDevice device;
+ VkQueue queue;
uint32_t graphics_queue_node_index;
- VK_PHYSICAL_GPU_PROPERTIES *gpu_props;
- VK_PHYSICAL_GPU_QUEUE_PROPERTIES *queue_props;
+ VkPhysicalGpuProperties *gpu_props;
+ VkPhysicalGpuQueueProperties *queue_props;
- VK_FRAMEBUFFER framebuffer;
+ VkFramebuffer framebuffer;
int width, height;
- VK_FORMAT format;
+ VkFormat format;
struct {
- VK_IMAGE image;
- VK_GPU_MEMORY mem;
- VK_CMD_BUFFER cmd;
+ VkImage image;
+ VkGpuMemory mem;
+ VkCmdBuffer cmd;
- VK_COLOR_ATTACHMENT_VIEW view;
- VK_FENCE fence;
+ VkColorAttachmentView view;
+ VkFence fence;
} buffers[DEMO_BUFFER_COUNT];
struct {
- VK_FORMAT format;
+ VkFormat format;
- VK_IMAGE image;
+ VkImage image;
uint32_t num_mem;
- VK_GPU_MEMORY *mem;
- VK_DEPTH_STENCIL_VIEW view;
+ VkGpuMemory *mem;
+ VkDepthStencilView view;
} depth;
struct texture_object textures[DEMO_TEXTURE_COUNT];
struct {
- VK_BUFFER buf;
+ VkBuffer buf;
uint32_t num_mem;
- VK_GPU_MEMORY *mem;
- VK_BUFFER_VIEW view;
- VK_BUFFER_VIEW_ATTACH_INFO attach;
+ VkGpuMemory *mem;
+ VkBufferView view;
+ VkBufferViewAttachInfo attach;
} uniform_data;
- VK_CMD_BUFFER cmd; // Buffer for initialization commands
- VK_DESCRIPTOR_SET_LAYOUT_CHAIN desc_layout_chain;
- VK_DESCRIPTOR_SET_LAYOUT desc_layout;
- VK_PIPELINE pipeline;
+ VkCmdBuffer cmd; // Buffer for initialization commands
+ VkDescriptorSetLayoutChain desc_layout_chain;
+ VkDescriptorSetLayout desc_layout;
+ VkPipeline pipeline;
- VK_DYNAMIC_VP_STATE_OBJECT viewport;
- VK_DYNAMIC_RS_STATE_OBJECT raster;
- VK_DYNAMIC_CB_STATE_OBJECT color_blend;
- VK_DYNAMIC_DS_STATE_OBJECT depth_stencil;
+ VkDynamicVpStateObject viewport;
+ VkDynamicRsStateObject raster;
+ VkDynamicCbStateObject color_blend;
+ VkDynamicDsStateObject depth_stencil;
mat4x4 projection_matrix;
mat4x4 view_matrix;
@@ -260,8 +260,8 @@ struct demo {
float spin_increment;
bool pause;
- VK_DESCRIPTOR_POOL desc_pool;
- VK_DESCRIPTOR_SET desc_set;
+ VkDescriptorPool desc_pool;
+ VkDescriptorSet desc_set;
xcb_window_t window;
xcb_intern_atom_reply_t *atom_wm_delete_window;
@@ -272,7 +272,7 @@ struct demo {
static void demo_flush_init_cmd(struct demo *demo)
{
- VK_RESULT err;
+ VkResult err;
if (demo->cmd == VK_NULL_HANDLE)
return;
@@ -280,7 +280,7 @@ static void demo_flush_init_cmd(struct demo *demo)
err = vkEndCommandBuffer(demo->cmd);
assert(!err);
- const VK_CMD_BUFFER cmd_bufs[] = { demo->cmd };
+ const VkCmdBuffer cmd_bufs[] = { demo->cmd };
err = vkQueueSubmit(demo->queue, 1, cmd_bufs, VK_NULL_HANDLE);
assert(!err);
@@ -294,7 +294,7 @@ static void demo_flush_init_cmd(struct demo *demo)
static void demo_add_mem_refs(
struct demo *demo,
- int num_refs, VK_GPU_MEMORY *mem)
+ int num_refs, VkGpuMemory *mem)
{
for (int i = 0; i < num_refs; i++) {
vkQueueAddMemReference(demo->queue, mem[i]);
@@ -303,7 +303,7 @@ static void demo_add_mem_refs(
static void demo_remove_mem_refs(
struct demo *demo,
- int num_refs, VK_GPU_MEMORY *mem)
+ int num_refs, VkGpuMemory *mem)
{
for (int i = 0; i < num_refs; i++) {
vkQueueRemoveMemReference(demo->queue, mem[i]);
@@ -312,14 +312,14 @@ static void demo_remove_mem_refs(
static void demo_set_image_layout(
struct demo *demo,
- VK_IMAGE image,
- VK_IMAGE_LAYOUT old_image_layout,
- VK_IMAGE_LAYOUT new_image_layout)
+ VkImage image,
+ VkImageLayout old_image_layout,
+ VkImageLayout new_image_layout)
{
- VK_RESULT err;
+ VkResult err;
if (demo->cmd == VK_NULL_HANDLE) {
- const VK_CMD_BUFFER_CREATE_INFO cmd = {
+ const VkCmdBufferCreateInfo cmd = {
.sType = VK_STRUCTURE_TYPE_CMD_BUFFER_CREATE_INFO,
.pNext = NULL,
.queueNodeIndex = demo->graphics_queue_node_index,
@@ -329,7 +329,7 @@ static void demo_set_image_layout(
err = vkCreateCommandBuffer(demo->device, &cmd, &demo->cmd);
assert(!err);
- VK_CMD_BUFFER_BEGIN_INFO cmd_buf_info = {
+ VkCmdBufferBeginInfo cmd_buf_info = {
.sType = VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO,
.pNext = NULL,
.flags = VK_CMD_BUFFER_OPTIMIZE_GPU_SMALL_BATCH_BIT |
@@ -338,7 +338,7 @@ static void demo_set_image_layout(
err = vkBeginCommandBuffer(demo->cmd, &cmd_buf_info);
}
- VK_IMAGE_MEMORY_BARRIER image_memory_barrier = {
+ VkImageMemoryBarrier image_memory_barrier = {
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.pNext = NULL,
.outputMask = 0,
@@ -359,11 +359,11 @@ static void demo_set_image_layout(
image_memory_barrier.outputMask = VK_MEMORY_OUTPUT_COPY_BIT | VK_MEMORY_OUTPUT_CPU_WRITE_BIT;
}
- VK_IMAGE_MEMORY_BARRIER *pmemory_barrier = &image_memory_barrier;
+ VkImageMemoryBarrier *pmemory_barrier = &image_memory_barrier;
- VK_PIPE_EVENT set_events[] = { VK_PIPE_EVENT_TOP_OF_PIPE };
+ VkPipeEvent set_events[] = { VK_PIPE_EVENT_TOP_OF_PIPE };
- VK_PIPELINE_BARRIER pipeline_barrier;
+ VkPipelineBarrier pipeline_barrier;
pipeline_barrier.sType = VK_STRUCTURE_TYPE_PIPELINE_BARRIER;
pipeline_barrier.pNext = NULL;
pipeline_barrier.eventCount = 1;
@@ -375,44 +375,44 @@ static void demo_set_image_layout(
vkCmdPipelineBarrier(demo->cmd, &pipeline_barrier);
}
-static void demo_draw_build_cmd(struct demo *demo, VK_CMD_BUFFER cmd_buf)
+static void demo_draw_build_cmd(struct demo *demo, VkCmdBuffer cmd_buf)
{
- const VK_COLOR_ATTACHMENT_BIND_INFO color_attachment = {
+ const VkColorAttachmentBindInfo color_attachment = {
.view = demo->buffers[demo->current_buffer].view,
.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
};
- const VK_DEPTH_STENCIL_BIND_INFO depth_stencil = {
+ const VkDepthStencilBindInfo depth_stencil = {
.view = demo->depth.view,
.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
};
- const VK_CLEAR_COLOR clear_color = {
+ const VkClearColor clear_color = {
.color.floatColor = { 0.2f, 0.2f, 0.2f, 0.2f },
.useRawValue = false,
};
const float clear_depth = 1.0f;
- VK_IMAGE_SUBRESOURCE_RANGE clear_range;
- VK_CMD_BUFFER_BEGIN_INFO cmd_buf_info = {
+ VkImageSubresourceRange clear_range;
+ VkCmdBufferBeginInfo cmd_buf_info = {
.sType = VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO,
.pNext = NULL,
.flags = VK_CMD_BUFFER_OPTIMIZE_GPU_SMALL_BATCH_BIT |
VK_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT,
};
- VK_RESULT err;
- VK_ATTACHMENT_LOAD_OP load_op = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
- VK_ATTACHMENT_STORE_OP store_op = VK_ATTACHMENT_STORE_OP_DONT_CARE;
- const VK_FRAMEBUFFER_CREATE_INFO fb_info = {
+ VkResult err;
+ VkAttachmentLoadOp load_op = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ VkAttachmentStoreOp store_op = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ const VkFramebufferCreateInfo fb_info = {
.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
.pNext = NULL,
.colorAttachmentCount = 1,
- .pColorAttachments = (VK_COLOR_ATTACHMENT_BIND_INFO*) &color_attachment,
- .pDepthStencilAttachment = (VK_DEPTH_STENCIL_BIND_INFO*) &depth_stencil,
+ .pColorAttachments = (VkColorAttachmentBindInfo*) &color_attachment,
+ .pDepthStencilAttachment = (VkDepthStencilBindInfo*) &depth_stencil,
.sampleCount = 1,
.width = demo->width,
.height = demo->height,
.layers = 1,
};
- VK_RENDER_PASS_CREATE_INFO rp_info;
- VK_RENDER_PASS_BEGIN rp_begin;
+ VkRenderPassCreateInfo rp_info;
+ VkRenderPassBegin rp_begin;
memset(&rp_info, 0 , sizeof(rp_info));
err = vkCreateFramebuffer(demo->device, &fb_info, &rp_begin.framebuffer);
@@ -484,7 +484,7 @@ void demo_update_data_buffer(struct demo *demo)
mat4x4 MVP, Model, VP;
int matrixSize = sizeof(MVP);
uint8_t *pData;
- VK_RESULT err;
+ VkResult err;
mat4x4_mul(VP, demo->projection_matrix, demo->view_matrix);
@@ -511,8 +511,8 @@ static void demo_draw(struct demo *demo)
.async = true,
.flip = false,
};
- VK_FENCE fence = demo->buffers[demo->current_buffer].fence;
- VK_RESULT err;
+ VkFence fence = demo->buffers[demo->current_buffer].fence;
+ VkResult err;
err = vkWaitForFences(demo->device, 1, &fence, VK_TRUE, ~((uint64_t) 0));
assert(err == VK_SUCCESS || err == VK_ERROR_UNAVAILABLE);
@@ -538,16 +538,16 @@ static void demo_prepare_buffers(struct demo *demo)
},
.flags = 0,
};
- const VK_FENCE_CREATE_INFO fence = {
+ const VkFenceCreateInfo fence = {
.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
.pNext = NULL,
.flags = 0,
};
- VK_RESULT err;
+ VkResult err;
uint32_t i;
for (i = 0; i < DEMO_BUFFER_COUNT; i++) {
- VK_COLOR_ATTACHMENT_VIEW_CREATE_INFO color_attachment_view = {
+ VkColorAttachmentViewCreateInfo color_attachment_view = {
.sType = VK_STRUCTURE_TYPE_COLOR_ATTACHMENT_VIEW_CREATE_INFO,
.pNext = NULL,
.format = demo->format,
@@ -580,8 +580,8 @@ static void demo_prepare_buffers(struct demo *demo)
static void demo_prepare_depth(struct demo *demo)
{
- const VK_FORMAT depth_format = VK_FMT_D16_UNORM;
- const VK_IMAGE_CREATE_INFO image = {
+ const VkFormat depth_format = VK_FMT_D16_UNORM;
+ const VkImageCreateInfo image = {
.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
.pNext = NULL,
.imageType = VK_IMAGE_2D,
@@ -606,7 +606,7 @@ static void demo_prepare_depth(struct demo *demo)
.memType = VK_MEMORY_TYPE_IMAGE,
.memPriority = VK_MEMORY_PRIORITY_NORMAL,
};
- VK_DEPTH_STENCIL_VIEW_CREATE_INFO view = {
+ VkDepthStencilViewCreateInfo view = {
.sType = VK_STRUCTURE_TYPE_DEPTH_STENCIL_VIEW_CREATE_INFO,
.pNext = NULL,
.image = VK_NULL_HANDLE,
@@ -615,11 +615,11 @@ static void demo_prepare_depth(struct demo *demo)
.arraySize = 1,
.flags = 0,
};
- VK_MEMORY_REQUIREMENTS *mem_reqs;
- size_t mem_reqs_size = sizeof(VK_MEMORY_REQUIREMENTS);
- VK_IMAGE_MEMORY_REQUIREMENTS img_reqs;
- size_t img_reqs_size = sizeof(VK_IMAGE_MEMORY_REQUIREMENTS);
- VK_RESULT err;
+ VkMemoryRequirements *mem_reqs;
+ size_t mem_reqs_size = sizeof(VkMemoryRequirements);
+ VkImageMemoryRequirements img_reqs;
+ size_t img_reqs_size = sizeof(VkImageMemoryRequirements);
+ VkResult err;
uint32_t num_allocations = 0;
size_t num_alloc_size = sizeof(num_allocations);
@@ -632,17 +632,17 @@ static void demo_prepare_depth(struct demo *demo)
err = vkGetObjectInfo(demo->depth.image, VK_INFO_TYPE_MEMORY_ALLOCATION_COUNT, &num_alloc_size, &num_allocations);
assert(!err && num_alloc_size == sizeof(num_allocations));
- mem_reqs = malloc(num_allocations * sizeof(VK_MEMORY_REQUIREMENTS));
- demo->depth.mem = malloc(num_allocations * sizeof(VK_GPU_MEMORY));
+ mem_reqs = malloc(num_allocations * sizeof(VkMemoryRequirements));
+ demo->depth.mem = malloc(num_allocations * sizeof(VkGpuMemory));
demo->depth.num_mem = num_allocations;
err = vkGetObjectInfo(demo->depth.image,
VK_INFO_TYPE_MEMORY_REQUIREMENTS,
&mem_reqs_size, mem_reqs);
- assert(!err && mem_reqs_size == num_allocations * sizeof(VK_MEMORY_REQUIREMENTS));
+ assert(!err && mem_reqs_size == num_allocations * sizeof(VkMemoryRequirements));
err = vkGetObjectInfo(demo->depth.image,
VK_INFO_TYPE_IMAGE_MEMORY_REQUIREMENTS,
&img_reqs_size, &img_reqs);
- assert(!err && img_reqs_size == sizeof(VK_IMAGE_MEMORY_REQUIREMENTS));
+ assert(!err && img_reqs_size == sizeof(VkImageMemoryRequirements));
img_alloc.usage = img_reqs.usage;
img_alloc.formatClass = img_reqs.formatClass;
img_alloc.samples = img_reqs.samples;
@@ -689,7 +689,7 @@ static void demo_prepare_depth(struct demo *demo)
*
*/
bool loadTexture(const char *filename, uint8_t *rgba_data,
- VK_SUBRESOURCE_LAYOUT *layout,
+ VkSubresourceLayout *layout,
int32_t *width, int32_t *height)
{
//header for testing if it is a png
@@ -824,13 +824,13 @@ bool loadTexture(const char *filename, uint8_t *rgba_data,
static void demo_prepare_texture_image(struct demo *demo,
const char *filename,
struct texture_object *tex_obj,
- VK_IMAGE_TILING tiling,
- VK_FLAGS mem_props)
+ VkImageTiling tiling,
+ VkFlags mem_props)
{
- const VK_FORMAT tex_format = VK_FMT_B8G8R8A8_UNORM;
+ const VkFormat tex_format = VK_FMT_B8G8R8A8_UNORM;
int32_t tex_width;
int32_t tex_height;
- VK_RESULT err;
+ VkResult err;
err = loadTexture(filename, NULL, NULL, &tex_width, &tex_height);
assert(err);
@@ -838,7 +838,7 @@ static void demo_prepare_texture_image(struct demo *demo,
tex_obj->tex_width = tex_width;
tex_obj->tex_height = tex_height;
- const VK_IMAGE_CREATE_INFO image_create_info = {
+ const VkImageCreateInfo image_create_info = {
.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
.pNext = NULL,
.imageType = VK_IMAGE_2D,
@@ -868,12 +868,12 @@ static void demo_prepare_texture_image(struct demo *demo,
.memPriority = VK_MEMORY_PRIORITY_NORMAL,
};
- VK_MEMORY_REQUIREMENTS *mem_reqs;
- size_t mem_reqs_size = sizeof(VK_MEMORY_REQUIREMENTS);
- VK_BUFFER_MEMORY_REQUIREMENTS buf_reqs;
- size_t buf_reqs_size = sizeof(VK_BUFFER_MEMORY_REQUIREMENTS);
- VK_IMAGE_MEMORY_REQUIREMENTS img_reqs;
- size_t img_reqs_size = sizeof(VK_IMAGE_MEMORY_REQUIREMENTS);
+ VkMemoryRequirements *mem_reqs;
+ size_t mem_reqs_size = sizeof(VkMemoryRequirements);
+ VkBufferMemoryRequirements buf_reqs;
+ size_t buf_reqs_size = sizeof(VkBufferMemoryRequirements);
+ VkImageMemoryRequirements img_reqs;
+ size_t img_reqs_size = sizeof(VkImageMemoryRequirements);
uint32_t num_allocations = 0;
size_t num_alloc_size = sizeof(num_allocations);
@@ -885,16 +885,16 @@ static void demo_prepare_texture_image(struct demo *demo,
VK_INFO_TYPE_MEMORY_ALLOCATION_COUNT,
&num_alloc_size, &num_allocations);
assert(!err && num_alloc_size == sizeof(num_allocations));
- mem_reqs = malloc(num_allocations * sizeof(VK_MEMORY_REQUIREMENTS));
- tex_obj->mem = malloc(num_allocations * sizeof(VK_GPU_MEMORY));
+ mem_reqs = malloc(num_allocations * sizeof(VkMemoryRequirements));
+ tex_obj->mem = malloc(num_allocations * sizeof(VkGpuMemory));
err = vkGetObjectInfo(tex_obj->image,
VK_INFO_TYPE_MEMORY_REQUIREMENTS,
&mem_reqs_size, mem_reqs);
- assert(!err && mem_reqs_size == num_allocations * sizeof(VK_MEMORY_REQUIREMENTS));
+ assert(!err && mem_reqs_size == num_allocations * sizeof(VkMemoryRequirements));
err = vkGetObjectInfo(tex_obj->image,
VK_INFO_TYPE_IMAGE_MEMORY_REQUIREMENTS,
&img_reqs_size, &img_reqs);
- assert(!err && img_reqs_size == sizeof(VK_IMAGE_MEMORY_REQUIREMENTS));
+ assert(!err && img_reqs_size == sizeof(VkImageMemoryRequirements));
img_alloc.usage = img_reqs.usage;
img_alloc.formatClass = img_reqs.formatClass;
img_alloc.samples = img_reqs.samples;
@@ -907,7 +907,7 @@ static void demo_prepare_texture_image(struct demo *demo,
err = vkGetObjectInfo(tex_obj->image,
VK_INFO_TYPE_BUFFER_MEMORY_REQUIREMENTS,
&buf_reqs_size, &buf_reqs);
- assert(!err && buf_reqs_size == sizeof(VK_BUFFER_MEMORY_REQUIREMENTS));
+ assert(!err && buf_reqs_size == sizeof(VkBufferMemoryRequirements));
buf_alloc.usage = buf_reqs.usage;
img_alloc.pNext = &buf_alloc;
} else {
@@ -929,13 +929,13 @@ static void demo_prepare_texture_image(struct demo *demo,
tex_obj->num_mem = num_allocations;
if (mem_props & VK_MEMORY_PROPERTY_CPU_VISIBLE_BIT) {
- const VK_IMAGE_SUBRESOURCE subres = {
+ const VkImageSubresource subres = {
.aspect = VK_IMAGE_ASPECT_COLOR,
.mipLevel = 0,
.arraySlice = 0,
};
- VK_SUBRESOURCE_LAYOUT layout;
- size_t layout_size = sizeof(VK_SUBRESOURCE_LAYOUT);
+ VkSubresourceLayout layout;
+ size_t layout_size = sizeof(VkSubresourceLayout);
void *data;
err = vkGetImageSubresourceInfo(tex_obj->image, &subres,
@@ -977,10 +977,10 @@ static void demo_destroy_texture_image(struct texture_object *tex_objs)
static void demo_prepare_textures(struct demo *demo)
{
- const VK_FORMAT tex_format = VK_FMT_R8G8B8A8_UNORM;
- VK_FORMAT_PROPERTIES props;
+ const VkFormat tex_format = VK_FMT_R8G8B8A8_UNORM;
+ VkFormatProperties props;
size_t size = sizeof(props);
- VK_RESULT err;
+ VkResult err;
uint32_t i;
err = vkGetFormatInfo(demo->device, tex_format,
@@ -1013,7 +1013,7 @@ static void demo_prepare_textures(struct demo *demo)
demo->textures[i].imageLayout,
VK_IMAGE_LAYOUT_TRANSFER_DESTINATION_OPTIMAL);
- VK_IMAGE_COPY copy_region = {
+ VkImageCopy copy_region = {
.srcSubresource = { VK_IMAGE_ASPECT_COLOR, 0, 0 },
.srcOffset = { 0, 0, 0 },
.destSubresource = { VK_IMAGE_ASPECT_COLOR, 0, 0 },
@@ -1041,7 +1041,7 @@ static void demo_prepare_textures(struct demo *demo)
assert(!"No support for tB8G8R8A8_UNORM as texture image format");
}
- const VK_SAMPLER_CREATE_INFO sampler = {
+ const VkSamplerCreateInfo sampler = {
.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
.pNext = NULL,
.magFilter = VK_TEX_FILTER_NEAREST,
@@ -1058,7 +1058,7 @@ static void demo_prepare_textures(struct demo *demo)
.borderColorType = VK_BORDER_COLOR_OPAQUE_WHITE,
};
- VK_IMAGE_VIEW_CREATE_INFO view = {
+ VkImageViewCreateInfo view = {
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
.pNext = NULL,
.image = VK_NULL_HANDLE,
@@ -1101,16 +1101,16 @@ void demo_prepare_cube_data_buffer(struct demo *demo)
.memType = VK_MEMORY_TYPE_BUFFER,
.memPriority = VK_MEMORY_PRIORITY_NORMAL,
};
- VK_MEMORY_REQUIREMENTS *mem_reqs;
- size_t mem_reqs_size = sizeof(VK_MEMORY_REQUIREMENTS);
- VK_BUFFER_MEMORY_REQUIREMENTS buf_reqs;
- size_t buf_reqs_size = sizeof(VK_BUFFER_MEMORY_REQUIREMENTS);
+ VkMemoryRequirements *mem_reqs;
+ size_t mem_reqs_size = sizeof(VkMemoryRequirements);
+ VkBufferMemoryRequirements buf_reqs;
+ size_t buf_reqs_size = sizeof(VkBufferMemoryRequirements);
uint32_t num_allocations = 0;
size_t num_alloc_size = sizeof(num_allocations);
uint8_t *pData;
int i;
mat4x4 MVP, VP;
- VK_RESULT err;
+ VkResult err;
struct vktexcube_vs_uniform data;
mat4x4_mul(VP, demo->projection_matrix, demo->view_matrix);
@@ -1140,8 +1140,8 @@ void demo_prepare_cube_data_buffer(struct demo *demo)
VK_INFO_TYPE_MEMORY_ALLOCATION_COUNT,
&num_alloc_size, &num_allocations);
assert(!err && num_alloc_size == sizeof(num_allocations));
- mem_reqs = malloc(num_allocations * sizeof(VK_MEMORY_REQUIREMENTS));
- demo->uniform_data.mem = malloc(num_allocations * sizeof(VK_GPU_MEMORY));
+ mem_reqs = malloc(num_allocations * sizeof(VkMemoryRequirements));
+ demo->uniform_data.mem = malloc(num_allocations * sizeof(VkGpuMemory));
demo->uniform_data.num_mem = num_allocations;
err = vkGetObjectInfo(demo->uniform_data.buf,
VK_INFO_TYPE_MEMORY_REQUIREMENTS,
@@ -1150,7 +1150,7 @@ void demo_prepare_cube_data_buffer(struct demo *demo)
err = vkGetObjectInfo(demo->uniform_data.buf,
VK_INFO_TYPE_BUFFER_MEMORY_REQUIREMENTS,
&buf_reqs_size, &buf_reqs);
- assert(!err && buf_reqs_size == sizeof(VK_BUFFER_MEMORY_REQUIREMENTS));
+ assert(!err && buf_reqs_size == sizeof(VkBufferMemoryRequirements));
buf_alloc.usage = buf_reqs.usage;
for (uint32_t i = 0; i < num_allocations; i ++) {
alloc_info.allocationSize = mem_reqs[i].size;
@@ -1188,7 +1188,7 @@ void demo_prepare_cube_data_buffer(struct demo *demo)
static void demo_prepare_descriptor_layout(struct demo *demo)
{
- const VK_DESCRIPTOR_SET_LAYOUT_BINDING layout_bindings[2] = {
+ const VkDescriptorSetLayoutBinding layout_bindings[2] = {
[0] = {
.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
.count = 1,
@@ -1202,13 +1202,13 @@ static void demo_prepare_descriptor_layout(struct demo *demo)
.pImmutableSamplers = NULL,
},
};
- const VK_DESCRIPTOR_SET_LAYOUT_CREATE_INFO descriptor_layout = {
+ const VkDescriptorSetLayoutCreateInfo descriptor_layout = {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
.pNext = NULL,
.count = 2,
.pBinding = layout_bindings,
};
- VK_RESULT err;
+ VkResult err;
err = vkCreateDescriptorSetLayout(demo->device,
&descriptor_layout, &demo->desc_layout);
@@ -1219,14 +1219,14 @@ static void demo_prepare_descriptor_layout(struct demo *demo)
assert(!err);
}
-static VK_SHADER demo_prepare_shader(struct demo *demo,
- VK_PIPELINE_SHADER_STAGE stage,
+static VkShader demo_prepare_shader(struct demo *demo,
+ VkPipelineShaderStage stage,
const void *code,
size_t size)
{
- VK_SHADER_CREATE_INFO createInfo;
- VK_SHADER shader;
- VK_RESULT err;
+ VkShaderCreateInfo createInfo;
+ VkShader shader;
+ VkResult err;
createInfo.sType = VK_STRUCTURE_TYPE_SHADER_CREATE_INFO;
@@ -1248,7 +1248,7 @@ static VK_SHADER demo_prepare_shader(struct demo *demo,
createInfo.pCode = malloc(createInfo.codeSize);
createInfo.flags = 0;
- /* try version 0 first: VK_PIPELINE_SHADER_STAGE followed by GLSL */
+ /* try version 0 first: VkPipelineShaderStage followed by GLSL */
((uint32_t *) createInfo.pCode)[0] = ICD_SPV_MAGIC;
((uint32_t *) createInfo.pCode)[1] = 0;
((uint32_t *) createInfo.pCode)[2] = stage;
@@ -1285,7 +1285,7 @@ char *demo_read_spv(const char *filename, size_t *psize)
return shader_code;
}
-static VK_SHADER demo_prepare_vs(struct demo *demo)
+static VkShader demo_prepare_vs(struct demo *demo)
{
#ifdef EXTERNAL_SPV
void *vertShaderCode;
@@ -1321,7 +1321,7 @@ static VK_SHADER demo_prepare_vs(struct demo *demo)
#endif
}
-static VK_SHADER demo_prepare_fs(struct demo *demo)
+static VkShader demo_prepare_fs(struct demo *demo)
{
#ifdef EXTERNAL_SPV
void *fragShaderCode;
@@ -1351,16 +1351,16 @@ static VK_SHADER demo_prepare_fs(struct demo *demo)
static void demo_prepare_pipeline(struct demo *demo)
{
- VK_GRAPHICS_PIPELINE_CREATE_INFO pipeline;
- VK_PIPELINE_IA_STATE_CREATE_INFO ia;
- VK_PIPELINE_RS_STATE_CREATE_INFO rs;
- VK_PIPELINE_CB_STATE_CREATE_INFO cb;
- VK_PIPELINE_DS_STATE_CREATE_INFO ds;
- VK_PIPELINE_SHADER_STAGE_CREATE_INFO vs;
- VK_PIPELINE_SHADER_STAGE_CREATE_INFO fs;
- VK_PIPELINE_VP_STATE_CREATE_INFO vp;
- VK_PIPELINE_MS_STATE_CREATE_INFO ms;
- VK_RESULT err;
+ VkGraphicsPipelineCreateInfo pipeline;
+ VkPipelineIaStateCreateInfo ia;
+ VkPipelineRsStateCreateInfo rs;
+ VkPipelineCbStateCreateInfo cb;
+ VkPipelineDsStateCreateInfo ds;
+ VkPipelineShaderStageCreateInfo vs;
+ VkPipelineShaderStageCreateInfo fs;
+ VkPipelineVpStateCreateInfo vp;
+ VkPipelineMsStateCreateInfo ms;
+ VkResult err;
memset(&pipeline, 0, sizeof(pipeline));
pipeline.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
@@ -1378,7 +1378,7 @@ static void demo_prepare_pipeline(struct demo *demo)
memset(&cb, 0, sizeof(cb));
cb.sType = VK_STRUCTURE_TYPE_PIPELINE_CB_STATE_CREATE_INFO;
- VK_PIPELINE_CB_ATTACHMENT_STATE att_state[1];
+ VkPipelineCbAttachmentState att_state[1];
memset(att_state, 0, sizeof(att_state));
att_state[0].format = demo->format;
att_state[0].channelWriteMask = 0xf;
@@ -1440,23 +1440,23 @@ static void demo_prepare_pipeline(struct demo *demo)
static void demo_prepare_dynamic_states(struct demo *demo)
{
- VK_DYNAMIC_VP_STATE_CREATE_INFO viewport_create;
- VK_DYNAMIC_RS_STATE_CREATE_INFO raster;
- VK_DYNAMIC_CB_STATE_CREATE_INFO color_blend;
- VK_DYNAMIC_DS_STATE_CREATE_INFO depth_stencil;
- VK_RESULT err;
+ VkDynamicVpStateCreateInfo viewport_create;
+ VkDynamicRsStateCreateInfo raster;
+ VkDynamicCbStateCreateInfo color_blend;
+ VkDynamicDsStateCreateInfo depth_stencil;
+ VkResult err;
memset(&viewport_create, 0, sizeof(viewport_create));
viewport_create.sType = VK_STRUCTURE_TYPE_DYNAMIC_VP_STATE_CREATE_INFO;
viewport_create.viewportAndScissorCount = 1;
- VK_VIEWPORT viewport;
+ VkViewport viewport;
memset(&viewport, 0, sizeof(viewport));
viewport.height = (float) demo->height;
viewport.width = (float) demo->width;
viewport.minDepth = (float) 0.0f;
viewport.maxDepth = (float) 1.0f;
viewport_create.pViewports = &viewport;
- VK_RECT scissor;
+ VkRect scissor;
memset(&scissor, 0, sizeof(scissor));
scissor.extent.width = demo->width;
scissor.extent.height = demo->height;
@@ -1502,7 +1502,7 @@ static void demo_prepare_dynamic_states(struct demo *demo)
static void demo_prepare_descriptor_pool(struct demo *demo)
{
- const VK_DESCRIPTOR_TYPE_COUNT type_counts[2] = {
+ const VkDescriptorTypeCount type_counts[2] = {
[0] = {
.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
.count = 1,
@@ -1512,13 +1512,13 @@ static void demo_prepare_descriptor_pool(struct demo *demo)
.count = DEMO_TEXTURE_COUNT,
},
};
- const VK_DESCRIPTOR_POOL_CREATE_INFO descriptor_pool = {
+ const VkDescriptorPoolCreateInfo descriptor_pool = {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
.pNext = NULL,
.count = 2,
.pTypeCount = type_counts,
};
- VK_RESULT err;
+ VkResult err;
err = vkCreateDescriptorPool(demo->device,
VK_DESCRIPTOR_POOL_USAGE_ONE_SHOT, 1,
@@ -1528,12 +1528,12 @@ static void demo_prepare_descriptor_pool(struct demo *demo)
static void demo_prepare_descriptor_set(struct demo *demo)
{
- VK_IMAGE_VIEW_ATTACH_INFO view_info[DEMO_TEXTURE_COUNT];
- VK_SAMPLER_IMAGE_VIEW_INFO combined_info[DEMO_TEXTURE_COUNT];
- VK_UPDATE_SAMPLER_TEXTURES update_fs;
- VK_UPDATE_BUFFERS update_vs;
+ VkImageViewAttachInfo view_info[DEMO_TEXTURE_COUNT];
+ VkSamplerImageViewInfo combined_info[DEMO_TEXTURE_COUNT];
+ VkUpdateSamplerTextures update_fs;
+ VkUpdateBuffers update_vs;
const void *update_array[2] = { &update_vs, &update_fs };
- VK_RESULT err;
+ VkResult err;
uint32_t count;
uint32_t i;
@@ -1577,13 +1577,13 @@ static void demo_prepare_descriptor_set(struct demo *demo)
static void demo_prepare(struct demo *demo)
{
- const VK_CMD_BUFFER_CREATE_INFO cmd = {
+ const VkCmdBufferCreateInfo cmd = {
.sType = VK_STRUCTURE_TYPE_CMD_BUFFER_CREATE_INFO,
.pNext = NULL,
.queueNodeIndex = demo->graphics_queue_node_index,
.flags = 0,
};
- VK_RESULT err;
+ VkResult err;
demo_prepare_buffers(demo);
demo_prepare_depth(demo);
@@ -1722,7 +1722,7 @@ static void demo_create_window(struct demo *demo)
static void demo_init_vk(struct demo *demo)
{
- const VK_APPLICATION_INFO app = {
+ const VkApplicationInfo app = {
.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
.pNext = NULL,
.pAppName = "cube",
@@ -1761,7 +1761,7 @@ static void demo_init_vk(struct demo *demo)
.maxValidationLevel = VK_VALIDATION_LEVEL_END_RANGE,
.flags = VK_DEVICE_CREATE_VALIDATION_BIT,
};
- VK_RESULT err;
+ VkResult err;
uint32_t gpu_count;
uint32_t i;
size_t data_size;
@@ -1795,7 +1795,7 @@ static void demo_init_vk(struct demo *demo)
&data_size, NULL);
assert(!err);
- demo->gpu_props = (VK_PHYSICAL_GPU_PROPERTIES *) malloc(data_size);
+ demo->gpu_props = (VkPhysicalGpuProperties *) malloc(data_size);
err = vkGetGpuInfo(demo->gpu, VK_INFO_TYPE_PHYSICAL_GPU_PROPERTIES,
&data_size, demo->gpu_props);
assert(!err);
@@ -1804,11 +1804,11 @@ static void demo_init_vk(struct demo *demo)
&data_size, NULL);
assert(!err);
- demo->queue_props = (VK_PHYSICAL_GPU_QUEUE_PROPERTIES *) malloc(data_size);
+ demo->queue_props = (VkPhysicalGpuQueueProperties *) malloc(data_size);
err = vkGetGpuInfo(demo->gpu, VK_INFO_TYPE_PHYSICAL_GPU_QUEUE_PROPERTIES,
&data_size, demo->queue_props);
assert(!err);
- queue_count = (uint32_t)(data_size / sizeof(VK_PHYSICAL_GPU_QUEUE_PROPERTIES));
+ queue_count = (uint32_t)(data_size / sizeof(VkPhysicalGpuQueueProperties));
assert(queue_count >= 1);
for (i = 0; i < queue_count; i++) {
diff --git a/demos/tri.c b/demos/tri.c
index 91b8f0ed..f81ffbd6 100644
--- a/demos/tri.c
+++ b/demos/tri.c
@@ -22,14 +22,14 @@
#define VERTEX_BUFFER_BIND_ID 0
struct texture_object {
- VK_SAMPLER sampler;
+ VkSampler sampler;
- VK_IMAGE image;
- VK_IMAGE_LAYOUT imageLayout;
+ VkImage image;
+ VkImageLayout imageLayout;
uint32_t num_mem;
- VK_GPU_MEMORY *mem;
- VK_IMAGE_VIEW view;
+ VkGpuMemory *mem;
+ VkImageView view;
int32_t tex_width, tex_height;
};
@@ -37,58 +37,58 @@ struct demo {
xcb_connection_t *connection;
xcb_screen_t *screen;
- VK_INSTANCE inst;
- VK_PHYSICAL_GPU gpu;
- VK_DEVICE device;
- VK_QUEUE queue;
- VK_PHYSICAL_GPU_PROPERTIES *gpu_props;
- VK_PHYSICAL_GPU_QUEUE_PROPERTIES *queue_props;
+ VkInstance inst;
+ VkPhysicalGpu gpu;
+ VkDevice device;
+ VkQueue queue;
+ VkPhysicalGpuProperties *gpu_props;
+ VkPhysicalGpuQueueProperties *queue_props;
uint32_t graphics_queue_node_index;
int width, height;
- VK_FORMAT format;
+ VkFormat format;
struct {
- VK_IMAGE image;
- VK_GPU_MEMORY mem;
+ VkImage image;
+ VkGpuMemory mem;
- VK_COLOR_ATTACHMENT_VIEW view;
- VK_FENCE fence;
+ VkColorAttachmentView view;
+ VkFence fence;
} buffers[DEMO_BUFFER_COUNT];
struct {
- VK_FORMAT format;
+ VkFormat format;
- VK_IMAGE image;
+ VkImage image;
uint32_t num_mem;
- VK_GPU_MEMORY *mem;
- VK_DEPTH_STENCIL_VIEW view;
+ VkGpuMemory *mem;
+ VkDepthStencilView view;
} depth;
struct texture_object textures[DEMO_TEXTURE_COUNT];
struct {
- VK_BUFFER buf;
+ VkBuffer buf;
uint32_t num_mem;
- VK_GPU_MEMORY *mem;
+ VkGpuMemory *mem;
- VK_PIPELINE_VERTEX_INPUT_CREATE_INFO vi;
- VK_VERTEX_INPUT_BINDING_DESCRIPTION vi_bindings[1];
- VK_VERTEX_INPUT_ATTRIBUTE_DESCRIPTION vi_attrs[2];
+ VkPipelineVertexInputCreateInfo vi;
+ VkVertexInputBindingDescription vi_bindings[1];
+ VkVertexInputAttributeDescription vi_attrs[2];
} vertices;
- VK_CMD_BUFFER cmd; // Buffer for initialization commands
- VK_DESCRIPTOR_SET_LAYOUT_CHAIN desc_layout_chain;
- VK_DESCRIPTOR_SET_LAYOUT desc_layout;
- VK_PIPELINE pipeline;
+ VkCmdBuffer cmd; // Buffer for initialization commands
+ VkDescriptorSetLayoutChain desc_layout_chain;
+ VkDescriptorSetLayout desc_layout;
+ VkPipeline pipeline;
- VK_DYNAMIC_VP_STATE_OBJECT viewport;
- VK_DYNAMIC_RS_STATE_OBJECT raster;
- VK_DYNAMIC_CB_STATE_OBJECT color_blend;
- VK_DYNAMIC_DS_STATE_OBJECT depth_stencil;
+ VkDynamicVpStateObject viewport;
+ VkDynamicRsStateObject raster;
+ VkDynamicCbStateObject color_blend;
+ VkDynamicDsStateObject depth_stencil;
- VK_DESCRIPTOR_POOL desc_pool;
- VK_DESCRIPTOR_SET desc_set;
+ VkDescriptorPool desc_pool;
+ VkDescriptorSet desc_set;
xcb_window_t window;
xcb_intern_atom_reply_t *atom_wm_delete_window;
@@ -100,7 +100,7 @@ struct demo {
static void demo_flush_init_cmd(struct demo *demo)
{
- VK_RESULT err;
+ VkResult err;
if (demo->cmd == VK_NULL_HANDLE)
return;
@@ -108,7 +108,7 @@ static void demo_flush_init_cmd(struct demo *demo)
err = vkEndCommandBuffer(demo->cmd);
assert(!err);
- const VK_CMD_BUFFER cmd_bufs[] = { demo->cmd };
+ const VkCmdBuffer cmd_bufs[] = { demo->cmd };
err = vkQueueSubmit(demo->queue, 1, cmd_bufs, VK_NULL_HANDLE);
assert(!err);
@@ -122,7 +122,7 @@ static void demo_flush_init_cmd(struct demo *demo)
static void demo_add_mem_refs(
struct demo *demo,
- int num_refs, VK_GPU_MEMORY *mem)
+ int num_refs, VkGpuMemory *mem)
{
for (int i = 0; i < num_refs; i++) {
vkQueueAddMemReference(demo->queue, mem[i]);
@@ -131,7 +131,7 @@ static void demo_add_mem_refs(
static void demo_remove_mem_refs(
struct demo *demo,
- int num_refs, VK_GPU_MEMORY *mem)
+ int num_refs, VkGpuMemory *mem)
{
for (int i = 0; i < num_refs; i++) {
vkQueueRemoveMemReference(demo->queue, mem[i]);
@@ -140,14 +140,14 @@ static void demo_remove_mem_refs(
static void demo_set_image_layout(
struct demo *demo,
- VK_IMAGE image,
- VK_IMAGE_LAYOUT old_image_layout,
- VK_IMAGE_LAYOUT new_image_layout)
+ VkImage image,
+ VkImageLayout old_image_layout,
+ VkImageLayout new_image_layout)
{
- VK_RESULT err;
+ VkResult err;
if (demo->cmd == VK_NULL_HANDLE) {
- const VK_CMD_BUFFER_CREATE_INFO cmd = {
+ const VkCmdBufferCreateInfo cmd = {
.sType = VK_STRUCTURE_TYPE_CMD_BUFFER_CREATE_INFO,
.pNext = NULL,
.queueNodeIndex = demo->graphics_queue_node_index,
@@ -157,7 +157,7 @@ static void demo_set_image_layout(
err = vkCreateCommandBuffer(demo->device, &cmd, &demo->cmd);
assert(!err);
- VK_CMD_BUFFER_BEGIN_INFO cmd_buf_info = {
+ VkCmdBufferBeginInfo cmd_buf_info = {
.sType = VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO,
.pNext = NULL,
.flags = VK_CMD_BUFFER_OPTIMIZE_GPU_SMALL_BATCH_BIT |
@@ -166,7 +166,7 @@ static void demo_set_image_layout(
err = vkBeginCommandBuffer(demo->cmd, &cmd_buf_info);
}
- VK_IMAGE_MEMORY_BARRIER image_memory_barrier = {
+ VkImageMemoryBarrier image_memory_barrier = {
.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
.pNext = NULL,
.outputMask = 0,
@@ -187,11 +187,11 @@ static void demo_set_image_layout(
image_memory_barrier.outputMask = VK_MEMORY_OUTPUT_COPY_BIT | VK_MEMORY_OUTPUT_CPU_WRITE_BIT;
}
- VK_IMAGE_MEMORY_BARRIER *pmemory_barrier = &image_memory_barrier;
+ VkImageMemoryBarrier *pmemory_barrier = &image_memory_barrier;
- VK_PIPE_EVENT set_events[] = { VK_PIPE_EVENT_TOP_OF_PIPE };
+ VkPipeEvent set_events[] = { VK_PIPE_EVENT_TOP_OF_PIPE };
- VK_PIPELINE_BARRIER pipeline_barrier;
+ VkPipelineBarrier pipeline_barrier;
pipeline_barrier.sType = VK_STRUCTURE_TYPE_PIPELINE_BARRIER;
pipeline_barrier.pNext = NULL;
pipeline_barrier.eventCount = 1;
@@ -205,42 +205,42 @@ static void demo_set_image_layout(
static void demo_draw_build_cmd(struct demo *demo)
{
- const VK_COLOR_ATTACHMENT_BIND_INFO color_attachment = {
+ const VkColorAttachmentBindInfo color_attachment = {
.view = demo->buffers[demo->current_buffer].view,
.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
};
- const VK_DEPTH_STENCIL_BIND_INFO depth_stencil = {
+ const VkDepthStencilBindInfo depth_stencil = {
.view = demo->depth.view,
.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
};
- const VK_CLEAR_COLOR clear_color = {
+ const VkClearColor clear_color = {
.color.floatColor = { 0.2f, 0.2f, 0.2f, 0.2f },
.useRawValue = false,
};
const float clear_depth = 0.9f;
- VK_IMAGE_SUBRESOURCE_RANGE clear_range;
- VK_CMD_BUFFER_BEGIN_INFO cmd_buf_info = {
+ VkImageSubresourceRange clear_range;
+ VkCmdBufferBeginInfo cmd_buf_info = {
.sType = VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO,
.pNext = NULL,
.flags = VK_CMD_BUFFER_OPTIMIZE_GPU_SMALL_BATCH_BIT |
VK_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT,
};
- VK_RESULT err;
- VK_ATTACHMENT_LOAD_OP load_op = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
- VK_ATTACHMENT_STORE_OP store_op = VK_ATTACHMENT_STORE_OP_DONT_CARE;
- const VK_FRAMEBUFFER_CREATE_INFO fb_info = {
+ VkResult err;
+ VkAttachmentLoadOp load_op = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ VkAttachmentStoreOp store_op = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ const VkFramebufferCreateInfo fb_info = {
.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
.pNext = NULL,
.colorAttachmentCount = 1,
- .pColorAttachments = (VK_COLOR_ATTACHMENT_BIND_INFO*) &color_attachment,
- .pDepthStencilAttachment = (VK_DEPTH_STENCIL_BIND_INFO*) &depth_stencil,
+ .pColorAttachments = (VkColorAttachmentBindInfo*) &color_attachment,
+ .pDepthStencilAttachment = (VkDepthStencilBindInfo*) &depth_stencil,
.sampleCount = 1,
.width = demo->width,
.height = demo->height,
.layers = 1,
};
- VK_RENDER_PASS_CREATE_INFO rp_info;
- VK_RENDER_PASS_BEGIN rp_begin;
+ VkRenderPassCreateInfo rp_info;
+ VkRenderPassBegin rp_begin;
memset(&rp_info, 0 , sizeof(rp_info));
err = vkCreateFramebuffer(demo->device, &fb_info, &rp_begin.framebuffer);
@@ -315,8 +315,8 @@ static void demo_draw(struct demo *demo)
.destWindow = demo->window,
.srcImage = demo->buffers[demo->current_buffer].image,
};
- VK_FENCE fence = demo->buffers[demo->current_buffer].fence;
- VK_RESULT err;
+ VkFence fence = demo->buffers[demo->current_buffer].fence;
+ VkResult err;
demo_draw_build_cmd(demo);
@@ -343,16 +343,16 @@ static void demo_prepare_buffers(struct demo *demo)
},
.flags = 0,
};
- const VK_FENCE_CREATE_INFO fence = {
+ const VkFenceCreateInfo fence = {
.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
.pNext = NULL,
.flags = 0,
};
- VK_RESULT err;
+ VkResult err;
uint32_t i;
for (i = 0; i < DEMO_BUFFER_COUNT; i++) {
- VK_COLOR_ATTACHMENT_VIEW_CREATE_INFO color_attachment_view = {
+ VkColorAttachmentViewCreateInfo color_attachment_view = {
.sType = VK_STRUCTURE_TYPE_COLOR_ATTACHMENT_VIEW_CREATE_INFO,
.pNext = NULL,
.format = demo->format,
@@ -385,8 +385,8 @@ static void demo_prepare_buffers(struct demo *demo)
static void demo_prepare_depth(struct demo *demo)
{
- const VK_FORMAT depth_format = VK_FMT_D16_UNORM;
- const VK_IMAGE_CREATE_INFO image = {
+ const VkFormat depth_format = VK_FMT_D16_UNORM;
+ const VkImageCreateInfo image = {
.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
.pNext = NULL,
.imageType = VK_IMAGE_2D,
@@ -411,7 +411,7 @@ static void demo_prepare_depth(struct demo *demo)
.memType = VK_MEMORY_TYPE_IMAGE,
.memPriority = VK_MEMORY_PRIORITY_NORMAL,
};
- VK_DEPTH_STENCIL_VIEW_CREATE_INFO view = {
+ VkDepthStencilViewCreateInfo view = {
.sType = VK_STRUCTURE_TYPE_DEPTH_STENCIL_VIEW_CREATE_INFO,
.pNext = NULL,
.image = VK_NULL_HANDLE,
@@ -421,11 +421,11 @@ static void demo_prepare_depth(struct demo *demo)
.flags = 0,
};
- VK_MEMORY_REQUIREMENTS *mem_reqs;
- size_t mem_reqs_size = sizeof(VK_MEMORY_REQUIREMENTS);
- VK_IMAGE_MEMORY_REQUIREMENTS img_reqs;
- size_t img_reqs_size = sizeof(VK_IMAGE_MEMORY_REQUIREMENTS);
- VK_RESULT err;
+ VkMemoryRequirements *mem_reqs;
+ size_t mem_reqs_size = sizeof(VkMemoryRequirements);
+ VkImageMemoryRequirements img_reqs;
+ size_t img_reqs_size = sizeof(VkImageMemoryRequirements);
+ VkResult err;
uint32_t num_allocations = 0;
size_t num_alloc_size = sizeof(num_allocations);
@@ -438,17 +438,17 @@ static void demo_prepare_depth(struct demo *demo)
err = vkGetObjectInfo(demo->depth.image, VK_INFO_TYPE_MEMORY_ALLOCATION_COUNT, &num_alloc_size, &num_allocations);
assert(!err && num_alloc_size == sizeof(num_allocations));
- mem_reqs = malloc(num_allocations * sizeof(VK_MEMORY_REQUIREMENTS));
- demo->depth.mem = malloc(num_allocations * sizeof(VK_GPU_MEMORY));
+ mem_reqs = malloc(num_allocations * sizeof(VkMemoryRequirements));
+ demo->depth.mem = malloc(num_allocations * sizeof(VkGpuMemory));
demo->depth.num_mem = num_allocations;
err = vkGetObjectInfo(demo->depth.image,
VK_INFO_TYPE_MEMORY_REQUIREMENTS,
&mem_reqs_size, mem_reqs);
- assert(!err && mem_reqs_size == num_allocations * sizeof(VK_MEMORY_REQUIREMENTS));
+ assert(!err && mem_reqs_size == num_allocations * sizeof(VkMemoryRequirements));
err = vkGetObjectInfo(demo->depth.image,
VK_INFO_TYPE_IMAGE_MEMORY_REQUIREMENTS,
&img_reqs_size, &img_reqs);
- assert(!err && img_reqs_size == sizeof(VK_IMAGE_MEMORY_REQUIREMENTS));
+ assert(!err && img_reqs_size == sizeof(VkImageMemoryRequirements));
img_alloc.usage = img_reqs.usage;
img_alloc.formatClass = img_reqs.formatClass;
img_alloc.samples = img_reqs.samples;
@@ -482,18 +482,18 @@ static void demo_prepare_depth(struct demo *demo)
static void demo_prepare_texture_image(struct demo *demo,
const uint32_t *tex_colors,
struct texture_object *tex_obj,
- VK_IMAGE_TILING tiling,
- VK_FLAGS mem_props)
+ VkImageTiling tiling,
+ VkFlags mem_props)
{
- const VK_FORMAT tex_format = VK_FMT_B8G8R8A8_UNORM;
+ const VkFormat tex_format = VK_FMT_B8G8R8A8_UNORM;
const int32_t tex_width = 2;
const int32_t tex_height = 2;
- VK_RESULT err;
+ VkResult err;
tex_obj->tex_width = tex_width;
tex_obj->tex_height = tex_height;
- const VK_IMAGE_CREATE_INFO image_create_info = {
+ const VkImageCreateInfo image_create_info = {
.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
.pNext = NULL,
.imageType = VK_IMAGE_2D,
@@ -519,10 +519,10 @@ static void demo_prepare_texture_image(struct demo *demo,
.memPriority = VK_MEMORY_PRIORITY_NORMAL,
};
- VK_MEMORY_REQUIREMENTS *mem_reqs;
- size_t mem_reqs_size = sizeof(VK_MEMORY_REQUIREMENTS);
- VK_IMAGE_MEMORY_REQUIREMENTS img_reqs;
- size_t img_reqs_size = sizeof(VK_IMAGE_MEMORY_REQUIREMENTS);
+ VkMemoryRequirements *mem_reqs;
+ size_t mem_reqs_size = sizeof(VkMemoryRequirements);
+ VkImageMemoryRequirements img_reqs;
+ size_t img_reqs_size = sizeof(VkImageMemoryRequirements);
uint32_t num_allocations = 0;
size_t num_alloc_size = sizeof(num_allocations);
@@ -534,16 +534,16 @@ static void demo_prepare_texture_image(struct demo *demo,
VK_INFO_TYPE_MEMORY_ALLOCATION_COUNT,
&num_alloc_size, &num_allocations);
assert(!err && num_alloc_size == sizeof(num_allocations));
- mem_reqs = malloc(num_allocations * sizeof(VK_MEMORY_REQUIREMENTS));
- tex_obj->mem = malloc(num_allocations * sizeof(VK_GPU_MEMORY));
+ mem_reqs = malloc(num_allocations * sizeof(VkMemoryRequirements));
+ tex_obj->mem = malloc(num_allocations * sizeof(VkGpuMemory));
err = vkGetObjectInfo(tex_obj->image,
VK_INFO_TYPE_MEMORY_REQUIREMENTS,
&mem_reqs_size, mem_reqs);
- assert(!err && mem_reqs_size == num_allocations * sizeof(VK_MEMORY_REQUIREMENTS));
+ assert(!err && mem_reqs_size == num_allocations * sizeof(VkMemoryRequirements));
err = vkGetObjectInfo(tex_obj->image,
VK_INFO_TYPE_IMAGE_MEMORY_REQUIREMENTS,
&img_reqs_size, &img_reqs);
- assert(!err && img_reqs_size == sizeof(VK_IMAGE_MEMORY_REQUIREMENTS));
+ assert(!err && img_reqs_size == sizeof(VkImageMemoryRequirements));
img_alloc.usage = img_reqs.usage;
img_alloc.formatClass = img_reqs.formatClass;
img_alloc.samples = img_reqs.samples;
@@ -567,13 +567,13 @@ static void demo_prepare_texture_image(struct demo *demo,
tex_obj->num_mem = num_allocations;
if (mem_props & VK_MEMORY_PROPERTY_CPU_VISIBLE_BIT) {
- const VK_IMAGE_SUBRESOURCE subres = {
+ const VkImageSubresource subres = {
.aspect = VK_IMAGE_ASPECT_COLOR,
.mipLevel = 0,
.arraySlice = 0,
};
- VK_SUBRESOURCE_LAYOUT layout;
- size_t layout_size = sizeof(VK_SUBRESOURCE_LAYOUT);
+ VkSubresourceLayout layout;
+ size_t layout_size = sizeof(VkSubresourceLayout);
void *data;
int32_t x, y;
@@ -618,13 +618,13 @@ static void demo_destroy_texture_image(struct texture_object *tex_obj)
static void demo_prepare_textures(struct demo *demo)
{
- const VK_FORMAT tex_format = VK_FMT_B8G8R8A8_UNORM;
- VK_FORMAT_PROPERTIES props;
+ const VkFormat tex_format = VK_FMT_B8G8R8A8_UNORM;
+ VkFormatProperties props;
size_t size = sizeof(props);
const uint32_t tex_colors[DEMO_TEXTURE_COUNT][2] = {
{ 0xffff0000, 0xff00ff00 },
};
- VK_RESULT err;
+ VkResult err;
uint32_t i;
err = vkGetFormatInfo(demo->device, tex_format,
@@ -656,7 +656,7 @@ static void demo_prepare_textures(struct demo *demo)
demo->textures[i].imageLayout,
VK_IMAGE_LAYOUT_TRANSFER_DESTINATION_OPTIMAL);
- VK_IMAGE_COPY copy_region = {
+ VkImageCopy copy_region = {
.srcSubresource = { VK_IMAGE_ASPECT_COLOR, 0, 0 },
.srcOffset = { 0, 0, 0 },
.destSubresource = { VK_IMAGE_ASPECT_COLOR, 0, 0 },
@@ -684,7 +684,7 @@ static void demo_prepare_textures(struct demo *demo)
assert(!"No support for B8G8R8A8_UNORM as texture image format");
}
- const VK_SAMPLER_CREATE_INFO sampler = {
+ const VkSamplerCreateInfo sampler = {
.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
.pNext = NULL,
.magFilter = VK_TEX_FILTER_NEAREST,
@@ -700,7 +700,7 @@ static void demo_prepare_textures(struct demo *demo)
.maxLod = 0.0f,
.borderColorType = VK_BORDER_COLOR_OPAQUE_WHITE,
};
- VK_IMAGE_VIEW_CREATE_INFO view = {
+ VkImageViewCreateInfo view = {
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
.pNext = NULL,
.image = VK_NULL_HANDLE,
@@ -754,13 +754,13 @@ static void demo_prepare_vertices(struct demo *demo)
.memType = VK_MEMORY_TYPE_BUFFER,
.memPriority = VK_MEMORY_PRIORITY_NORMAL,
};
- VK_MEMORY_REQUIREMENTS *mem_reqs;
- size_t mem_reqs_size = sizeof(VK_MEMORY_REQUIREMENTS);
- VK_BUFFER_MEMORY_REQUIREMENTS buf_reqs;
- size_t buf_reqs_size = sizeof(VK_BUFFER_MEMORY_REQUIREMENTS);
+ VkMemoryRequirements *mem_reqs;
+ size_t mem_reqs_size = sizeof(VkMemoryRequirements);
+ VkBufferMemoryRequirements buf_reqs;
+ size_t buf_reqs_size = sizeof(VkBufferMemoryRequirements);
uint32_t num_allocations = 0;
size_t num_alloc_size = sizeof(num_allocations);
- VK_RESULT err;
+ VkResult err;
void *data;
memset(&demo->vertices, 0, sizeof(demo->vertices));
@@ -772,8 +772,8 @@ static void demo_prepare_vertices(struct demo *demo)
VK_INFO_TYPE_MEMORY_ALLOCATION_COUNT,
&num_alloc_size, &num_allocations);
assert(!err && num_alloc_size == sizeof(num_allocations));
- mem_reqs = malloc(num_allocations * sizeof(VK_MEMORY_REQUIREMENTS));
- demo->vertices.mem = malloc(num_allocations * sizeof(VK_GPU_MEMORY));
+ mem_reqs = malloc(num_allocations * sizeof(VkMemoryRequirements));
+ demo->vertices.mem = malloc(num_allocations * sizeof(VkGpuMemory));
demo->vertices.num_mem = num_allocations;
err = vkGetObjectInfo(demo->vertices.buf,
VK_INFO_TYPE_MEMORY_REQUIREMENTS,
@@ -782,7 +782,7 @@ static void demo_prepare_vertices(struct demo *demo)
err = vkGetObjectInfo(demo->vertices.buf,
VK_INFO_TYPE_BUFFER_MEMORY_REQUIREMENTS,
&buf_reqs_size, &buf_reqs);
- assert(!err && buf_reqs_size == sizeof(VK_BUFFER_MEMORY_REQUIREMENTS));
+ assert(!err && buf_reqs_size == sizeof(VkBufferMemoryRequirements));
buf_alloc.usage = buf_reqs.usage;
for (uint32_t i = 0; i < num_allocations; i ++) {
mem_alloc.allocationSize = mem_reqs[i].size;
@@ -828,19 +828,19 @@ static void demo_prepare_vertices(struct demo *demo)
static void demo_prepare_descriptor_layout(struct demo *demo)
{
- const VK_DESCRIPTOR_SET_LAYOUT_BINDING layout_binding = {
+ const VkDescriptorSetLayoutBinding layout_binding = {
.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER_TEXTURE,
.count = DEMO_TEXTURE_COUNT,
.stageFlags = VK_SHADER_STAGE_FLAGS_FRAGMENT_BIT,
.pImmutableSamplers = NULL,
};
- const VK_DESCRIPTOR_SET_LAYOUT_CREATE_INFO descriptor_layout = {
+ const VkDescriptorSetLayoutCreateInfo descriptor_layout = {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
.pNext = NULL,
.count = 1,
.pBinding = &layout_binding,
};
- VK_RESULT err;
+ VkResult err;
err = vkCreateDescriptorSetLayout(demo->device,
&descriptor_layout, &demo->desc_layout);
@@ -851,14 +851,14 @@ static void demo_prepare_descriptor_layout(struct demo *demo)
assert(!err);
}
-static VK_SHADER demo_prepare_shader(struct demo *demo,
- VK_PIPELINE_SHADER_STAGE stage,
+static VkShader demo_prepare_shader(struct demo *demo,
+ VkPipelineShaderStage stage,
const void *code,
size_t size)
{
- VK_SHADER_CREATE_INFO createInfo;
- VK_SHADER shader;
- VK_RESULT err;
+ VkShaderCreateInfo createInfo;
+ VkShader shader;
+ VkResult err;
createInfo.sType = VK_STRUCTURE_TYPE_SHADER_CREATE_INFO;
createInfo.pNext = NULL;
@@ -869,7 +869,7 @@ static VK_SHADER demo_prepare_shader(struct demo *demo,
createInfo.pCode = malloc(createInfo.codeSize);
createInfo.flags = 0;
- /* try version 0 first: VK_PIPELINE_SHADER_STAGE followed by GLSL */
+ /* try version 0 first: VkPipelineShaderStage followed by GLSL */
((uint32_t *) createInfo.pCode)[0] = ICD_SPV_MAGIC;
((uint32_t *) createInfo.pCode)[1] = 0;
((uint32_t *) createInfo.pCode)[2] = stage;
@@ -884,7 +884,7 @@ static VK_SHADER demo_prepare_shader(struct demo *demo,
return shader;
}
-static VK_SHADER demo_prepare_vs(struct demo *demo)
+static VkShader demo_prepare_vs(struct demo *demo)
{
static const char *vertShaderText =
"#version 140\n"
@@ -903,7 +903,7 @@ static VK_SHADER demo_prepare_vs(struct demo *demo)
strlen(vertShaderText));
}
-static VK_SHADER demo_prepare_fs(struct demo *demo)
+static VkShader demo_prepare_fs(struct demo *demo)
{
static const char *fragShaderText =
"#version 140\n"
@@ -922,17 +922,17 @@ static VK_SHADER demo_prepare_fs(struct demo *demo)
static void demo_prepare_pipeline(struct demo *demo)
{
- VK_GRAPHICS_PIPELINE_CREATE_INFO pipeline;
- VK_PIPELINE_VERTEX_INPUT_CREATE_INFO vi;
- VK_PIPELINE_IA_STATE_CREATE_INFO ia;
- VK_PIPELINE_RS_STATE_CREATE_INFO rs;
- VK_PIPELINE_CB_STATE_CREATE_INFO cb;
- VK_PIPELINE_DS_STATE_CREATE_INFO ds;
- VK_PIPELINE_SHADER_STAGE_CREATE_INFO vs;
- VK_PIPELINE_SHADER_STAGE_CREATE_INFO fs;
- VK_PIPELINE_VP_STATE_CREATE_INFO vp;
- VK_PIPELINE_MS_STATE_CREATE_INFO ms;
- VK_RESULT err;
+ VkGraphicsPipelineCreateInfo pipeline;
+ VkPipelineVertexInputCreateInfo vi;
+ VkPipelineIaStateCreateInfo ia;
+ VkPipelineRsStateCreateInfo rs;
+ VkPipelineCbStateCreateInfo cb;
+ VkPipelineDsStateCreateInfo ds;
+ VkPipelineShaderStageCreateInfo vs;
+ VkPipelineShaderStageCreateInfo fs;
+ VkPipelineVpStateCreateInfo vp;
+ VkPipelineMsStateCreateInfo ms;
+ VkResult err;
memset(&pipeline, 0, sizeof(pipeline));
pipeline.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
@@ -952,7 +952,7 @@ static void demo_prepare_pipeline(struct demo *demo)
memset(&cb, 0, sizeof(cb));
cb.sType = VK_STRUCTURE_TYPE_PIPELINE_CB_STATE_CREATE_INFO;
- VK_PIPELINE_CB_ATTACHMENT_STATE att_state[1];
+ VkPipelineCbAttachmentState att_state[1];
memset(att_state, 0, sizeof(att_state));
att_state[0].format = demo->format;
att_state[0].channelWriteMask = 0xf;
@@ -1015,23 +1015,23 @@ static void demo_prepare_pipeline(struct demo *demo)
static void demo_prepare_dynamic_states(struct demo *demo)
{
- VK_DYNAMIC_VP_STATE_CREATE_INFO viewport_create;
- VK_DYNAMIC_RS_STATE_CREATE_INFO raster;
- VK_DYNAMIC_CB_STATE_CREATE_INFO color_blend;
- VK_DYNAMIC_DS_STATE_CREATE_INFO depth_stencil;
- VK_RESULT err;
+ VkDynamicVpStateCreateInfo viewport_create;
+ VkDynamicRsStateCreateInfo raster;
+ VkDynamicCbStateCreateInfo color_blend;
+ VkDynamicDsStateCreateInfo depth_stencil;
+ VkResult err;
memset(&viewport_create, 0, sizeof(viewport_create));
viewport_create.sType = VK_STRUCTURE_TYPE_DYNAMIC_VP_STATE_CREATE_INFO;
viewport_create.viewportAndScissorCount = 1;
- VK_VIEWPORT viewport;
+ VkViewport viewport;
memset(&viewport, 0, sizeof(viewport));
viewport.height = (float) demo->height;
viewport.width = (float) demo->width;
viewport.minDepth = (float) 0.0f;
viewport.maxDepth = (float) 1.0f;
viewport_create.pViewports = &viewport;
- VK_RECT scissor;
+ VkRect scissor;
memset(&scissor, 0, sizeof(scissor));
scissor.extent.width = demo->width;
scissor.extent.height = demo->height;
@@ -1077,17 +1077,17 @@ static void demo_prepare_dynamic_states(struct demo *demo)
static void demo_prepare_descriptor_pool(struct demo *demo)
{
- const VK_DESCRIPTOR_TYPE_COUNT type_count = {
+ const VkDescriptorTypeCount type_count = {
.type = VK_DESCRIPTOR_TYPE_SAMPLER_TEXTURE,
.count = DEMO_TEXTURE_COUNT,
};
- const VK_DESCRIPTOR_POOL_CREATE_INFO descriptor_pool = {
+ const VkDescriptorPoolCreateInfo descriptor_pool = {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
.pNext = NULL,
.count = 1,
.pTypeCount = &type_count,
};
- VK_RESULT err;
+ VkResult err;
err = vkCreateDescriptorPool(demo->device,
VK_DESCRIPTOR_POOL_USAGE_ONE_SHOT, 1,
@@ -1097,11 +1097,11 @@ static void demo_prepare_descriptor_pool(struct demo *demo)
static void demo_prepare_descriptor_set(struct demo *demo)
{
- VK_IMAGE_VIEW_ATTACH_INFO view_info[DEMO_TEXTURE_COUNT];
- VK_SAMPLER_IMAGE_VIEW_INFO combined_info[DEMO_TEXTURE_COUNT];
- VK_UPDATE_SAMPLER_TEXTURES update;
+ VkImageViewAttachInfo view_info[DEMO_TEXTURE_COUNT];
+ VkSamplerImageViewInfo combined_info[DEMO_TEXTURE_COUNT];
+ VkUpdateSamplerTextures update;
const void *update_array[1] = { &update };
- VK_RESULT err;
+ VkResult err;
uint32_t count;
uint32_t i;
@@ -1137,13 +1137,13 @@ static void demo_prepare_descriptor_set(struct demo *demo)
static void demo_prepare(struct demo *demo)
{
- const VK_CMD_BUFFER_CREATE_INFO cmd = {
+ const VkCmdBufferCreateInfo cmd = {
.sType = VK_STRUCTURE_TYPE_CMD_BUFFER_CREATE_INFO,
.pNext = NULL,
.queueNodeIndex = demo->graphics_queue_node_index,
.flags = 0,
};
- VK_RESULT err;
+ VkResult err;
demo_prepare_buffers(demo);
demo_prepare_depth(demo);
@@ -1243,7 +1243,7 @@ static void demo_create_window(struct demo *demo)
static void demo_init_vk(struct demo *demo)
{
- const VK_APPLICATION_INFO app = {
+ const VkApplicationInfo app = {
.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
.pNext = NULL,
.pAppName = "tri",
@@ -1282,7 +1282,7 @@ static void demo_init_vk(struct demo *demo)
.maxValidationLevel = VK_VALIDATION_LEVEL_END_RANGE,
.flags = VK_DEVICE_CREATE_VALIDATION_BIT,
};
- VK_RESULT err;
+ VkResult err;
uint32_t gpu_count;
uint32_t i;
size_t data_size;
@@ -1316,7 +1316,7 @@ static void demo_init_vk(struct demo *demo)
&data_size, NULL);
assert(!err);
- demo->gpu_props = (VK_PHYSICAL_GPU_PROPERTIES *) malloc(data_size);
+ demo->gpu_props = (VkPhysicalGpuProperties *) malloc(data_size);
err = vkGetGpuInfo(demo->gpu, VK_INFO_TYPE_PHYSICAL_GPU_PROPERTIES,
&data_size, demo->gpu_props);
assert(!err);
@@ -1325,11 +1325,11 @@ static void demo_init_vk(struct demo *demo)
&data_size, NULL);
assert(!err);
- demo->queue_props = (VK_PHYSICAL_GPU_QUEUE_PROPERTIES *) malloc(data_size);
+ demo->queue_props = (VkPhysicalGpuQueueProperties *) malloc(data_size);
err = vkGetGpuInfo(demo->gpu, VK_INFO_TYPE_PHYSICAL_GPU_QUEUE_PROPERTIES,
&data_size, demo->queue_props);
assert(!err);
- queue_count = (uint32_t) (data_size / sizeof(VK_PHYSICAL_GPU_QUEUE_PROPERTIES));
+ queue_count = (uint32_t) (data_size / sizeof(VkPhysicalGpuQueueProperties));
assert(queue_count >= 1);
for (i = 0; i < queue_count; i++) {
diff --git a/demos/vulkaninfo.c b/demos/vulkaninfo.c
index 3bc416f6..47edf534 100644
--- a/demos/vulkaninfo.c
+++ b/demos/vulkaninfo.c
@@ -45,24 +45,24 @@ struct app_gpu;
struct app_dev {
struct app_gpu *gpu; /* point back to the GPU */
- VK_DEVICE obj;
+ VkDevice obj;
- VK_FORMAT_PROPERTIES format_props[VK_NUM_FMT];
+ VkFormatProperties format_props[VK_NUM_FMT];
};
struct app_gpu {
uint32_t id;
- VK_PHYSICAL_GPU obj;
+ VkPhysicalGpu obj;
- VK_PHYSICAL_GPU_PROPERTIES props;
- VK_PHYSICAL_GPU_PERFORMANCE perf;
+ VkPhysicalGpuProperties props;
+ VkPhysicalGpuPerformance perf;
uint32_t queue_count;
- VK_PHYSICAL_GPU_QUEUE_PROPERTIES *queue_props;
+ VkPhysicalGpuQueueProperties *queue_props;
VkDeviceQueueCreateInfo *queue_reqs;
- VK_PHYSICAL_GPU_MEMORY_PROPERTIES memory_props;
+ VkPhysicalGpuMemoryProperties memory_props;
uint32_t extension_count;
char **extensions;
@@ -70,7 +70,7 @@ struct app_gpu {
struct app_dev dev;
};
-static const char *vk_result_string(VK_RESULT err)
+static const char *vk_result_string(VkResult err)
{
switch (err) {
#define STR(r) case r: return #r
@@ -119,7 +119,7 @@ static const char *vk_result_string(VK_RESULT err)
}
}
-static const char *vk_gpu_type_string(VK_PHYSICAL_GPU_TYPE type)
+static const char *vk_gpu_type_string(VkPhysicalGpuType type)
{
switch (type) {
#define STR(r) case VK_GPU_TYPE_ ##r: return #r
@@ -132,7 +132,7 @@ static const char *vk_gpu_type_string(VK_PHYSICAL_GPU_TYPE type)
}
}
-static const char *vk_format_string(VK_FORMAT fmt)
+static const char *vk_format_string(VkFormat fmt)
{
switch (fmt) {
#define STR(r) case VK_FMT_ ##r: return #r
@@ -310,11 +310,11 @@ static const char *vk_format_string(VK_FORMAT fmt)
static void app_dev_init_formats(struct app_dev *dev)
{
- VK_FORMAT f;
+ VkFormat f;
for (f = 0; f < VK_NUM_FMT; f++) {
- const VK_FORMAT fmt = f;
- VK_RESULT err;
+ const VkFormat fmt = f;
+ VkResult err;
size_t size = sizeof(dev->format_props[f]);
err = vkGetFormatInfo(dev->obj, fmt,
@@ -342,7 +342,7 @@ static void app_dev_init(struct app_dev *dev, struct app_gpu *gpu)
.maxValidationLevel = VK_VALIDATION_LEVEL_END_RANGE,
.flags = VK_DEVICE_CREATE_VALIDATION_BIT,
};
- VK_RESULT err;
+ VkResult err;
/* request all queues */
info.queueRecordCount = gpu->queue_count;
@@ -365,7 +365,7 @@ static void app_dev_destroy(struct app_dev *dev)
static void app_gpu_init_extensions(struct app_gpu *gpu)
{
- VK_RESULT err;
+ VkResult err;
uint32_t i;
static char *known_extensions[] = {
@@ -391,10 +391,10 @@ static void app_gpu_init_extensions(struct app_gpu *gpu)
}
}
-static void app_gpu_init(struct app_gpu *gpu, uint32_t id, VK_PHYSICAL_GPU obj)
+static void app_gpu_init(struct app_gpu *gpu, uint32_t id, VkPhysicalGpu obj)
{
size_t size;
- VK_RESULT err;
+ VkResult err;
uint32_t i;
memset(gpu, 0, sizeof(*gpu));
@@ -464,12 +464,12 @@ static void app_gpu_destroy(struct app_gpu *gpu)
free(gpu->queue_props);
}
-static void app_dev_dump_format_props(const struct app_dev *dev, VK_FORMAT fmt)
+static void app_dev_dump_format_props(const struct app_dev *dev, VkFormat fmt)
{
- const VK_FORMAT_PROPERTIES *props = &dev->format_props[fmt];
+ const VkFormatProperties *props = &dev->format_props[fmt];
struct {
const char *name;
- VK_FLAGS flags;
+ VkFlags flags;
} tilings[2];
uint32_t i;
@@ -507,7 +507,7 @@ static void app_dev_dump_format_props(const struct app_dev *dev, VK_FORMAT fmt)
static void
app_dev_dump(const struct app_dev *dev)
{
- VK_FORMAT fmt;
+ VkFormat fmt;
for (fmt = 0; fmt < VK_NUM_FMT; fmt++) {
app_dev_dump_format_props(dev, fmt);
@@ -515,9 +515,9 @@ app_dev_dump(const struct app_dev *dev)
}
static void app_gpu_dump_multi_compat(const struct app_gpu *gpu, const struct app_gpu *other,
- const VK_GPU_COMPATIBILITY_INFO *info)
+ const VkGpuCompatibilityInfo *info)
{
- printf("VK_GPU_COMPATIBILITY_INFO[GPU%d]\n", other->id);
+ printf("VkGpuCompatibilityInfo[GPU%d]\n", other->id);
#define TEST(info, b) printf(#b " = %u\n", (bool) (info->compatibilityFlags & VK_GPU_COMPAT_ ##b## _BIT))
TEST(info, ASIC_FEATURES);
@@ -532,12 +532,12 @@ static void app_gpu_dump_multi_compat(const struct app_gpu *gpu, const struct ap
static void app_gpu_multi_compat(struct app_gpu *gpus, uint32_t gpu_count)
{
- VK_RESULT err;
+ VkResult err;
uint32_t i, j;
for (i = 0; i < gpu_count; i++) {
for (j = 0; j < gpu_count; j++) {
- VK_GPU_COMPATIBILITY_INFO info;
+ VkGpuCompatibilityInfo info;
if (i == j)
continue;
@@ -554,9 +554,9 @@ static void app_gpu_multi_compat(struct app_gpu *gpus, uint32_t gpu_count)
static void app_gpu_dump_props(const struct app_gpu *gpu)
{
- const VK_PHYSICAL_GPU_PROPERTIES *props = &gpu->props;
+ const VkPhysicalGpuProperties *props = &gpu->props;
- printf("VK_PHYSICAL_GPU_PROPERTIES\n");
+ printf("VkPhysicalGpuProperties\n");
printf("\tapiVersion = %u\n", props->apiVersion);
printf("\tdriverVersion = %u\n", props->driverVersion);
printf("\tvendorId = 0x%04x\n", props->vendorId);
@@ -572,9 +572,9 @@ static void app_gpu_dump_props(const struct app_gpu *gpu)
static void app_gpu_dump_perf(const struct app_gpu *gpu)
{
- const VK_PHYSICAL_GPU_PERFORMANCE *perf = &gpu->perf;
+ const VkPhysicalGpuPerformance *perf = &gpu->perf;
- printf("VK_PHYSICAL_GPU_PERFORMANCE\n");
+ printf("VkPhysicalGpuPerformance\n");
printf("\tmaxGpuClock = %f\n", perf->maxGpuClock);
printf("\taluPerClock = %f\n", perf->aluPerClock);
printf("\ttexPerClock = %f\n", perf->texPerClock);
@@ -598,9 +598,9 @@ static void app_gpu_dump_extensions(const struct app_gpu *gpu)
static void app_gpu_dump_queue_props(const struct app_gpu *gpu, uint32_t id)
{
- const VK_PHYSICAL_GPU_QUEUE_PROPERTIES *props = &gpu->queue_props[id];
+ const VkPhysicalGpuQueueProperties *props = &gpu->queue_props[id];
- printf("VK_PHYSICAL_GPU_QUEUE_PROPERTIES[%d]\n", id);
+ printf("VkPhysicalGpuQueueProperties[%d]\n", id);
printf("\tqueueFlags = %c%c%c%c\n",
(props->queueFlags & VK_QUEUE_GRAPHICS_BIT) ? 'G' : '.',
(props->queueFlags & VK_QUEUE_COMPUTE_BIT) ? 'C' : '.',
@@ -614,9 +614,9 @@ static void app_gpu_dump_queue_props(const struct app_gpu *gpu, uint32_t id)
static void app_gpu_dump_memory_props(const struct app_gpu *gpu)
{
- const VK_PHYSICAL_GPU_MEMORY_PROPERTIES *props = &gpu->memory_props;
+ const VkPhysicalGpuMemoryProperties *props = &gpu->memory_props;
- printf("VK_PHYSICAL_GPU_MEMORY_PROPERTIES\n");
+ printf("VkPhysicalGpuMemoryProperties\n");
printf("\tsupportsMigration = %u\n", props->supportsMigration);
printf("\tsupportsPinning = %u\n", props->supportsPinning);
}
@@ -643,7 +643,7 @@ static void app_gpu_dump(const struct app_gpu *gpu)
int main(int argc, char **argv)
{
- static const VK_APPLICATION_INFO app_info = {
+ static const VkApplicationInfo app_info = {
.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
.pNext = NULL,
.pAppName = "vkinfo",
@@ -661,10 +661,10 @@ int main(int argc, char **argv)
.ppEnabledExtensionNames = NULL,
};
struct app_gpu gpus[MAX_GPUS];
- VK_PHYSICAL_GPU objs[MAX_GPUS];
- VK_INSTANCE inst;
+ VkPhysicalGpu objs[MAX_GPUS];
+ VkInstance inst;
uint32_t gpu_count, i;
- VK_RESULT err;
+ VkResult err;
err = vkCreateInstance(&inst_info, &inst);
if (err == VK_ERROR_INCOMPATIBLE_DRIVER) {
diff --git a/docs/vk_ds.dot b/docs/vk_ds.dot
index 2615beeb..514614cd 100644
--- a/docs/vk_ds.dot
+++ b/docs/vk_ds.dot
@@ -12,27 +12,27 @@ subgraph clusterDSCreate1
{
label="vkCreateDescriptorSet()"
"_VK_DESCRIPTOR_SET_CREATE_INFO1" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_DESCRIPTOR_SET_CREATE_INFO</TD></TR><TR><TD PORT="f1">VK_STRUCTURE_TYPE</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_DESCRIPTOR_SET_CREATE_INFO</TD></TR><TR><TD>const void*</TD><TD PORT="f2">pNext=NULL</TD></TR><TR><TD PORT="f3">uint32_t</TD><TD PORT="f4">slots=14</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_DESCRIPTOR_SET_CREATE_INFO</TD></TR><TR><TD PORT="f1">VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_DESCRIPTOR_SET_CREATE_INFO</TD></TR><TR><TD>const void*</TD><TD PORT="f2">pNext=NULL</TD></TR><TR><TD PORT="f3">uint32_t</TD><TD PORT="f4">slots=14</TD></TR></TABLE>>
];
}
subgraph clusterDSCreate2
{
label="vkCreateDescriptorSet()"
"_VK_DESCRIPTOR_SET_CREATE_INFO2" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_DESCRIPTOR_SET_CREATE_INFO</TD></TR><TR><TD PORT="f1">VK_STRUCTURE_TYPE</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_DESCRIPTOR_SET_CREATE_INFO</TD></TR><TR><TD>const void*</TD><TD PORT="f2">pNext=NULL</TD></TR><TR><TD PORT="f3">uint32_t</TD><TD PORT="f4">slots=20</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_DESCRIPTOR_SET_CREATE_INFO</TD></TR><TR><TD PORT="f1">VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_DESCRIPTOR_SET_CREATE_INFO</TD></TR><TR><TD>const void*</TD><TD PORT="f2">pNext=NULL</TD></TR><TR><TD PORT="f3">uint32_t</TD><TD PORT="f4">slots=20</TD></TR></TABLE>>
];
}
subgraph clusterSamplerCreate
{
-label="vkCreateSampler - multiple calls return unique VK_SAMPLER handles"
+label="vkCreateSampler - multiple calls return unique VkSampler handles"
"_VK_SAMPLER_CREATE_INFO_0" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_SAMPLER_CREATE_INFO</TD></TR><TR><TD PORT="f1">VK_STRUCTURE_TYPE</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO</TD></TR><TR><TD PORT="f3">const void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VK_TEX_FILTER</TD><TD PORT="f6">magFilter</TD></TR><TR><TD PORT="f7">VK_TEX_FILTER</TD><TD PORT="f8">minFilter</TD></TR><TR><TD PORT="f9">VK_TEX_MIPMAP_MODE</TD><TD PORT="f10">mipMode</TD></TR><TR><TD PORT="f11">VK_TEX_ADDRESS</TD><TD PORT="f12">addressU</TD></TR><TR><TD PORT="f13">VK_TEX_ADDRESS</TD><TD PORT="f14">addressV</TD></TR><TR><TD PORT="f15">VK_TEX_ADDRESS</TD><TD PORT="f16">addressW</TD></TR><TR><TD PORT="f17">float</TD><TD PORT="f18">mipLodBias</TD></TR><TR><TD PORT="f19">uint32_t</TD><TD PORT="f20">maxAnisotropy</TD></TR><TR><TD PORT="f21">VK_COMPARE_FUNC</TD><TD PORT="f22">compareFunc</TD></TR><TR><TD PORT="f23">float</TD><TD PORT="f24">minLod</TD></TR><TR><TD PORT="f25">float</TD><TD PORT="f26">maxLod</TD></TR><TR><TD PORT="f27">VK_BORDER_COLOR_TYPE</TD><TD PORT="f28">borderColorType</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkSamplerCreateInfo</TD></TR><TR><TD PORT="f1">VkStructureType</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO</TD></TR><TR><TD PORT="f3">const void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VkTexFilter</TD><TD PORT="f6">magFilter</TD></TR><TR><TD PORT="f7">VkTexFilter</TD><TD PORT="f8">minFilter</TD></TR><TR><TD PORT="f9">VkTexMipmapMode</TD><TD PORT="f10">mipMode</TD></TR><TR><TD PORT="f11">VkTexAddress</TD><TD PORT="f12">addressU</TD></TR><TR><TD PORT="f13">VkTexAddress</TD><TD PORT="f14">addressV</TD></TR><TR><TD PORT="f15">VkTexAddress</TD><TD PORT="f16">addressW</TD></TR><TR><TD PORT="f17">float</TD><TD PORT="f18">mipLodBias</TD></TR><TR><TD PORT="f19">uint32_t</TD><TD PORT="f20">maxAnisotropy</TD></TR><TR><TD PORT="f21">VkCompareFunc</TD><TD PORT="f22">compareFunc</TD></TR><TR><TD PORT="f23">float</TD><TD PORT="f24">minLod</TD></TR><TR><TD PORT="f25">float</TD><TD PORT="f26">maxLod</TD></TR><TR><TD PORT="f27">VkBorderColorType</TD><TD PORT="f28">borderColorType</TD></TR></TABLE>>
];
"SAMPLER_ELLIPSES" [
label = "..."
];
"_VK_SAMPLER_CREATE_INFO_19" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_SAMPLER_CREATE_INFO</TD></TR><TR><TD PORT="f1">VK_STRUCTURE_TYPE</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO</TD></TR><TR><TD PORT="f3">const void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VK_TEX_FILTER</TD><TD PORT="f6">magFilter</TD></TR><TR><TD PORT="f7">VK_TEX_FILTER</TD><TD PORT="f8">minFilter</TD></TR><TR><TD PORT="f9">VK_TEX_MIPMAP_MODE</TD><TD PORT="f10">mipMode</TD></TR><TR><TD PORT="f11">VK_TEX_ADDRESS</TD><TD PORT="f12">addressU</TD></TR><TR><TD PORT="f13">VK_TEX_ADDRESS</TD><TD PORT="f14">addressV</TD></TR><TR><TD PORT="f15">VK_TEX_ADDRESS</TD><TD PORT="f16">addressW</TD></TR><TR><TD PORT="f17">float</TD><TD PORT="f18">mipLodBias</TD></TR><TR><TD PORT="f19">uint32_t</TD><TD PORT="f20">maxAnisotropy</TD></TR><TR><TD PORT="f21">VK_COMPARE_FUNC</TD><TD PORT="f22">compareFunc</TD></TR><TR><TD PORT="f23">float</TD><TD PORT="f24">minLod</TD></TR><TR><TD PORT="f25">float</TD><TD PORT="f26">maxLod</TD></TR><TR><TD PORT="f27">VK_BORDER_COLOR_TYPE</TD><TD PORT="f28">borderColorType</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkSamplerCreateInfo</TD></TR><TR><TD PORT="f1">VkStructureType</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO</TD></TR><TR><TD PORT="f3">const void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VkTexFilter</TD><TD PORT="f6">magFilter</TD></TR><TR><TD PORT="f7">VkTexFilter</TD><TD PORT="f8">minFilter</TD></TR><TR><TD PORT="f9">VkTexMipmapMode</TD><TD PORT="f10">mipMode</TD></TR><TR><TD PORT="f11">VkTexAddress</TD><TD PORT="f12">addressU</TD></TR><TR><TD PORT="f13">VkTexAddress</TD><TD PORT="f14">addressV</TD></TR><TR><TD PORT="f15">VkTexAddress</TD><TD PORT="f16">addressW</TD></TR><TR><TD PORT="f17">float</TD><TD PORT="f18">mipLodBias</TD></TR><TR><TD PORT="f19">uint32_t</TD><TD PORT="f20">maxAnisotropy</TD></TR><TR><TD PORT="f21">VkCompareFunc</TD><TD PORT="f22">compareFunc</TD></TR><TR><TD PORT="f23">float</TD><TD PORT="f24">minLod</TD></TR><TR><TD PORT="f25">float</TD><TD PORT="f26">maxLod</TD></TR><TR><TD PORT="f27">VkBorderColorType</TD><TD PORT="f28">borderColorType</TD></TR></TABLE>>
];
}
subgraph clusterSamplerAttach
@@ -52,26 +52,26 @@ subgraph clusterMemoryView
{
label="vkAttachMemoryViewDescriptors - pMemViews array of VK_MEMORY_VIEW_ATTACH_INFO structs"
"_VK_MEMORY_VIEW_ATTACH_INFO_3" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_MEMORY_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f1">VK_STRUCTURE_TYPE</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_MEMORY_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f3">void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VK_GPU_MEMORY</TD><TD PORT="f6">mem</TD></TR><TR><TD PORT="f7">VK_GPU_SIZE</TD><TD PORT="f8">offset</TD></TR><TR><TD PORT="f9">VK_GPU_SIZE</TD><TD PORT="f10">range</TD></TR><TR><TD PORT="f11">VK_GPU_SIZE</TD><TD PORT="f12">stride</TD></TR><TR><TD PORT="f13">VK_FORMAT</TD><TD PORT="f14">format</TD></TR><TR><TD PORT="f15">VK_MEMORY_STATE</TD><TD PORT="f16">state</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_MEMORY_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f1">VkStructureType</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_MEMORY_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f3">void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VkGpuMemory</TD><TD PORT="f6">mem</TD></TR><TR><TD PORT="f7">VkGpuSize</TD><TD PORT="f8">offset</TD></TR><TR><TD PORT="f9">VkGpuSize</TD><TD PORT="f10">range</TD></TR><TR><TD PORT="f11">VkGpuSize</TD><TD PORT="f12">stride</TD></TR><TR><TD PORT="f13">VkFormat</TD><TD PORT="f14">format</TD></TR><TR><TD PORT="f15">VK_MEMORY_STATE</TD><TD PORT="f16">state</TD></TR></TABLE>>
];
"MEM_VIEW_ELLIPSES" [
label = "..."
];
"_VK_MEMORY_VIEW_ATTACH_INFO_0" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_MEMORY_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f1">VK_STRUCTURE_TYPE</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_MEMORY_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f3">void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VK_GPU_MEMORY</TD><TD PORT="f6">mem</TD></TR><TR><TD PORT="f7">VK_GPU_SIZE</TD><TD PORT="f8">offset</TD></TR><TR><TD PORT="f9">VK_GPU_SIZE</TD><TD PORT="f10">range</TD></TR><TR><TD PORT="f11">VK_GPU_SIZE</TD><TD PORT="f12">stride</TD></TR><TR><TD PORT="f13">VK_FORMAT</TD><TD PORT="f14">format</TD></TR><TR><TD PORT="f15">VK_MEMORY_STATE</TD><TD PORT="f16">state</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_MEMORY_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f1">VkStructureType</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_MEMORY_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f3">void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VkGpuMemory</TD><TD PORT="f6">mem</TD></TR><TR><TD PORT="f7">VkGpuSize</TD><TD PORT="f8">offset</TD></TR><TR><TD PORT="f9">VkGpuSize</TD><TD PORT="f10">range</TD></TR><TR><TD PORT="f11">VkGpuSize</TD><TD PORT="f12">stride</TD></TR><TR><TD PORT="f13">VkFormat</TD><TD PORT="f14">format</TD></TR><TR><TD PORT="f15">VK_MEMORY_STATE</TD><TD PORT="f16">state</TD></TR></TABLE>>
];
}
subgraph clusterImageView
{
-label="vkAttachImageViewDescriptors - pImageViews array of VK_IMAGE_VIEW_ATTACH_INFO structs"
+label="vkAttachImageViewDescriptors - pImageViews array of VkImageViewAttachInfo structs"
"_VK_IMAGE_VIEW_ATTACH_INFO_9" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_IMAGE_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f1">VK_STRUCTURE_TYPE</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_IMAGE_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f3">void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VK_IMAGE_VIEW</TD><TD PORT="f6">view</TD></TR><TR><TD PORT="f7">VK_IMAGE_STATE</TD><TD PORT="f8">state</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkImageViewAttachInfo</TD></TR><TR><TD PORT="f1">VkStructureType</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_IMAGE_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f3">void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VkImageView</TD><TD PORT="f6">view</TD></TR><TR><TD PORT="f7">VK_IMAGE_STATE</TD><TD PORT="f8">state</TD></TR></TABLE>>
];
"IMG_VIEW_ELLIPSES" [
label = "..."
];
"_VK_IMAGE_VIEW_ATTACH_INFO_0" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_IMAGE_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f1">VK_STRUCTURE_TYPE</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_IMAGE_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f3">void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VK_IMAGE_VIEW</TD><TD PORT="f6">view</TD></TR><TR><TD PORT="f7">VK_IMAGE_STATE</TD><TD PORT="f8">state</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkImageViewAttachInfo</TD></TR><TR><TD PORT="f1">VkStructureType</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_IMAGE_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f3">void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VkImageView</TD><TD PORT="f6">view</TD></TR><TR><TD PORT="f7">VK_IMAGE_STATE</TD><TD PORT="f8">state</TD></TR></TABLE>>
];
}
"VS_VK_DESCRIPTOR_SET_MAPPING" [
diff --git a/docs/vk_full_pipeline_ds.dot b/docs/vk_full_pipeline_ds.dot
index 6132b3a7..894e235c 100644
--- a/docs/vk_full_pipeline_ds.dot
+++ b/docs/vk_full_pipeline_ds.dot
@@ -8,67 +8,67 @@ shape = "plaintext"
];
edge [
];
-"_VK_GRAPHICS_PIPELINE_CREATE_INFO" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_GRAPHICS_PIPELINE_CREATE_INFO</TD></TR><TR><TD>VK_STRUCTURE_TYPE</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO</TD></TR><TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR><TR><TD>VK_FLAGS</TD><TD>flags</TD></TR></TABLE>>
+"VkGraphicsPipelineCreateInfo_" [
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkGraphicsPipelineCreateInfo</TD></TR><TR><TD>VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO</TD></TR><TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR><TR><TD>VkFlags</TD><TD>flags</TD></TR></TABLE>>
];
-"_VK_PIPELINE_IA_STATE_CREATE_INFO" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_PIPELINE_IA_STATE_CREATE_INFO</TD></TR><TR><TD>VK_STRUCTURE_TYPE</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_IA_STATE_CREATE_INFO</TD></TR><TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR><TR><TD>VK_PRIMITIVE_TOPOLOGY</TD><TD>topology</TD></TR><TR><TD>bool32_t</TD><TD>disableVertexReuse</TD></TR><TR><TD>VK_PROVOKING_VERTEX_CONVENTION</TD><TD>provokingVertex</TD></TR><TR><TD>bool32_t</TD><TD>primitiveRestartEnable</TD></TR><TR><TD>uint32_t</TD><TD>primitiveRestartIndex</TD></TR></TABLE>>
+"VkPipelineIaStateCreateInfo_" [
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineIaStateCreateInfo</TD></TR><TR><TD>VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_IA_STATE_CREATE_INFO</TD></TR><TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR><TR><TD>VkPrimitiveTopology</TD><TD>topology</TD></TR><TR><TD>bool32_t</TD><TD>disableVertexReuse</TD></TR><TR><TD>VkProvokingVertexConvention</TD><TD>provokingVertex</TD></TR><TR><TD>bool32_t</TD><TD>primitiveRestartEnable</TD></TR><TR><TD>uint32_t</TD><TD>primitiveRestartIndex</TD></TR></TABLE>>
];
-"_VK_PIPELINE_TESS_STATE_CREATE_INFO" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_PIPELINE_TESS_STATE_CREATE_INFO</TD></TR><TR><TD>VK_STRUCTURE_TYPE</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_TESS_STATE_CREATE_INFO</TD></TR><TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR><TR><TD>uint32_t</TD><TD>patchControlPoints</TD></TR> <TR><TD>float</TD><TD>optimalTessFactor</TD></TR><TR><TD>float</TD><TD>fixedTessFactor</TD></TR></TABLE>>
+"VkPipelineTessStateCreateInfo_" [
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineTessStateCreateInfo</TD></TR><TR><TD>VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_TESS_STATE_CREATE_INFO</TD></TR><TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR><TR><TD>uint32_t</TD><TD>patchControlPoints</TD></TR> <TR><TD>float</TD><TD>optimalTessFactor</TD></TR><TR><TD>float</TD><TD>fixedTessFactor</TD></TR></TABLE>>
];
-"_VK_PIPELINE_RS_STATE_CREATE_INFO" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_PIPELINE_RS_STATE_CREATE_INFO</TD></TR><TR><TD>VK_STRUCTURE_TYPE</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_RS_STATE_CREATE_INFO</TD></TR> <TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR> <TR><TD>bool32_t</TD><TD>depthClipEnable</TD></TR> <TR><TD>bool32_t</TD><TD>rasterizerDiscardEnable</TD></TR> <TR><TD>float</TD><TD>pointSize</TD></TR> </TABLE>>
+"VkPipelineRsStateCreateInfo_" [
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineRsStateCreateInfo</TD></TR><TR><TD>VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_RS_STATE_CREATE_INFO</TD></TR> <TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR> <TR><TD>bool32_t</TD><TD>depthClipEnable</TD></TR> <TR><TD>bool32_t</TD><TD>rasterizerDiscardEnable</TD></TR> <TR><TD>float</TD><TD>pointSize</TD></TR> </TABLE>>
];
-"_VK_PIPELINE_CB_STATE_CREATE_INFO" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_PIPELINE_CB_STATE</TD></TR><TR><TD>VK_STRUCTURE_TYPE</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_CB_STATE_CREATE_INFO</TD></TR> <TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR> <TR><TD>bool32_t</TD><TD>alphaToCoverageEnable</TD></TR> <TR><TD>bool32_t</TD><TD>dualSourceBlendEnable</TD></TR> <TR><TD>VK_LOGIC_OP</TD><TD>logicOp</TD></TR> <TR><TD>VK_PIPELINE_CB_ATTACHMENT_STATE</TD><TD>attachment</TD></TR> </TABLE>>
+"VkPipelineCbStateCreateInfo_" [
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_PIPELINE_CB_STATE</TD></TR><TR><TD>VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_CB_STATE_CREATE_INFO</TD></TR> <TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR> <TR><TD>bool32_t</TD><TD>alphaToCoverageEnable</TD></TR> <TR><TD>bool32_t</TD><TD>dualSourceBlendEnable</TD></TR> <TR><TD>VkLogicOp</TD><TD>logicOp</TD></TR> <TR><TD>VkPipelineCbAttachmentState</TD><TD>attachment</TD></TR> </TABLE>>
];
"_VK_PIPELINE_DB_STATE_CREATE_INFO" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_PIPELINE_DB_STATE_CREATE_INFO</TD></TR><TR><TD>VK_STRUCTURE_TYPE</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_DB_STATE_CREATE_INFO</TD></TR> <TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR> <TR><TD>VK_FORMAT</TD><TD>format</TD></TR> </TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_PIPELINE_DB_STATE_CREATE_INFO</TD></TR><TR><TD>VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_DB_STATE_CREATE_INFO</TD></TR> <TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR> <TR><TD>VkFormat</TD><TD>format</TD></TR> </TABLE>>
];
"VS_VK_PIPELINE_SHADER_STAGE_CREATE_INFO" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_PIPELINE_SHADER_STAGE_CREATE_INFO</TD></TR><TR><TD>VK_STRUCTURE_TYPE</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO</TD></TR> <TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR> <TR><TD>VK_PIPELINE_SHADER</TD><TD PORT="f4">shader</TD></TR> </TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShaderStageCreateInfo</TD></TR><TR><TD>VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO</TD></TR> <TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR> <TR><TD>VkPipelineShader</TD><TD PORT="f4">shader</TD></TR> </TABLE>>
];
"TC_VK_PIPELINE_SHADER_STAGE_CREATE_INFO" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_PIPELINE_SHADER_STAGE_CREATE_INFO</TD></TR><TR><TD>VK_STRUCTURE_TYPE</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO</TD></TR> <TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR> <TR><TD>VK_PIPELINE_SHADER</TD><TD PORT="f4">shader</TD></TR> </TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShaderStageCreateInfo</TD></TR><TR><TD>VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO</TD></TR> <TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR> <TR><TD>VkPipelineShader</TD><TD PORT="f4">shader</TD></TR> </TABLE>>
];
"TE_VK_PIPELINE_SHADER_STAGE_CREATE_INFO" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_PIPELINE_SHADER_STAGE_CREATE_INFO</TD></TR><TR><TD>VK_STRUCTURE_TYPE</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO</TD></TR> <TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR> <TR><TD>VK_PIPELINE_SHADER</TD><TD PORT="f4">shader</TD></TR> </TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShaderStageCreateInfo</TD></TR><TR><TD>VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO</TD></TR> <TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR> <TR><TD>VkPipelineShader</TD><TD PORT="f4">shader</TD></TR> </TABLE>>
];
"GS_VK_PIPELINE_SHADER_STAGE_CREATE_INFO" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_PIPELINE_SHADER_STAGE_CREATE_INFO</TD></TR><TR><TD>VK_STRUCTURE_TYPE</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO</TD></TR> <TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR> <TR><TD>VK_PIPELINE_SHADER</TD><TD PORT="f4">shader</TD></TR> </TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShaderStageCreateInfo</TD></TR><TR><TD>VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO</TD></TR> <TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR> <TR><TD>VkPipelineShader</TD><TD PORT="f4">shader</TD></TR> </TABLE>>
];
"FS_VK_PIPELINE_SHADER_STAGE_CREATE_INFO" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_PIPELINE_SHADER_STAGE_CREATE_INFO</TD></TR><TR><TD>VK_STRUCTURE_TYPE</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO</TD></TR> <TR><TD>const void*</TD><TD PORT="f2">pNext=NULL</TD></TR> <TR><TD>VK_PIPELINE_SHADER</TD><TD PORT="f4">shader</TD></TR> </TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShaderStageCreateInfo</TD></TR><TR><TD>VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO</TD></TR> <TR><TD>const void*</TD><TD PORT="f2">pNext=NULL</TD></TR> <TR><TD>VkPipelineShader</TD><TD PORT="f4">shader</TD></TR> </TABLE>>
];
"VS_VK_PIPELINE_SHADER" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_PIPELINE_SHADER</TD></TR><TR><TD PORT="f1">VK_PIPELINE_SHADER_STAGE</TD><TD PORT="f2">stage=VS</TD></TR><TR><TD PORT="f3">VK_SHADER</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VK_LINK_CONST_BUFFER*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShader</TD></TR><TR><TD PORT="f1">VkPipelineShaderStage</TD><TD PORT="f2">stage=VS</TD></TR><TR><TD PORT="f3">VkShader</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VkLinkConstBuffer*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
];
"TC_VK_PIPELINE_SHADER" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_PIPELINE_SHADER</TD></TR><TR><TD PORT="f1">VK_PIPELINE_SHADER_STAGE</TD><TD PORT="f2">stage=TC</TD></TR><TR><TD PORT="f3">VK_SHADER</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VK_LINK_CONST_BUFFER*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShader</TD></TR><TR><TD PORT="f1">VkPipelineShaderStage</TD><TD PORT="f2">stage=TC</TD></TR><TR><TD PORT="f3">VkShader</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VkLinkConstBuffer*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
];
"TE_VK_PIPELINE_SHADER" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_PIPELINE_SHADER</TD></TR><TR><TD PORT="f1">VK_PIPELINE_SHADER_STAGE</TD><TD PORT="f2">stage=TE</TD></TR><TR><TD PORT="f3">VK_SHADER</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VK_LINK_CONST_BUFFER*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShader</TD></TR><TR><TD PORT="f1">VkPipelineShaderStage</TD><TD PORT="f2">stage=TE</TD></TR><TR><TD PORT="f3">VkShader</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VkLinkConstBuffer*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
];
"GS_VK_PIPELINE_SHADER" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_PIPELINE_SHADER</TD></TR><TR><TD PORT="f1">VK_PIPELINE_SHADER_STAGE</TD><TD PORT="f2">stage=GS</TD></TR><TR><TD PORT="f3">VK_SHADER</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VK_LINK_CONST_BUFFER*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShader</TD></TR><TR><TD PORT="f1">VkPipelineShaderStage</TD><TD PORT="f2">stage=GS</TD></TR><TR><TD PORT="f3">VkShader</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VkLinkConstBuffer*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
];
"FS_VK_PIPELINE_SHADER" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_PIPELINE_SHADER</TD></TR><TR><TD PORT="f1">VK_PIPELINE_SHADER_STAGE</TD><TD PORT="f2">stage=FS</TD></TR><TR><TD PORT="f3">VK_SHADER</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VK_LINK_CONST_BUFFER*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShader</TD></TR><TR><TD PORT="f1">VkPipelineShaderStage</TD><TD PORT="f2">stage=FS</TD></TR><TR><TD PORT="f3">VkShader</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VkLinkConstBuffer*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
];
-"_VK_GRAPHICS_PIPELINE_CREATE_INFO":f2 -> "_VK_PIPELINE_IA_STATE_CREATE_INFO" [
+"VkGraphicsPipelineCreateInfo_":f2 -> "VkPipelineIaStateCreateInfo_" [
id = 100
];
-"_VK_PIPELINE_IA_STATE_CREATE_INFO":f2 -> "_VK_PIPELINE_TESS_STATE_CREATE_INFO" [
+"VkPipelineIaStateCreateInfo_":f2 -> "VkPipelineTessStateCreateInfo_" [
id = 101
];
-"_VK_PIPELINE_TESS_STATE_CREATE_INFO":f2 -> "_VK_PIPELINE_RS_STATE_CREATE_INFO" [
+"VkPipelineTessStateCreateInfo_":f2 -> "VkPipelineRsStateCreateInfo_" [
id = 102
];
-"_VK_PIPELINE_RS_STATE_CREATE_INFO":f2 -> "_VK_PIPELINE_CB_STATE_CREATE_INFO" [
+"VkPipelineRsStateCreateInfo_":f2 -> "VkPipelineCbStateCreateInfo_" [
id = 103
];
-"_VK_PIPELINE_CB_STATE_CREATE_INFO":f2 -> "_VK_PIPELINE_DB_STATE_CREATE_INFO" [
+"VkPipelineCbStateCreateInfo_":f2 -> "_VK_PIPELINE_DB_STATE_CREATE_INFO" [
id = 104
];
"_VK_PIPELINE_DB_STATE_CREATE_INFO":f2 -> "VS_VK_PIPELINE_SHADER_STAGE_CREATE_INFO" [
@@ -105,27 +105,27 @@ subgraph clusterDSCreate1
{
label="vkCreateDescriptorSet()"
"_VK_DESCRIPTOR_SET_CREATE_INFO1" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_DESCRIPTOR_SET_CREATE_INFO</TD></TR><TR><TD PORT="f1">VK_STRUCTURE_TYPE</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_DESCRIPTOR_SET_CREATE_INFO</TD></TR><TR><TD>const void*</TD><TD PORT="f2">pNext=NULL</TD></TR><TR><TD PORT="f3">uint32_t</TD><TD PORT="f4">slots=14</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_DESCRIPTOR_SET_CREATE_INFO</TD></TR><TR><TD PORT="f1">VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_DESCRIPTOR_SET_CREATE_INFO</TD></TR><TR><TD>const void*</TD><TD PORT="f2">pNext=NULL</TD></TR><TR><TD PORT="f3">uint32_t</TD><TD PORT="f4">slots=14</TD></TR></TABLE>>
];
}
subgraph clusterDSCreate2
{
label="vkCreateDescriptorSet()"
"_VK_DESCRIPTOR_SET_CREATE_INFO2" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_DESCRIPTOR_SET_CREATE_INFO</TD></TR><TR><TD PORT="f1">VK_STRUCTURE_TYPE</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_DESCRIPTOR_SET_CREATE_INFO</TD></TR><TR><TD>const void*</TD><TD PORT="f2">pNext=NULL</TD></TR><TR><TD PORT="f3">uint32_t</TD><TD PORT="f4">slots=20</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_DESCRIPTOR_SET_CREATE_INFO</TD></TR><TR><TD PORT="f1">VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_DESCRIPTOR_SET_CREATE_INFO</TD></TR><TR><TD>const void*</TD><TD PORT="f2">pNext=NULL</TD></TR><TR><TD PORT="f3">uint32_t</TD><TD PORT="f4">slots=20</TD></TR></TABLE>>
];
}
subgraph clusterSamplerCreate
{
-label="vkCreateSampler - multiple calls return unique VK_SAMPLER handles"
+label="vkCreateSampler - multiple calls return unique VkSampler handles"
"_VK_SAMPLER_CREATE_INFO_0" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_SAMPLER_CREATE_INFO</TD></TR><TR><TD PORT="f1">VK_STRUCTURE_TYPE</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO</TD></TR><TR><TD PORT="f3">const void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VK_TEX_FILTER</TD><TD PORT="f6">magFilter</TD></TR><TR><TD PORT="f7">VK_TEX_FILTER</TD><TD PORT="f8">minFilter</TD></TR><TR><TD PORT="f9">VK_TEX_MIPMAP_MODE</TD><TD PORT="f10">mipMode</TD></TR><TR><TD PORT="f11">VK_TEX_ADDRESS</TD><TD PORT="f12">addressU</TD></TR><TR><TD PORT="f13">VK_TEX_ADDRESS</TD><TD PORT="f14">addressV</TD></TR><TR><TD PORT="f15">VK_TEX_ADDRESS</TD><TD PORT="f16">addressW</TD></TR><TR><TD PORT="f17">float</TD><TD PORT="f18">mipLodBias</TD></TR><TR><TD PORT="f19">uint32_t</TD><TD PORT="f20">maxAnisotropy</TD></TR><TR><TD PORT="f21">VK_COMPARE_FUNC</TD><TD PORT="f22">compareFunc</TD></TR><TR><TD PORT="f23">float</TD><TD PORT="f24">minLod</TD></TR><TR><TD PORT="f25">float</TD><TD PORT="f26">maxLod</TD></TR><TR><TD PORT="f27">VK_BORDER_COLOR_TYPE</TD><TD PORT="f28">borderColorType</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkSamplerCreateInfo</TD></TR><TR><TD PORT="f1">VkStructureType</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO</TD></TR><TR><TD PORT="f3">const void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VkTexFilter</TD><TD PORT="f6">magFilter</TD></TR><TR><TD PORT="f7">VkTexFilter</TD><TD PORT="f8">minFilter</TD></TR><TR><TD PORT="f9">VkTexMipmapMode</TD><TD PORT="f10">mipMode</TD></TR><TR><TD PORT="f11">VkTexAddress</TD><TD PORT="f12">addressU</TD></TR><TR><TD PORT="f13">VkTexAddress</TD><TD PORT="f14">addressV</TD></TR><TR><TD PORT="f15">VkTexAddress</TD><TD PORT="f16">addressW</TD></TR><TR><TD PORT="f17">float</TD><TD PORT="f18">mipLodBias</TD></TR><TR><TD PORT="f19">uint32_t</TD><TD PORT="f20">maxAnisotropy</TD></TR><TR><TD PORT="f21">VkCompareFunc</TD><TD PORT="f22">compareFunc</TD></TR><TR><TD PORT="f23">float</TD><TD PORT="f24">minLod</TD></TR><TR><TD PORT="f25">float</TD><TD PORT="f26">maxLod</TD></TR><TR><TD PORT="f27">VkBorderColorType</TD><TD PORT="f28">borderColorType</TD></TR></TABLE>>
];
"SAMPLER_ELLIPSES" [
label = "..."
];
"_VK_SAMPLER_CREATE_INFO_19" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_SAMPLER_CREATE_INFO</TD></TR><TR><TD PORT="f1">VK_STRUCTURE_TYPE</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO</TD></TR><TR><TD PORT="f3">const void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VK_TEX_FILTER</TD><TD PORT="f6">magFilter</TD></TR><TR><TD PORT="f7">VK_TEX_FILTER</TD><TD PORT="f8">minFilter</TD></TR><TR><TD PORT="f9">VK_TEX_MIPMAP_MODE</TD><TD PORT="f10">mipMode</TD></TR><TR><TD PORT="f11">VK_TEX_ADDRESS</TD><TD PORT="f12">addressU</TD></TR><TR><TD PORT="f13">VK_TEX_ADDRESS</TD><TD PORT="f14">addressV</TD></TR><TR><TD PORT="f15">VK_TEX_ADDRESS</TD><TD PORT="f16">addressW</TD></TR><TR><TD PORT="f17">float</TD><TD PORT="f18">mipLodBias</TD></TR><TR><TD PORT="f19">uint32_t</TD><TD PORT="f20">maxAnisotropy</TD></TR><TR><TD PORT="f21">VK_COMPARE_FUNC</TD><TD PORT="f22">compareFunc</TD></TR><TR><TD PORT="f23">float</TD><TD PORT="f24">minLod</TD></TR><TR><TD PORT="f25">float</TD><TD PORT="f26">maxLod</TD></TR><TR><TD PORT="f27">VK_BORDER_COLOR_TYPE</TD><TD PORT="f28">borderColorType</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkSamplerCreateInfo</TD></TR><TR><TD PORT="f1">VkStructureType</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO</TD></TR><TR><TD PORT="f3">const void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VkTexFilter</TD><TD PORT="f6">magFilter</TD></TR><TR><TD PORT="f7">VkTexFilter</TD><TD PORT="f8">minFilter</TD></TR><TR><TD PORT="f9">VkTexMipmapMode</TD><TD PORT="f10">mipMode</TD></TR><TR><TD PORT="f11">VkTexAddress</TD><TD PORT="f12">addressU</TD></TR><TR><TD PORT="f13">VkTexAddress</TD><TD PORT="f14">addressV</TD></TR><TR><TD PORT="f15">VkTexAddress</TD><TD PORT="f16">addressW</TD></TR><TR><TD PORT="f17">float</TD><TD PORT="f18">mipLodBias</TD></TR><TR><TD PORT="f19">uint32_t</TD><TD PORT="f20">maxAnisotropy</TD></TR><TR><TD PORT="f21">VkCompareFunc</TD><TD PORT="f22">compareFunc</TD></TR><TR><TD PORT="f23">float</TD><TD PORT="f24">minLod</TD></TR><TR><TD PORT="f25">float</TD><TD PORT="f26">maxLod</TD></TR><TR><TD PORT="f27">VkBorderColorType</TD><TD PORT="f28">borderColorType</TD></TR></TABLE>>
];
}
subgraph clusterSamplerAttach
@@ -145,26 +145,26 @@ subgraph clusterMemoryView
{
label="vkAttachMemoryViewDescriptors - pMemViews array of VK_MEMORY_VIEW_ATTACH_INFO structs"
"_VK_MEMORY_VIEW_ATTACH_INFO_3" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_MEMORY_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f1">VK_STRUCTURE_TYPE</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_MEMORY_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f3">void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VK_GPU_MEMORY</TD><TD PORT="f6">mem</TD></TR><TR><TD PORT="f7">VK_GPU_SIZE</TD><TD PORT="f8">offset</TD></TR><TR><TD PORT="f9">VK_GPU_SIZE</TD><TD PORT="f10">range</TD></TR><TR><TD PORT="f11">VK_GPU_SIZE</TD><TD PORT="f12">stride</TD></TR><TR><TD PORT="f13">VK_FORMAT</TD><TD PORT="f14">format</TD></TR><TR><TD PORT="f15">VK_MEMORY_STATE</TD><TD PORT="f16">state</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_MEMORY_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f1">VkStructureType</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_MEMORY_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f3">void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VkGpuMemory</TD><TD PORT="f6">mem</TD></TR><TR><TD PORT="f7">VkGpuSize</TD><TD PORT="f8">offset</TD></TR><TR><TD PORT="f9">VkGpuSize</TD><TD PORT="f10">range</TD></TR><TR><TD PORT="f11">VkGpuSize</TD><TD PORT="f12">stride</TD></TR><TR><TD PORT="f13">VkFormat</TD><TD PORT="f14">format</TD></TR><TR><TD PORT="f15">VK_MEMORY_STATE</TD><TD PORT="f16">state</TD></TR></TABLE>>
];
"MEM_VIEW_ELLIPSES" [
label = "..."
];
"_VK_MEMORY_VIEW_ATTACH_INFO_0" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_MEMORY_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f1">VK_STRUCTURE_TYPE</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_MEMORY_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f3">void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VK_GPU_MEMORY</TD><TD PORT="f6">mem</TD></TR><TR><TD PORT="f7">VK_GPU_SIZE</TD><TD PORT="f8">offset</TD></TR><TR><TD PORT="f9">VK_GPU_SIZE</TD><TD PORT="f10">range</TD></TR><TR><TD PORT="f11">VK_GPU_SIZE</TD><TD PORT="f12">stride</TD></TR><TR><TD PORT="f13">VK_FORMAT</TD><TD PORT="f14">format</TD></TR><TR><TD PORT="f15">VK_MEMORY_STATE</TD><TD PORT="f16">state</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_MEMORY_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f1">VkStructureType</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_MEMORY_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f3">void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VkGpuMemory</TD><TD PORT="f6">mem</TD></TR><TR><TD PORT="f7">VkGpuSize</TD><TD PORT="f8">offset</TD></TR><TR><TD PORT="f9">VkGpuSize</TD><TD PORT="f10">range</TD></TR><TR><TD PORT="f11">VkGpuSize</TD><TD PORT="f12">stride</TD></TR><TR><TD PORT="f13">VkFormat</TD><TD PORT="f14">format</TD></TR><TR><TD PORT="f15">VK_MEMORY_STATE</TD><TD PORT="f16">state</TD></TR></TABLE>>
];
}
subgraph clusterImageView
{
-label="vkAttachImageViewDescriptors - pImageViews array of VK_IMAGE_VIEW_ATTACH_INFO structs"
+label="vkAttachImageViewDescriptors - pImageViews array of VkImageViewAttachInfo structs"
"_VK_IMAGE_VIEW_ATTACH_INFO_9" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_IMAGE_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f1">VK_STRUCTURE_TYPE</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_IMAGE_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f3">void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VK_IMAGE_VIEW</TD><TD PORT="f6">view</TD></TR><TR><TD PORT="f7">VK_IMAGE_STATE</TD><TD PORT="f8">state</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkImageViewAttachInfo</TD></TR><TR><TD PORT="f1">VkStructureType</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_IMAGE_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f3">void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VkImageView</TD><TD PORT="f6">view</TD></TR><TR><TD PORT="f7">VK_IMAGE_STATE</TD><TD PORT="f8">state</TD></TR></TABLE>>
];
"IMG_VIEW_ELLIPSES" [
label = "..."
];
"_VK_IMAGE_VIEW_ATTACH_INFO_0" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_IMAGE_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f1">VK_STRUCTURE_TYPE</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_IMAGE_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f3">void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VK_IMAGE_VIEW</TD><TD PORT="f6">view</TD></TR><TR><TD PORT="f7">VK_IMAGE_STATE</TD><TD PORT="f8">state</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkImageViewAttachInfo</TD></TR><TR><TD PORT="f1">VkStructureType</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_IMAGE_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f3">void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VkImageView</TD><TD PORT="f6">view</TD></TR><TR><TD PORT="f7">VK_IMAGE_STATE</TD><TD PORT="f8">state</TD></TR></TABLE>>
];
}
"VS_VK_DESCRIPTOR_SET_MAPPING" [
diff --git a/docs/vk_graphics_pipeline.dot b/docs/vk_graphics_pipeline.dot
index b6304552..6aea1c4e 100644
--- a/docs/vk_graphics_pipeline.dot
+++ b/docs/vk_graphics_pipeline.dot
@@ -8,67 +8,67 @@ shape = "plaintext"
];
edge [
];
-"_VK_GRAPHICS_PIPELINE_CREATE_INFO" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_GRAPHICS_PIPELINE_CREATE_INFO</TD></TR><TR><TD>VK_STRUCTURE_TYPE</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO</TD></TR><TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR><TR><TD>VK_FLAGS</TD><TD>flags</TD></TR></TABLE>>
+"VkGraphicsPipelineCreateInfo_" [
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkGraphicsPipelineCreateInfo</TD></TR><TR><TD>VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO</TD></TR><TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR><TR><TD>VkFlags</TD><TD>flags</TD></TR></TABLE>>
];
-"_VK_PIPELINE_IA_STATE_CREATE_INFO" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_PIPELINE_IA_STATE_CREATE_INFO</TD></TR><TR><TD>VK_STRUCTURE_TYPE</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_IA_STATE_CREATE_INFO</TD></TR><TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR><TR><TD>VK_PRIMITIVE_TOPOLOGY</TD><TD>topology</TD></TR><TR><TD>bool32_t</TD><TD>disableVertexReuse</TD></TR><TR><TD>VK_PROVOKING_VERTEX_CONVENTION</TD><TD>provokingVertex</TD></TR><TR><TD>bool32_t</TD><TD>primitiveRestartEnable</TD></TR><TR><TD>uint32_t</TD><TD>primitiveRestartIndex</TD></TR></TABLE>>
+"VkPipelineIaStateCreateInfo_" [
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineIaStateCreateInfo</TD></TR><TR><TD>VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_IA_STATE_CREATE_INFO</TD></TR><TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR><TR><TD>VkPrimitiveTopology</TD><TD>topology</TD></TR><TR><TD>bool32_t</TD><TD>disableVertexReuse</TD></TR><TR><TD>VkProvokingVertexConvention</TD><TD>provokingVertex</TD></TR><TR><TD>bool32_t</TD><TD>primitiveRestartEnable</TD></TR><TR><TD>uint32_t</TD><TD>primitiveRestartIndex</TD></TR></TABLE>>
];
-"_VK_PIPELINE_TESS_STATE_CREATE_INFO" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_PIPELINE_TESS_STATE_CREATE_INFO</TD></TR><TR><TD>VK_STRUCTURE_TYPE</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_TESS_STATE_CREATE_INFO</TD></TR><TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR><TR><TD>uint32_t</TD><TD>patchControlPoints</TD></TR> <TR><TD>float</TD><TD>optimalTessFactor</TD></TR><TR><TD>float</TD><TD>fixedTessFactor</TD></TR></TABLE>>
+"VkPipelineTessStateCreateInfo_" [
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineTessStateCreateInfo</TD></TR><TR><TD>VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_TESS_STATE_CREATE_INFO</TD></TR><TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR><TR><TD>uint32_t</TD><TD>patchControlPoints</TD></TR> <TR><TD>float</TD><TD>optimalTessFactor</TD></TR><TR><TD>float</TD><TD>fixedTessFactor</TD></TR></TABLE>>
];
-"_VK_PIPELINE_RS_STATE_CREATE_INFO" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_PIPELINE_RS_STATE_CREATE_INFO</TD></TR><TR><TD>VK_STRUCTURE_TYPE</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_RS_STATE_CREATE_INFO</TD></TR> <TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR> <TR><TD>bool32_t</TD><TD>depthClipEnable</TD></TR> <TR><TD>bool32_t</TD><TD>rasterizerDiscardEnable</TD></TR> <TR><TD>float</TD><TD>pointSize</TD></TR> </TABLE>>
+"VkPipelineRsStateCreateInfo_" [
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineRsStateCreateInfo</TD></TR><TR><TD>VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_RS_STATE_CREATE_INFO</TD></TR> <TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR> <TR><TD>bool32_t</TD><TD>depthClipEnable</TD></TR> <TR><TD>bool32_t</TD><TD>rasterizerDiscardEnable</TD></TR> <TR><TD>float</TD><TD>pointSize</TD></TR> </TABLE>>
];
-"_VK_PIPELINE_CB_STATE_CREATE_INFO" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_PIPELINE_CB_STATE</TD></TR><TR><TD>VK_STRUCTURE_TYPE</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_CB_STATE_CREATE_INFO</TD></TR> <TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR> <TR><TD>bool32_t</TD><TD>alphaToCoverageEnable</TD></TR> <TR><TD>bool32_t</TD><TD>dualSourceBlendEnable</TD></TR> <TR><TD>VK_LOGIC_OP</TD><TD>logicOp</TD></TR> <TR><TD>VK_PIPELINE_CB_ATTACHMENT_STATE</TD><TD>attachment</TD></TR> </TABLE>>
+"VkPipelineCbStateCreateInfo_" [
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_PIPELINE_CB_STATE</TD></TR><TR><TD>VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_CB_STATE_CREATE_INFO</TD></TR> <TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR> <TR><TD>bool32_t</TD><TD>alphaToCoverageEnable</TD></TR> <TR><TD>bool32_t</TD><TD>dualSourceBlendEnable</TD></TR> <TR><TD>VkLogicOp</TD><TD>logicOp</TD></TR> <TR><TD>VkPipelineCbAttachmentState</TD><TD>attachment</TD></TR> </TABLE>>
];
"_VK_PIPELINE_DB_STATE_CREATE_INFO" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_PIPELINE_DB_STATE_CREATE_INFO</TD></TR><TR><TD>VK_STRUCTURE_TYPE</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_DB_STATE_CREATE_INFO</TD></TR> <TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR> <TR><TD>VK_FORMAT</TD><TD>format</TD></TR> </TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_PIPELINE_DB_STATE_CREATE_INFO</TD></TR><TR><TD>VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_DB_STATE_CREATE_INFO</TD></TR> <TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR> <TR><TD>VkFormat</TD><TD>format</TD></TR> </TABLE>>
];
"VS_VK_PIPELINE_SHADER_STAGE_CREATE_INFO" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_PIPELINE_SHADER_STAGE_CREATE_INFO</TD></TR><TR><TD>VK_STRUCTURE_TYPE</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO</TD></TR> <TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR> <TR><TD>VK_PIPELINE_SHADER</TD><TD PORT="f4">shader</TD></TR> </TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShaderStageCreateInfo</TD></TR><TR><TD>VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO</TD></TR> <TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR> <TR><TD>VkPipelineShader</TD><TD PORT="f4">shader</TD></TR> </TABLE>>
];
"TC_VK_PIPELINE_SHADER_STAGE_CREATE_INFO" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_PIPELINE_SHADER_STAGE_CREATE_INFO</TD></TR><TR><TD>VK_STRUCTURE_TYPE</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO</TD></TR> <TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR> <TR><TD>VK_PIPELINE_SHADER</TD><TD PORT="f4">shader</TD></TR> </TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShaderStageCreateInfo</TD></TR><TR><TD>VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO</TD></TR> <TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR> <TR><TD>VkPipelineShader</TD><TD PORT="f4">shader</TD></TR> </TABLE>>
];
"TE_VK_PIPELINE_SHADER_STAGE_CREATE_INFO" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_PIPELINE_SHADER_STAGE_CREATE_INFO</TD></TR><TR><TD>VK_STRUCTURE_TYPE</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO</TD></TR> <TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR> <TR><TD>VK_PIPELINE_SHADER</TD><TD PORT="f4">shader</TD></TR> </TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShaderStageCreateInfo</TD></TR><TR><TD>VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO</TD></TR> <TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR> <TR><TD>VkPipelineShader</TD><TD PORT="f4">shader</TD></TR> </TABLE>>
];
"GS_VK_PIPELINE_SHADER_STAGE_CREATE_INFO" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_PIPELINE_SHADER_STAGE_CREATE_INFO</TD></TR><TR><TD>VK_STRUCTURE_TYPE</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO</TD></TR> <TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR> <TR><TD>VK_PIPELINE_SHADER</TD><TD PORT="f4">shader</TD></TR> </TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShaderStageCreateInfo</TD></TR><TR><TD>VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO</TD></TR> <TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR> <TR><TD>VkPipelineShader</TD><TD PORT="f4">shader</TD></TR> </TABLE>>
];
"FS_VK_PIPELINE_SHADER_STAGE_CREATE_INFO" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_PIPELINE_SHADER_STAGE_CREATE_INFO</TD></TR><TR><TD>VK_STRUCTURE_TYPE</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO</TD></TR> <TR><TD>const void*</TD><TD PORT="f2">pNext=NULL</TD></TR> <TR><TD>VK_PIPELINE_SHADER</TD><TD PORT="f4">shader</TD></TR> </TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShaderStageCreateInfo</TD></TR><TR><TD>VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO</TD></TR> <TR><TD>const void*</TD><TD PORT="f2">pNext=NULL</TD></TR> <TR><TD>VkPipelineShader</TD><TD PORT="f4">shader</TD></TR> </TABLE>>
];
"VS_VK_PIPELINE_SHADER" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_PIPELINE_SHADER</TD></TR><TR><TD PORT="f1">VK_PIPELINE_SHADER_STAGE</TD><TD PORT="f2">stage=VS</TD></TR><TR><TD PORT="f3">VK_SHADER</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VK_LINK_CONST_BUFFER*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShader</TD></TR><TR><TD PORT="f1">VkPipelineShaderStage</TD><TD PORT="f2">stage=VS</TD></TR><TR><TD PORT="f3">VkShader</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VkLinkConstBuffer*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
];
"TC_VK_PIPELINE_SHADER" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_PIPELINE_SHADER</TD></TR><TR><TD PORT="f1">VK_PIPELINE_SHADER_STAGE</TD><TD PORT="f2">stage=TC</TD></TR><TR><TD PORT="f3">VK_SHADER</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VK_LINK_CONST_BUFFER*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShader</TD></TR><TR><TD PORT="f1">VkPipelineShaderStage</TD><TD PORT="f2">stage=TC</TD></TR><TR><TD PORT="f3">VkShader</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VkLinkConstBuffer*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
];
"TE_VK_PIPELINE_SHADER" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_PIPELINE_SHADER</TD></TR><TR><TD PORT="f1">VK_PIPELINE_SHADER_STAGE</TD><TD PORT="f2">stage=TE</TD></TR><TR><TD PORT="f3">VK_SHADER</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VK_LINK_CONST_BUFFER*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShader</TD></TR><TR><TD PORT="f1">VkPipelineShaderStage</TD><TD PORT="f2">stage=TE</TD></TR><TR><TD PORT="f3">VkShader</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VkLinkConstBuffer*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
];
"GS_VK_PIPELINE_SHADER" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_PIPELINE_SHADER</TD></TR><TR><TD PORT="f1">VK_PIPELINE_SHADER_STAGE</TD><TD PORT="f2">stage=GS</TD></TR><TR><TD PORT="f3">VK_SHADER</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VK_LINK_CONST_BUFFER*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShader</TD></TR><TR><TD PORT="f1">VkPipelineShaderStage</TD><TD PORT="f2">stage=GS</TD></TR><TR><TD PORT="f3">VkShader</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VkLinkConstBuffer*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
];
"FS_VK_PIPELINE_SHADER" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_PIPELINE_SHADER</TD></TR><TR><TD PORT="f1">VK_PIPELINE_SHADER_STAGE</TD><TD PORT="f2">stage=FS</TD></TR><TR><TD PORT="f3">VK_SHADER</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VK_LINK_CONST_BUFFER*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShader</TD></TR><TR><TD PORT="f1">VkPipelineShaderStage</TD><TD PORT="f2">stage=FS</TD></TR><TR><TD PORT="f3">VkShader</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VkLinkConstBuffer*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
];
-"_VK_GRAPHICS_PIPELINE_CREATE_INFO":f2 -> "_VK_PIPELINE_IA_STATE_CREATE_INFO" [
+"VkGraphicsPipelineCreateInfo_":f2 -> "VkPipelineIaStateCreateInfo_" [
id = 0
];
-"_VK_PIPELINE_IA_STATE_CREATE_INFO":f2 -> "_VK_PIPELINE_TESS_STATE_CREATE_INFO" [
+"VkPipelineIaStateCreateInfo_":f2 -> "VkPipelineTessStateCreateInfo_" [
id = 1
];
-"_VK_PIPELINE_TESS_STATE_CREATE_INFO":f2 -> "_VK_PIPELINE_RS_STATE_CREATE_INFO" [
+"VkPipelineTessStateCreateInfo_":f2 -> "VkPipelineRsStateCreateInfo_" [
id = 2
];
-"_VK_PIPELINE_RS_STATE_CREATE_INFO":f2 -> "_VK_PIPELINE_CB_STATE_CREATE_INFO" [
+"VkPipelineRsStateCreateInfo_":f2 -> "VkPipelineCbStateCreateInfo_" [
id = 3
];
-"_VK_PIPELINE_CB_STATE_CREATE_INFO":f2 -> "_VK_PIPELINE_DB_STATE_CREATE_INFO" [
+"VkPipelineCbStateCreateInfo_":f2 -> "_VK_PIPELINE_DB_STATE_CREATE_INFO" [
id = 4
];
"_VK_PIPELINE_DB_STATE_CREATE_INFO":f2 -> "VS_VK_PIPELINE_SHADER_STAGE_CREATE_INFO" [
diff --git a/icd/README.md b/icd/README.md
index bec64cda..871d1c68 100644
--- a/icd/README.md
+++ b/icd/README.md
@@ -8,7 +8,7 @@ This sample driver implementation provide multiple subcomponents required to bui
common/ provides helper and utility functions, as well as all VK entry points
except vkInitAndEnumerateGpus. Hardware drivers are required to provide that
function, and to embed a "VK_LAYER_DISPATCH_TABLE *" as the first member of
-VK_PHYSICAL_GPU and all VK_BASE_OBJECT.
+VkPhysicalGpu and all VkBaseObject.
Thread safety
diff --git a/icd/common/icd-format.c b/icd/common/icd-format.c
index dff3638c..c326cbdc 100644
--- a/icd/common/icd-format.c
+++ b/icd/common/icd-format.c
@@ -205,7 +205,7 @@ static const struct icd_format_info {
[VK_FMT_B10G10R10A2_SINT] = { 4, 4 },
};
-bool icd_format_is_ds(VK_FORMAT format)
+bool icd_format_is_ds(VkFormat format)
{
bool is_ds = false;
@@ -226,7 +226,7 @@ bool icd_format_is_ds(VK_FORMAT format)
return is_ds;
}
-bool icd_format_is_norm(VK_FORMAT format)
+bool icd_format_is_norm(VkFormat format)
{
bool is_norm = false;
@@ -298,7 +298,7 @@ bool icd_format_is_norm(VK_FORMAT format)
return is_norm;
};
-bool icd_format_is_int(VK_FORMAT format)
+bool icd_format_is_int(VkFormat format)
{
bool is_int = false;
@@ -344,7 +344,7 @@ bool icd_format_is_int(VK_FORMAT format)
return is_int;
}
-bool icd_format_is_float(VK_FORMAT format)
+bool icd_format_is_float(VkFormat format)
{
bool is_float = false;
@@ -374,7 +374,7 @@ bool icd_format_is_float(VK_FORMAT format)
return is_float;
}
-bool icd_format_is_srgb(VK_FORMAT format)
+bool icd_format_is_srgb(VkFormat format)
{
bool is_srgb = false;
@@ -412,7 +412,7 @@ bool icd_format_is_srgb(VK_FORMAT format)
return is_srgb;
}
-bool icd_format_is_compressed(VK_FORMAT format)
+bool icd_format_is_compressed(VkFormat format)
{
switch (format) {
case VK_FMT_BC1_RGB_UNORM:
@@ -470,12 +470,12 @@ bool icd_format_is_compressed(VK_FORMAT format)
}
}
-size_t icd_format_get_size(VK_FORMAT format)
+size_t icd_format_get_size(VkFormat format)
{
return icd_format_table[format].size;
}
-VK_IMAGE_FORMAT_CLASS icd_format_get_class(VK_FORMAT format)
+VkImageFormatClass icd_format_get_class(VkFormat format)
{
if (icd_format_is_undef(format))
assert(!"undefined format");
@@ -541,7 +541,7 @@ VK_IMAGE_FORMAT_CLASS icd_format_get_class(VK_FORMAT format)
}
}
-unsigned int icd_format_get_channel_count(VK_FORMAT format)
+unsigned int icd_format_get_channel_count(VkFormat format)
{
return icd_format_table[format].channel_count;
}
@@ -550,7 +550,7 @@ unsigned int icd_format_get_channel_count(VK_FORMAT format)
* Convert a raw RGBA color to a raw value. \p value must have at least
* icd_format_get_size(format) bytes.
*/
-void icd_format_get_raw_value(VK_FORMAT format,
+void icd_format_get_raw_value(VkFormat format,
const uint32_t color[4],
void *value)
{
diff --git a/icd/common/icd-format.h b/icd/common/icd-format.h
index ea227c13..5a7d1f07 100644
--- a/icd/common/icd-format.h
+++ b/icd/common/icd-format.h
@@ -31,35 +31,35 @@
#include <stdbool.h>
#include "icd.h"
-static inline bool icd_format_is_undef(VK_FORMAT format)
+static inline bool icd_format_is_undef(VkFormat format)
{
return (format == VK_FMT_UNDEFINED);
}
-bool icd_format_is_ds(VK_FORMAT format);
+bool icd_format_is_ds(VkFormat format);
-static inline bool icd_format_is_color(VK_FORMAT format)
+static inline bool icd_format_is_color(VkFormat format)
{
return !(icd_format_is_undef(format) || icd_format_is_ds(format));
}
-bool icd_format_is_norm(VK_FORMAT format);
+bool icd_format_is_norm(VkFormat format);
-bool icd_format_is_int(VK_FORMAT format);
+bool icd_format_is_int(VkFormat format);
-bool icd_format_is_float(VK_FORMAT format);
+bool icd_format_is_float(VkFormat format);
-bool icd_format_is_srgb(VK_FORMAT format);
+bool icd_format_is_srgb(VkFormat format);
-bool icd_format_is_compressed(VK_FORMAT format);
+bool icd_format_is_compressed(VkFormat format);
-static inline int icd_format_get_block_width(VK_FORMAT format)
+static inline int icd_format_get_block_width(VkFormat format)
{
/* all compressed formats use 4x4 blocks */
return (icd_format_is_compressed(format)) ? 4 : 1;
}
-static inline bool icd_blend_mode_is_dual_src(VK_BLEND mode)
+static inline bool icd_blend_mode_is_dual_src(VkBlend mode)
{
return (mode == VK_BLEND_SRC1_COLOR) ||
(mode == VK_BLEND_SRC1_ALPHA) ||
@@ -67,7 +67,7 @@ static inline bool icd_blend_mode_is_dual_src(VK_BLEND mode)
(mode == VK_BLEND_ONE_MINUS_SRC1_ALPHA);
}
-static inline bool icd_pipeline_cb_att_needs_dual_source_blending(const VK_PIPELINE_CB_ATTACHMENT_STATE *att)
+static inline bool icd_pipeline_cb_att_needs_dual_source_blending(const VkPipelineCbAttachmentState *att)
{
if (icd_blend_mode_is_dual_src(att->srcBlendColor) ||
icd_blend_mode_is_dual_src(att->srcBlendAlpha) ||
@@ -78,13 +78,13 @@ static inline bool icd_pipeline_cb_att_needs_dual_source_blending(const VK_PIPEL
return false;
}
-size_t icd_format_get_size(VK_FORMAT format);
+size_t icd_format_get_size(VkFormat format);
-VK_IMAGE_FORMAT_CLASS icd_format_get_class(VK_FORMAT format);
+VkImageFormatClass icd_format_get_class(VkFormat format);
-unsigned int icd_format_get_channel_count(VK_FORMAT format);
+unsigned int icd_format_get_channel_count(VkFormat format);
-void icd_format_get_raw_value(VK_FORMAT format,
+void icd_format_get_raw_value(VkFormat format,
const uint32_t color[4],
void *value);
diff --git a/icd/common/icd-instance.c b/icd/common/icd-instance.c
index fed138e7..6d6c2cf4 100644
--- a/icd/common/icd-instance.c
+++ b/icd/common/icd-instance.c
@@ -33,7 +33,7 @@
static void * VKAPI default_alloc(void *user_data, size_t size,
size_t alignment,
- VK_SYSTEM_ALLOC_TYPE allocType)
+ VkSystemAllocType allocType)
{
if (alignment <= 1) {
return malloc(size);
@@ -61,10 +61,10 @@ static void VKAPI default_free(void *user_data, void *ptr)
free(ptr);
}
-struct icd_instance *icd_instance_create(const VK_APPLICATION_INFO *app_info,
- const VK_ALLOC_CALLBACKS *alloc_cb)
+struct icd_instance *icd_instance_create(const VkApplicationInfo *app_info,
+ const VkAllocCallbacks *alloc_cb)
{
- static const VK_ALLOC_CALLBACKS default_alloc_cb = {
+ static const VkAllocCallbacks default_alloc_cb = {
.pfnAlloc = default_alloc,
.pfnFree = default_free,
};
@@ -114,10 +114,10 @@ void icd_instance_destroy(struct icd_instance *instance)
icd_instance_free(instance, instance);
}
-VK_RESULT icd_instance_set_bool(struct icd_instance *instance,
+VkResult icd_instance_set_bool(struct icd_instance *instance,
VK_DBG_GLOBAL_OPTION option, bool yes)
{
- VK_RESULT res = VK_SUCCESS;
+ VkResult res = VK_SUCCESS;
switch (option) {
case VK_DBG_OPTION_DEBUG_ECHO_ENABLE:
@@ -137,7 +137,7 @@ VK_RESULT icd_instance_set_bool(struct icd_instance *instance,
return res;
}
-VK_RESULT icd_instance_add_logger(struct icd_instance *instance,
+VkResult icd_instance_add_logger(struct icd_instance *instance,
VK_DBG_MSG_CALLBACK_FUNCTION func,
void *user_data)
{
@@ -164,7 +164,7 @@ VK_RESULT icd_instance_add_logger(struct icd_instance *instance,
return VK_SUCCESS;
}
-VK_RESULT icd_instance_remove_logger(struct icd_instance *instance,
+VkResult icd_instance_remove_logger(struct icd_instance *instance,
VK_DBG_MSG_CALLBACK_FUNCTION func)
{
struct icd_instance_logger *logger, *prev;
@@ -190,8 +190,8 @@ VK_RESULT icd_instance_remove_logger(struct icd_instance *instance,
void icd_instance_log(const struct icd_instance *instance,
VK_DBG_MSG_TYPE msg_type,
- VK_VALIDATION_LEVEL validation_level,
- VK_BASE_OBJECT src_object,
+ VkValidationLevel validation_level,
+ VkBaseObject src_object,
size_t location, int32_t msg_code,
const char *msg)
{
diff --git a/icd/common/icd-instance.h b/icd/common/icd-instance.h
index 9b7093cf..13a146ba 100644
--- a/icd/common/icd-instance.h
+++ b/icd/common/icd-instance.h
@@ -49,21 +49,21 @@ struct icd_instance {
bool break_on_error;
bool break_on_warning;
- VK_ALLOC_CALLBACKS alloc_cb;
+ VkAllocCallbacks alloc_cb;
struct icd_instance_logger *loggers;
};
-struct icd_instance *icd_instance_create(const VK_APPLICATION_INFO *app_info,
- const VK_ALLOC_CALLBACKS *alloc_cb);
+struct icd_instance *icd_instance_create(const VkApplicationInfo *app_info,
+ const VkAllocCallbacks *alloc_cb);
void icd_instance_destroy(struct icd_instance *instance);
-VK_RESULT icd_instance_set_bool(struct icd_instance *instance,
+VkResult icd_instance_set_bool(struct icd_instance *instance,
VK_DBG_GLOBAL_OPTION option, bool yes);
static inline void *icd_instance_alloc(const struct icd_instance *instance,
size_t size, size_t alignment,
- VK_SYSTEM_ALLOC_TYPE type)
+ VkSystemAllocType type)
{
return instance->alloc_cb.pfnAlloc(instance->alloc_cb.pUserData,
size, alignment, type);
@@ -75,16 +75,16 @@ static inline void icd_instance_free(const struct icd_instance *instance,
instance->alloc_cb.pfnFree(instance->alloc_cb.pUserData, ptr);
}
-VK_RESULT icd_instance_add_logger(struct icd_instance *instance,
+VkResult icd_instance_add_logger(struct icd_instance *instance,
VK_DBG_MSG_CALLBACK_FUNCTION func,
void *user_data);
-VK_RESULT icd_instance_remove_logger(struct icd_instance *instance,
+VkResult icd_instance_remove_logger(struct icd_instance *instance,
VK_DBG_MSG_CALLBACK_FUNCTION func);
void icd_instance_log(const struct icd_instance *instance,
VK_DBG_MSG_TYPE msg_type,
- VK_VALIDATION_LEVEL validation_level,
- VK_BASE_OBJECT src_object,
+ VkValidationLevel validation_level,
+ VkBaseObject src_object,
size_t location, int32_t msg_code,
const char *msg);
diff --git a/icd/nulldrv/nulldrv.c b/icd/nulldrv/nulldrv.c
index 7aac1704..12e73649 100644
--- a/icd/nulldrv/nulldrv.c
+++ b/icd/nulldrv/nulldrv.c
@@ -44,23 +44,23 @@ static const char * const nulldrv_gpu_exts[NULLDRV_EXT_COUNT] = {
[NULLDRV_EXT_WSI_WINDOWS] = "VK_WSI_WINDOWS"
};
-static struct nulldrv_base *nulldrv_base(VK_BASE_OBJECT base)
+static struct nulldrv_base *nulldrv_base(VkBaseObject base)
{
return (struct nulldrv_base *) base;
}
-static VK_RESULT nulldrv_base_get_info(struct nulldrv_base *base, int type,
+static VkResult nulldrv_base_get_info(struct nulldrv_base *base, int type,
size_t *size, void *data)
{
- VK_RESULT ret = VK_SUCCESS;
+ VkResult ret = VK_SUCCESS;
size_t s;
uint32_t *count;
switch (type) {
case VK_INFO_TYPE_MEMORY_REQUIREMENTS:
{
- VK_MEMORY_REQUIREMENTS *mem_req = data;
- s = sizeof(VK_MEMORY_REQUIREMENTS);
+ VkMemoryRequirements *mem_req = data;
+ s = sizeof(VkMemoryRequirements);
*size = s;
if (data == NULL)
return ret;
@@ -76,14 +76,14 @@ static VK_RESULT nulldrv_base_get_info(struct nulldrv_base *base, int type,
*count = 1;
break;
case VK_INFO_TYPE_IMAGE_MEMORY_REQUIREMENTS:
- s = sizeof(VK_IMAGE_MEMORY_REQUIREMENTS);
+ s = sizeof(VkImageMemoryRequirements);
*size = s;
if (data == NULL)
return ret;
memset(data, 0, s);
break;
case VK_INFO_TYPE_BUFFER_MEMORY_REQUIREMENTS:
- s = sizeof(VK_BUFFER_MEMORY_REQUIREMENTS);
+ s = sizeof(VkBufferMemoryRequirements);
*size = s;
if (data == NULL)
return ret;
@@ -131,7 +131,7 @@ static struct nulldrv_base *nulldrv_base_create(struct nulldrv_dev *dev,
return base;
}
-static VK_RESULT nulldrv_gpu_add(int devid, const char *primary_node,
+static VkResult nulldrv_gpu_add(int devid, const char *primary_node,
const char *render_node, struct nulldrv_gpu **gpu_ret)
{
struct nulldrv_gpu *gpu;
@@ -149,7 +149,7 @@ static VK_RESULT nulldrv_gpu_add(int devid, const char *primary_node,
return VK_SUCCESS;
}
-static VK_RESULT nulldrv_queue_create(struct nulldrv_dev *dev,
+static VkResult nulldrv_queue_create(struct nulldrv_dev *dev,
uint32_t node_index,
struct nulldrv_queue **queue_ret)
{
@@ -167,7 +167,7 @@ static VK_RESULT nulldrv_queue_create(struct nulldrv_dev *dev,
return VK_SUCCESS;
}
-static VK_RESULT dev_create_queues(struct nulldrv_dev *dev,
+static VkResult dev_create_queues(struct nulldrv_dev *dev,
const VkDeviceQueueCreateInfo *queues,
uint32_t count)
{
@@ -178,7 +178,7 @@ static VK_RESULT dev_create_queues(struct nulldrv_dev *dev,
for (i = 0; i < count; i++) {
const VkDeviceQueueCreateInfo *q = &queues[i];
- VK_RESULT ret = VK_SUCCESS;
+ VkResult ret = VK_SUCCESS;
if (q->queueCount == 1 && !dev->queues[q->queueNodeIndex]) {
ret = nulldrv_queue_create(dev, q->queueNodeIndex,
@@ -211,7 +211,7 @@ static enum nulldrv_ext_type nulldrv_gpu_lookup_extension(const struct nulldrv_g
return type;
}
-static VK_RESULT nulldrv_desc_ooxx_create(struct nulldrv_dev *dev,
+static VkResult nulldrv_desc_ooxx_create(struct nulldrv_dev *dev,
struct nulldrv_desc_ooxx **ooxx_ret)
{
struct nulldrv_desc_ooxx *ooxx;
@@ -230,13 +230,13 @@ static VK_RESULT nulldrv_desc_ooxx_create(struct nulldrv_dev *dev,
return VK_SUCCESS;
}
-static VK_RESULT nulldrv_dev_create(struct nulldrv_gpu *gpu,
+static VkResult nulldrv_dev_create(struct nulldrv_gpu *gpu,
const VkDeviceCreateInfo *info,
struct nulldrv_dev **dev_ret)
{
struct nulldrv_dev *dev;
uint32_t i;
- VK_RESULT ret;
+ VkResult ret;
dev = (struct nulldrv_dev *) nulldrv_base_create(NULL, sizeof(*dev),
VK_DBG_OBJECT_DEVICE);
@@ -269,13 +269,13 @@ static VK_RESULT nulldrv_dev_create(struct nulldrv_gpu *gpu,
return VK_SUCCESS;
}
-static struct nulldrv_gpu *nulldrv_gpu(VK_PHYSICAL_GPU gpu)
+static struct nulldrv_gpu *nulldrv_gpu(VkPhysicalGpu gpu)
{
return (struct nulldrv_gpu *) gpu;
}
-static VK_RESULT nulldrv_rt_view_create(struct nulldrv_dev *dev,
- const VK_COLOR_ATTACHMENT_VIEW_CREATE_INFO *info,
+static VkResult nulldrv_rt_view_create(struct nulldrv_dev *dev,
+ const VkColorAttachmentViewCreateInfo *info,
struct nulldrv_rt_view **view_ret)
{
struct nulldrv_rt_view *view;
@@ -290,8 +290,8 @@ static VK_RESULT nulldrv_rt_view_create(struct nulldrv_dev *dev,
return VK_SUCCESS;
}
-static VK_RESULT nulldrv_fence_create(struct nulldrv_dev *dev,
- const VK_FENCE_CREATE_INFO *info,
+static VkResult nulldrv_fence_create(struct nulldrv_dev *dev,
+ const VkFenceCreateInfo *info,
struct nulldrv_fence **fence_ret)
{
struct nulldrv_fence *fence;
@@ -306,7 +306,7 @@ static VK_RESULT nulldrv_fence_create(struct nulldrv_dev *dev,
return VK_SUCCESS;
}
-static struct nulldrv_dev *nulldrv_dev(VK_DEVICE dev)
+static struct nulldrv_dev *nulldrv_dev(VkDevice dev)
{
return (struct nulldrv_dev *) dev;
}
@@ -317,18 +317,18 @@ static struct nulldrv_img *nulldrv_img_from_base(struct nulldrv_base *base)
}
-static VK_RESULT img_get_info(struct nulldrv_base *base, int type,
+static VkResult img_get_info(struct nulldrv_base *base, int type,
size_t *size, void *data)
{
struct nulldrv_img *img = nulldrv_img_from_base(base);
- VK_RESULT ret = VK_SUCCESS;
+ VkResult ret = VK_SUCCESS;
switch (type) {
case VK_INFO_TYPE_MEMORY_REQUIREMENTS:
{
- VK_MEMORY_REQUIREMENTS *mem_req = data;
+ VkMemoryRequirements *mem_req = data;
- *size = sizeof(VK_MEMORY_REQUIREMENTS);
+ *size = sizeof(VkMemoryRequirements);
if (data == NULL)
return ret;
mem_req->size = img->total_size;
@@ -338,9 +338,9 @@ static VK_RESULT img_get_info(struct nulldrv_base *base, int type,
break;
case VK_INFO_TYPE_IMAGE_MEMORY_REQUIREMENTS:
{
- VK_IMAGE_MEMORY_REQUIREMENTS *img_req = data;
+ VkImageMemoryRequirements *img_req = data;
- *size = sizeof(VK_IMAGE_MEMORY_REQUIREMENTS);
+ *size = sizeof(VkImageMemoryRequirements);
if (data == NULL)
return ret;
img_req->usage = img->usage;
@@ -350,9 +350,9 @@ static VK_RESULT img_get_info(struct nulldrv_base *base, int type,
break;
case VK_INFO_TYPE_BUFFER_MEMORY_REQUIREMENTS:
{
- VK_BUFFER_MEMORY_REQUIREMENTS *buf_req = data;
+ VkBufferMemoryRequirements *buf_req = data;
- *size = sizeof(VK_BUFFER_MEMORY_REQUIREMENTS);
+ *size = sizeof(VkBufferMemoryRequirements);
if (data == NULL)
return ret;
buf_req->usage = img->usage;
@@ -366,8 +366,8 @@ static VK_RESULT img_get_info(struct nulldrv_base *base, int type,
return ret;
}
-static VK_RESULT nulldrv_img_create(struct nulldrv_dev *dev,
- const VK_IMAGE_CREATE_INFO *info,
+static VkResult nulldrv_img_create(struct nulldrv_dev *dev,
+ const VkImageCreateInfo *info,
bool scanout,
struct nulldrv_img **img_ret)
{
@@ -396,12 +396,12 @@ static VK_RESULT nulldrv_img_create(struct nulldrv_dev *dev,
return VK_SUCCESS;
}
-static struct nulldrv_img *nulldrv_img(VK_IMAGE image)
+static struct nulldrv_img *nulldrv_img(VkImage image)
{
return (struct nulldrv_img *) image;
}
-static VK_RESULT nulldrv_mem_alloc(struct nulldrv_dev *dev,
+static VkResult nulldrv_mem_alloc(struct nulldrv_dev *dev,
const VkMemoryAllocInfo *info,
struct nulldrv_mem **mem_ret)
{
@@ -424,8 +424,8 @@ static VK_RESULT nulldrv_mem_alloc(struct nulldrv_dev *dev,
return VK_SUCCESS;
}
-static VK_RESULT nulldrv_ds_view_create(struct nulldrv_dev *dev,
- const VK_DEPTH_STENCIL_VIEW_CREATE_INFO *info,
+static VkResult nulldrv_ds_view_create(struct nulldrv_dev *dev,
+ const VkDepthStencilViewCreateInfo *info,
struct nulldrv_ds_view **view_ret)
{
struct nulldrv_img *img = nulldrv_img(info->image);
@@ -445,8 +445,8 @@ static VK_RESULT nulldrv_ds_view_create(struct nulldrv_dev *dev,
return VK_SUCCESS;
}
-static VK_RESULT nulldrv_sampler_create(struct nulldrv_dev *dev,
- const VK_SAMPLER_CREATE_INFO *info,
+static VkResult nulldrv_sampler_create(struct nulldrv_dev *dev,
+ const VkSamplerCreateInfo *info,
struct nulldrv_sampler **sampler_ret)
{
struct nulldrv_sampler *sampler;
@@ -461,8 +461,8 @@ static VK_RESULT nulldrv_sampler_create(struct nulldrv_dev *dev,
return VK_SUCCESS;
}
-static VK_RESULT nulldrv_img_view_create(struct nulldrv_dev *dev,
- const VK_IMAGE_VIEW_CREATE_INFO *info,
+static VkResult nulldrv_img_view_create(struct nulldrv_dev *dev,
+ const VkImageViewCreateInfo *info,
struct nulldrv_img_view **view_ret)
{
struct nulldrv_img *img = nulldrv_img(info->image);
@@ -483,12 +483,12 @@ static VK_RESULT nulldrv_img_view_create(struct nulldrv_dev *dev,
return VK_SUCCESS;
}
-static void *nulldrv_mem_map(struct nulldrv_mem *mem, VK_FLAGS flags)
+static void *nulldrv_mem_map(struct nulldrv_mem *mem, VkFlags flags)
{
return mem->bo;
}
-static struct nulldrv_mem *nulldrv_mem(VK_GPU_MEMORY mem)
+static struct nulldrv_mem *nulldrv_mem(VkGpuMemory mem)
{
return (struct nulldrv_mem *) mem;
}
@@ -498,18 +498,18 @@ static struct nulldrv_buf *nulldrv_buf_from_base(struct nulldrv_base *base)
return (struct nulldrv_buf *) base;
}
-static VK_RESULT buf_get_info(struct nulldrv_base *base, int type,
+static VkResult buf_get_info(struct nulldrv_base *base, int type,
size_t *size, void *data)
{
struct nulldrv_buf *buf = nulldrv_buf_from_base(base);
- VK_RESULT ret = VK_SUCCESS;
+ VkResult ret = VK_SUCCESS;
switch (type) {
case VK_INFO_TYPE_MEMORY_REQUIREMENTS:
{
- VK_MEMORY_REQUIREMENTS *mem_req = data;
+ VkMemoryRequirements *mem_req = data;
- *size = sizeof(VK_MEMORY_REQUIREMENTS);
+ *size = sizeof(VkMemoryRequirements);
if (data == NULL)
return ret;
@@ -521,9 +521,9 @@ static VK_RESULT buf_get_info(struct nulldrv_base *base, int type,
break;
case VK_INFO_TYPE_BUFFER_MEMORY_REQUIREMENTS:
{
- VK_BUFFER_MEMORY_REQUIREMENTS *buf_req = data;
+ VkBufferMemoryRequirements *buf_req = data;
- *size = sizeof(VK_BUFFER_MEMORY_REQUIREMENTS);
+ *size = sizeof(VkBufferMemoryRequirements);
if (data == NULL)
return ret;
buf_req->usage = buf->usage;
@@ -537,7 +537,7 @@ static VK_RESULT buf_get_info(struct nulldrv_base *base, int type,
return ret;
}
-static VK_RESULT nulldrv_buf_create(struct nulldrv_dev *dev,
+static VkResult nulldrv_buf_create(struct nulldrv_dev *dev,
const VkBufferCreateInfo *info,
struct nulldrv_buf **buf_ret)
{
@@ -558,8 +558,8 @@ static VK_RESULT nulldrv_buf_create(struct nulldrv_dev *dev,
return VK_SUCCESS;
}
-static VK_RESULT nulldrv_desc_layout_create(struct nulldrv_dev *dev,
- const VK_DESCRIPTOR_SET_LAYOUT_CREATE_INFO *info,
+static VkResult nulldrv_desc_layout_create(struct nulldrv_dev *dev,
+ const VkDescriptorSetLayoutCreateInfo *info,
struct nulldrv_desc_layout **layout_ret)
{
struct nulldrv_desc_layout *layout;
@@ -575,9 +575,9 @@ static VK_RESULT nulldrv_desc_layout_create(struct nulldrv_dev *dev,
return VK_SUCCESS;
}
-static VK_RESULT nulldrv_desc_layout_chain_create(struct nulldrv_dev *dev,
+static VkResult nulldrv_desc_layout_chain_create(struct nulldrv_dev *dev,
uint32_t setLayoutArrayCount,
- const VK_DESCRIPTOR_SET_LAYOUT *pSetLayoutArray,
+ const VkDescriptorSetLayout *pSetLayoutArray,
struct nulldrv_desc_layout_chain **chain_ret)
{
struct nulldrv_desc_layout_chain *chain;
@@ -593,13 +593,13 @@ static VK_RESULT nulldrv_desc_layout_chain_create(struct nulldrv_dev *dev,
return VK_SUCCESS;
}
-static struct nulldrv_desc_layout *nulldrv_desc_layout(VK_DESCRIPTOR_SET_LAYOUT layout)
+static struct nulldrv_desc_layout *nulldrv_desc_layout(VkDescriptorSetLayout layout)
{
return (struct nulldrv_desc_layout *) layout;
}
-static VK_RESULT shader_create(struct nulldrv_dev *dev,
- const VK_SHADER_CREATE_INFO *info,
+static VkResult shader_create(struct nulldrv_dev *dev,
+ const VkShaderCreateInfo *info,
struct nulldrv_shader **sh_ret)
{
struct nulldrv_shader *sh;
@@ -614,8 +614,8 @@ static VK_RESULT shader_create(struct nulldrv_dev *dev,
return VK_SUCCESS;
}
-static VK_RESULT graphics_pipeline_create(struct nulldrv_dev *dev,
- const VK_GRAPHICS_PIPELINE_CREATE_INFO *info_,
+static VkResult graphics_pipeline_create(struct nulldrv_dev *dev,
+ const VkGraphicsPipelineCreateInfo *info_,
struct nulldrv_pipeline **pipeline_ret)
{
struct nulldrv_pipeline *pipeline;
@@ -631,8 +631,8 @@ static VK_RESULT graphics_pipeline_create(struct nulldrv_dev *dev,
return VK_SUCCESS;
}
-static VK_RESULT nulldrv_viewport_state_create(struct nulldrv_dev *dev,
- const VK_DYNAMIC_VP_STATE_CREATE_INFO *info,
+static VkResult nulldrv_viewport_state_create(struct nulldrv_dev *dev,
+ const VkDynamicVpStateCreateInfo *info,
struct nulldrv_dynamic_vp **state_ret)
{
struct nulldrv_dynamic_vp *state;
@@ -647,8 +647,8 @@ static VK_RESULT nulldrv_viewport_state_create(struct nulldrv_dev *dev,
return VK_SUCCESS;
}
-static VK_RESULT nulldrv_raster_state_create(struct nulldrv_dev *dev,
- const VK_DYNAMIC_RS_STATE_CREATE_INFO *info,
+static VkResult nulldrv_raster_state_create(struct nulldrv_dev *dev,
+ const VkDynamicRsStateCreateInfo *info,
struct nulldrv_dynamic_rs **state_ret)
{
struct nulldrv_dynamic_rs *state;
@@ -663,8 +663,8 @@ static VK_RESULT nulldrv_raster_state_create(struct nulldrv_dev *dev,
return VK_SUCCESS;
}
-static VK_RESULT nulldrv_blend_state_create(struct nulldrv_dev *dev,
- const VK_DYNAMIC_CB_STATE_CREATE_INFO *info,
+static VkResult nulldrv_blend_state_create(struct nulldrv_dev *dev,
+ const VkDynamicCbStateCreateInfo *info,
struct nulldrv_dynamic_cb **state_ret)
{
struct nulldrv_dynamic_cb *state;
@@ -679,8 +679,8 @@ static VK_RESULT nulldrv_blend_state_create(struct nulldrv_dev *dev,
return VK_SUCCESS;
}
-static VK_RESULT nulldrv_ds_state_create(struct nulldrv_dev *dev,
- const VK_DYNAMIC_DS_STATE_CREATE_INFO *info,
+static VkResult nulldrv_ds_state_create(struct nulldrv_dev *dev,
+ const VkDynamicDsStateCreateInfo *info,
struct nulldrv_dynamic_ds **state_ret)
{
struct nulldrv_dynamic_ds *state;
@@ -696,8 +696,8 @@ static VK_RESULT nulldrv_ds_state_create(struct nulldrv_dev *dev,
}
-static VK_RESULT nulldrv_cmd_create(struct nulldrv_dev *dev,
- const VK_CMD_BUFFER_CREATE_INFO *info,
+static VkResult nulldrv_cmd_create(struct nulldrv_dev *dev,
+ const VkCmdBufferCreateInfo *info,
struct nulldrv_cmd **cmd_ret)
{
struct nulldrv_cmd *cmd;
@@ -712,10 +712,10 @@ static VK_RESULT nulldrv_cmd_create(struct nulldrv_dev *dev,
return VK_SUCCESS;
}
-static VK_RESULT nulldrv_desc_pool_create(struct nulldrv_dev *dev,
- VK_DESCRIPTOR_POOL_USAGE usage,
+static VkResult nulldrv_desc_pool_create(struct nulldrv_dev *dev,
+ VkDescriptorPoolUsage usage,
uint32_t max_sets,
- const VK_DESCRIPTOR_POOL_CREATE_INFO *info,
+ const VkDescriptorPoolCreateInfo *info,
struct nulldrv_desc_pool **pool_ret)
{
struct nulldrv_desc_pool *pool;
@@ -733,9 +733,9 @@ static VK_RESULT nulldrv_desc_pool_create(struct nulldrv_dev *dev,
return VK_SUCCESS;
}
-static VK_RESULT nulldrv_desc_set_create(struct nulldrv_dev *dev,
+static VkResult nulldrv_desc_set_create(struct nulldrv_dev *dev,
struct nulldrv_desc_pool *pool,
- VK_DESCRIPTOR_SET_USAGE usage,
+ VkDescriptorSetUsage usage,
const struct nulldrv_desc_layout *layout,
struct nulldrv_desc_set **set_ret)
{
@@ -754,13 +754,13 @@ static VK_RESULT nulldrv_desc_set_create(struct nulldrv_dev *dev,
return VK_SUCCESS;
}
-static struct nulldrv_desc_pool *nulldrv_desc_pool(VK_DESCRIPTOR_POOL pool)
+static struct nulldrv_desc_pool *nulldrv_desc_pool(VkDescriptorPool pool)
{
return (struct nulldrv_desc_pool *) pool;
}
-static VK_RESULT nulldrv_fb_create(struct nulldrv_dev *dev,
- const VK_FRAMEBUFFER_CREATE_INFO* info,
+static VkResult nulldrv_fb_create(struct nulldrv_dev *dev,
+ const VkFramebufferCreateInfo* info,
struct nulldrv_framebuffer ** fb_ret)
{
@@ -776,8 +776,8 @@ static VK_RESULT nulldrv_fb_create(struct nulldrv_dev *dev,
}
-static VK_RESULT nulldrv_render_pass_create(struct nulldrv_dev *dev,
- const VK_RENDER_PASS_CREATE_INFO* info,
+static VkResult nulldrv_render_pass_create(struct nulldrv_dev *dev,
+ const VkRenderPassCreateInfo* info,
struct nulldrv_render_pass** rp_ret)
{
struct nulldrv_render_pass *rp;
@@ -791,12 +791,12 @@ static VK_RESULT nulldrv_render_pass_create(struct nulldrv_dev *dev,
return VK_SUCCESS;
}
-static struct nulldrv_buf *nulldrv_buf(VK_BUFFER buf)
+static struct nulldrv_buf *nulldrv_buf(VkBuffer buf)
{
return (struct nulldrv_buf *) buf;
}
-static VK_RESULT nulldrv_buf_view_create(struct nulldrv_dev *dev,
+static VkResult nulldrv_buf_view_create(struct nulldrv_dev *dev,
const VkBufferViewCreateInfo *info,
struct nulldrv_buf_view **view_ret)
{
@@ -820,10 +820,10 @@ static VK_RESULT nulldrv_buf_view_create(struct nulldrv_dev *dev,
// Driver entry points
//*********************************************
-ICD_EXPORT VK_RESULT VKAPI vkCreateBuffer(
- VK_DEVICE device,
+ICD_EXPORT VkResult VKAPI vkCreateBuffer(
+ VkDevice device,
const VkBufferCreateInfo* pCreateInfo,
- VK_BUFFER* pBuffer)
+ VkBuffer* pBuffer)
{
NULLDRV_LOG_FUNC;
struct nulldrv_dev *dev = nulldrv_dev(device);
@@ -831,10 +831,10 @@ ICD_EXPORT VK_RESULT VKAPI vkCreateBuffer(
return nulldrv_buf_create(dev, pCreateInfo, (struct nulldrv_buf **) pBuffer);
}
-ICD_EXPORT VK_RESULT VKAPI vkCreateCommandBuffer(
- VK_DEVICE device,
- const VK_CMD_BUFFER_CREATE_INFO* pCreateInfo,
- VK_CMD_BUFFER* pCmdBuffer)
+ICD_EXPORT VkResult VKAPI vkCreateCommandBuffer(
+ VkDevice device,
+ const VkCmdBufferCreateInfo* pCreateInfo,
+ VkCmdBuffer* pCmdBuffer)
{
NULLDRV_LOG_FUNC;
struct nulldrv_dev *dev = nulldrv_dev(device);
@@ -843,31 +843,31 @@ ICD_EXPORT VK_RESULT VKAPI vkCreateCommandBuffer(
(struct nulldrv_cmd **) pCmdBuffer);
}
-ICD_EXPORT VK_RESULT VKAPI vkBeginCommandBuffer(
- VK_CMD_BUFFER cmdBuffer,
- const VK_CMD_BUFFER_BEGIN_INFO *info)
+ICD_EXPORT VkResult VKAPI vkBeginCommandBuffer(
+ VkCmdBuffer cmdBuffer,
+ const VkCmdBufferBeginInfo *info)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkEndCommandBuffer(
- VK_CMD_BUFFER cmdBuffer)
+ICD_EXPORT VkResult VKAPI vkEndCommandBuffer(
+ VkCmdBuffer cmdBuffer)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkResetCommandBuffer(
- VK_CMD_BUFFER cmdBuffer)
+ICD_EXPORT VkResult VKAPI vkResetCommandBuffer(
+ VkCmdBuffer cmdBuffer)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
ICD_EXPORT void VKAPI vkCmdInitAtomicCounters(
- VK_CMD_BUFFER cmdBuffer,
- VK_PIPELINE_BIND_POINT pipelineBindPoint,
+ VkCmdBuffer cmdBuffer,
+ VkPipelineBindPoint pipelineBindPoint,
uint32_t startCounter,
uint32_t counterCount,
const uint32_t* pData)
@@ -876,181 +876,181 @@ ICD_EXPORT void VKAPI vkCmdInitAtomicCounters(
}
ICD_EXPORT void VKAPI vkCmdLoadAtomicCounters(
- VK_CMD_BUFFER cmdBuffer,
- VK_PIPELINE_BIND_POINT pipelineBindPoint,
+ VkCmdBuffer cmdBuffer,
+ VkPipelineBindPoint pipelineBindPoint,
uint32_t startCounter,
uint32_t counterCount,
- VK_BUFFER srcBuffer,
- VK_GPU_SIZE srcOffset)
+ VkBuffer srcBuffer,
+ VkGpuSize srcOffset)
{
NULLDRV_LOG_FUNC;
}
ICD_EXPORT void VKAPI vkCmdSaveAtomicCounters(
- VK_CMD_BUFFER cmdBuffer,
- VK_PIPELINE_BIND_POINT pipelineBindPoint,
+ VkCmdBuffer cmdBuffer,
+ VkPipelineBindPoint pipelineBindPoint,
uint32_t startCounter,
uint32_t counterCount,
- VK_BUFFER destBuffer,
- VK_GPU_SIZE destOffset)
+ VkBuffer destBuffer,
+ VkGpuSize destOffset)
{
NULLDRV_LOG_FUNC;
}
ICD_EXPORT void VKAPI vkCmdDbgMarkerBegin(
- VK_CMD_BUFFER cmdBuffer,
+ VkCmdBuffer cmdBuffer,
const char* pMarker)
{
NULLDRV_LOG_FUNC;
}
ICD_EXPORT void VKAPI vkCmdDbgMarkerEnd(
- VK_CMD_BUFFER cmdBuffer)
+ VkCmdBuffer cmdBuffer)
{
NULLDRV_LOG_FUNC;
}
ICD_EXPORT void VKAPI vkCmdCopyBuffer(
- VK_CMD_BUFFER cmdBuffer,
- VK_BUFFER srcBuffer,
- VK_BUFFER destBuffer,
+ VkCmdBuffer cmdBuffer,
+ VkBuffer srcBuffer,
+ VkBuffer destBuffer,
uint32_t regionCount,
- const VK_BUFFER_COPY* pRegions)
+ const VkBufferCopy* pRegions)
{
NULLDRV_LOG_FUNC;
}
ICD_EXPORT void VKAPI vkCmdCopyImage(
- VK_CMD_BUFFER cmdBuffer,
- VK_IMAGE srcImage,
- VK_IMAGE_LAYOUT srcImageLayout,
- VK_IMAGE destImage,
- VK_IMAGE_LAYOUT destImageLayout,
+ VkCmdBuffer cmdBuffer,
+ VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkImage destImage,
+ VkImageLayout destImageLayout,
uint32_t regionCount,
- const VK_IMAGE_COPY* pRegions)
+ const VkImageCopy* pRegions)
{
NULLDRV_LOG_FUNC;
}
ICD_EXPORT void VKAPI vkCmdBlitImage(
- VK_CMD_BUFFER cmdBuffer,
- VK_IMAGE srcImage,
- VK_IMAGE_LAYOUT srcImageLayout,
- VK_IMAGE destImage,
- VK_IMAGE_LAYOUT destImageLayout,
+ VkCmdBuffer cmdBuffer,
+ VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkImage destImage,
+ VkImageLayout destImageLayout,
uint32_t regionCount,
- const VK_IMAGE_BLIT* pRegions)
+ const VkImageBlit* pRegions)
{
NULLDRV_LOG_FUNC;
}
ICD_EXPORT void VKAPI vkCmdCopyBufferToImage(
- VK_CMD_BUFFER cmdBuffer,
- VK_BUFFER srcBuffer,
- VK_IMAGE destImage,
- VK_IMAGE_LAYOUT destImageLayout,
+ VkCmdBuffer cmdBuffer,
+ VkBuffer srcBuffer,
+ VkImage destImage,
+ VkImageLayout destImageLayout,
uint32_t regionCount,
- const VK_BUFFER_IMAGE_COPY* pRegions)
+ const VkBufferImageCopy* pRegions)
{
NULLDRV_LOG_FUNC;
}
ICD_EXPORT void VKAPI vkCmdCopyImageToBuffer(
- VK_CMD_BUFFER cmdBuffer,
- VK_IMAGE srcImage,
- VK_IMAGE_LAYOUT srcImageLayout,
- VK_BUFFER destBuffer,
+ VkCmdBuffer cmdBuffer,
+ VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkBuffer destBuffer,
uint32_t regionCount,
- const VK_BUFFER_IMAGE_COPY* pRegions)
+ const VkBufferImageCopy* pRegions)
{
NULLDRV_LOG_FUNC;
}
ICD_EXPORT void VKAPI vkCmdCloneImageData(
- VK_CMD_BUFFER cmdBuffer,
- VK_IMAGE srcImage,
- VK_IMAGE_LAYOUT srcImageLayout,
- VK_IMAGE destImage,
- VK_IMAGE_LAYOUT destImageLayout)
+ VkCmdBuffer cmdBuffer,
+ VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkImage destImage,
+ VkImageLayout destImageLayout)
{
NULLDRV_LOG_FUNC;
}
ICD_EXPORT void VKAPI vkCmdUpdateBuffer(
- VK_CMD_BUFFER cmdBuffer,
- VK_BUFFER destBuffer,
- VK_GPU_SIZE destOffset,
- VK_GPU_SIZE dataSize,
+ VkCmdBuffer cmdBuffer,
+ VkBuffer destBuffer,
+ VkGpuSize destOffset,
+ VkGpuSize dataSize,
const uint32_t* pData)
{
NULLDRV_LOG_FUNC;
}
ICD_EXPORT void VKAPI vkCmdFillBuffer(
- VK_CMD_BUFFER cmdBuffer,
- VK_BUFFER destBuffer,
- VK_GPU_SIZE destOffset,
- VK_GPU_SIZE fillSize,
+ VkCmdBuffer cmdBuffer,
+ VkBuffer destBuffer,
+ VkGpuSize destOffset,
+ VkGpuSize fillSize,
uint32_t data)
{
NULLDRV_LOG_FUNC;
}
ICD_EXPORT void VKAPI vkCmdClearColorImage(
- VK_CMD_BUFFER cmdBuffer,
- VK_IMAGE image,
- VK_IMAGE_LAYOUT imageLayout,
- VK_CLEAR_COLOR color,
+ VkCmdBuffer cmdBuffer,
+ VkImage image,
+ VkImageLayout imageLayout,
+ VkClearColor color,
uint32_t rangeCount,
- const VK_IMAGE_SUBRESOURCE_RANGE* pRanges)
+ const VkImageSubresourceRange* pRanges)
{
NULLDRV_LOG_FUNC;
}
ICD_EXPORT void VKAPI vkCmdClearDepthStencil(
- VK_CMD_BUFFER cmdBuffer,
- VK_IMAGE image,
- VK_IMAGE_LAYOUT imageLayout,
+ VkCmdBuffer cmdBuffer,
+ VkImage image,
+ VkImageLayout imageLayout,
float depth,
uint32_t stencil,
uint32_t rangeCount,
- const VK_IMAGE_SUBRESOURCE_RANGE* pRanges)
+ const VkImageSubresourceRange* pRanges)
{
NULLDRV_LOG_FUNC;
}
ICD_EXPORT void VKAPI vkCmdResolveImage(
- VK_CMD_BUFFER cmdBuffer,
- VK_IMAGE srcImage,
- VK_IMAGE_LAYOUT srcImageLayout,
- VK_IMAGE destImage,
- VK_IMAGE_LAYOUT destImageLayout,
+ VkCmdBuffer cmdBuffer,
+ VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkImage destImage,
+ VkImageLayout destImageLayout,
uint32_t rectCount,
- const VK_IMAGE_RESOLVE* pRects)
+ const VkImageResolve* pRects)
{
NULLDRV_LOG_FUNC;
}
ICD_EXPORT void VKAPI vkCmdBeginQuery(
- VK_CMD_BUFFER cmdBuffer,
- VK_QUERY_POOL queryPool,
+ VkCmdBuffer cmdBuffer,
+ VkQueryPool queryPool,
uint32_t slot,
- VK_FLAGS flags)
+ VkFlags flags)
{
NULLDRV_LOG_FUNC;
}
ICD_EXPORT void VKAPI vkCmdEndQuery(
- VK_CMD_BUFFER cmdBuffer,
- VK_QUERY_POOL queryPool,
+ VkCmdBuffer cmdBuffer,
+ VkQueryPool queryPool,
uint32_t slot)
{
NULLDRV_LOG_FUNC;
}
ICD_EXPORT void VKAPI vkCmdResetQueryPool(
- VK_CMD_BUFFER cmdBuffer,
- VK_QUERY_POOL queryPool,
+ VkCmdBuffer cmdBuffer,
+ VkQueryPool queryPool,
uint32_t startQuery,
uint32_t queryCount)
{
@@ -1058,78 +1058,78 @@ ICD_EXPORT void VKAPI vkCmdResetQueryPool(
}
ICD_EXPORT void VKAPI vkCmdSetEvent(
- VK_CMD_BUFFER cmdBuffer,
- VK_EVENT event_,
- VK_PIPE_EVENT pipeEvent)
+ VkCmdBuffer cmdBuffer,
+ VkEvent event_,
+ VkPipeEvent pipeEvent)
{
NULLDRV_LOG_FUNC;
}
ICD_EXPORT void VKAPI vkCmdResetEvent(
- VK_CMD_BUFFER cmdBuffer,
- VK_EVENT event_,
- VK_PIPE_EVENT pipeEvent)
+ VkCmdBuffer cmdBuffer,
+ VkEvent event_,
+ VkPipeEvent pipeEvent)
{
NULLDRV_LOG_FUNC;
}
ICD_EXPORT void VKAPI vkCmdWriteTimestamp(
- VK_CMD_BUFFER cmdBuffer,
- VK_TIMESTAMP_TYPE timestampType,
- VK_BUFFER destBuffer,
- VK_GPU_SIZE destOffset)
+ VkCmdBuffer cmdBuffer,
+ VkTimestampType timestampType,
+ VkBuffer destBuffer,
+ VkGpuSize destOffset)
{
NULLDRV_LOG_FUNC;
}
ICD_EXPORT void VKAPI vkCmdBindPipeline(
- VK_CMD_BUFFER cmdBuffer,
- VK_PIPELINE_BIND_POINT pipelineBindPoint,
- VK_PIPELINE pipeline)
+ VkCmdBuffer cmdBuffer,
+ VkPipelineBindPoint pipelineBindPoint,
+ VkPipeline pipeline)
{
NULLDRV_LOG_FUNC;
}
ICD_EXPORT void VKAPI vkCmdBindDynamicStateObject(
- VK_CMD_BUFFER cmdBuffer,
- VK_STATE_BIND_POINT stateBindPoint,
- VK_DYNAMIC_STATE_OBJECT state)
+ VkCmdBuffer cmdBuffer,
+ VkStateBindPoint stateBindPoint,
+ VkDynamicStateObject state)
{
NULLDRV_LOG_FUNC;
}
ICD_EXPORT void VKAPI vkCmdBindDescriptorSets(
- VK_CMD_BUFFER cmdBuffer,
- VK_PIPELINE_BIND_POINT pipelineBindPoint,
- VK_DESCRIPTOR_SET_LAYOUT_CHAIN layoutChain,
+ VkCmdBuffer cmdBuffer,
+ VkPipelineBindPoint pipelineBindPoint,
+ VkDescriptorSetLayoutChain layoutChain,
uint32_t layoutChainSlot,
uint32_t count,
- const VK_DESCRIPTOR_SET* pDescriptorSets,
+ const VkDescriptorSet* pDescriptorSets,
const uint32_t* pUserData)
{
NULLDRV_LOG_FUNC;
}
ICD_EXPORT void VKAPI vkCmdBindVertexBuffer(
- VK_CMD_BUFFER cmdBuffer,
- VK_BUFFER buffer,
- VK_GPU_SIZE offset,
+ VkCmdBuffer cmdBuffer,
+ VkBuffer buffer,
+ VkGpuSize offset,
uint32_t binding)
{
NULLDRV_LOG_FUNC;
}
ICD_EXPORT void VKAPI vkCmdBindIndexBuffer(
- VK_CMD_BUFFER cmdBuffer,
- VK_BUFFER buffer,
- VK_GPU_SIZE offset,
- VK_INDEX_TYPE indexType)
+ VkCmdBuffer cmdBuffer,
+ VkBuffer buffer,
+ VkGpuSize offset,
+ VkIndexType indexType)
{
NULLDRV_LOG_FUNC;
}
ICD_EXPORT void VKAPI vkCmdDraw(
- VK_CMD_BUFFER cmdBuffer,
+ VkCmdBuffer cmdBuffer,
uint32_t firstVertex,
uint32_t vertexCount,
uint32_t firstInstance,
@@ -1139,7 +1139,7 @@ ICD_EXPORT void VKAPI vkCmdDraw(
}
ICD_EXPORT void VKAPI vkCmdDrawIndexed(
- VK_CMD_BUFFER cmdBuffer,
+ VkCmdBuffer cmdBuffer,
uint32_t firstIndex,
uint32_t indexCount,
int32_t vertexOffset,
@@ -1150,9 +1150,9 @@ ICD_EXPORT void VKAPI vkCmdDrawIndexed(
}
ICD_EXPORT void VKAPI vkCmdDrawIndirect(
- VK_CMD_BUFFER cmdBuffer,
- VK_BUFFER buffer,
- VK_GPU_SIZE offset,
+ VkCmdBuffer cmdBuffer,
+ VkBuffer buffer,
+ VkGpuSize offset,
uint32_t count,
uint32_t stride)
{
@@ -1160,9 +1160,9 @@ ICD_EXPORT void VKAPI vkCmdDrawIndirect(
}
ICD_EXPORT void VKAPI vkCmdDrawIndexedIndirect(
- VK_CMD_BUFFER cmdBuffer,
- VK_BUFFER buffer,
- VK_GPU_SIZE offset,
+ VkCmdBuffer cmdBuffer,
+ VkBuffer buffer,
+ VkGpuSize offset,
uint32_t count,
uint32_t stride)
{
@@ -1170,7 +1170,7 @@ ICD_EXPORT void VKAPI vkCmdDrawIndexedIndirect(
}
ICD_EXPORT void VKAPI vkCmdDispatch(
- VK_CMD_BUFFER cmdBuffer,
+ VkCmdBuffer cmdBuffer,
uint32_t x,
uint32_t y,
uint32_t z)
@@ -1179,49 +1179,49 @@ ICD_EXPORT void VKAPI vkCmdDispatch(
}
ICD_EXPORT void VKAPI vkCmdDispatchIndirect(
- VK_CMD_BUFFER cmdBuffer,
- VK_BUFFER buffer,
- VK_GPU_SIZE offset)
+ VkCmdBuffer cmdBuffer,
+ VkBuffer buffer,
+ VkGpuSize offset)
{
NULLDRV_LOG_FUNC;
}
ICD_EXPORT void VKAPI vkCmdWaitEvents(
- VK_CMD_BUFFER cmdBuffer,
- const VK_EVENT_WAIT_INFO* pWaitInfo)
+ VkCmdBuffer cmdBuffer,
+ const VkEventWaitInfo* pWaitInfo)
{
NULLDRV_LOG_FUNC;
}
ICD_EXPORT void VKAPI vkCmdPipelineBarrier(
- VK_CMD_BUFFER cmdBuffer,
- const VK_PIPELINE_BARRIER* pBarrier)
+ VkCmdBuffer cmdBuffer,
+ const VkPipelineBarrier* pBarrier)
{
NULLDRV_LOG_FUNC;
}
-ICD_EXPORT VK_RESULT VKAPI vkCreateDevice(
- VK_PHYSICAL_GPU gpu_,
+ICD_EXPORT VkResult VKAPI vkCreateDevice(
+ VkPhysicalGpu gpu_,
const VkDeviceCreateInfo* pCreateInfo,
- VK_DEVICE* pDevice)
+ VkDevice* pDevice)
{
NULLDRV_LOG_FUNC;
struct nulldrv_gpu *gpu = nulldrv_gpu(gpu_);
return nulldrv_dev_create(gpu, pCreateInfo, (struct nulldrv_dev**)pDevice);
}
-ICD_EXPORT VK_RESULT VKAPI vkDestroyDevice(
- VK_DEVICE device)
+ICD_EXPORT VkResult VKAPI vkDestroyDevice(
+ VkDevice device)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkGetDeviceQueue(
- VK_DEVICE device,
+ICD_EXPORT VkResult VKAPI vkGetDeviceQueue(
+ VkDevice device,
uint32_t queueNodeIndex,
uint32_t queueIndex,
- VK_QUEUE* pQueue)
+ VkQueue* pQueue)
{
NULLDRV_LOG_FUNC;
struct nulldrv_dev *dev = nulldrv_dev(device);
@@ -1229,23 +1229,23 @@ ICD_EXPORT VK_RESULT VKAPI vkGetDeviceQueue(
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkDeviceWaitIdle(
- VK_DEVICE device)
+ICD_EXPORT VkResult VKAPI vkDeviceWaitIdle(
+ VkDevice device)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkDbgSetValidationLevel(
- VK_DEVICE device,
- VK_VALIDATION_LEVEL validationLevel)
+ICD_EXPORT VkResult VKAPI vkDbgSetValidationLevel(
+ VkDevice device,
+ VkValidationLevel validationLevel)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkDbgSetMessageFilter(
- VK_DEVICE device,
+ICD_EXPORT VkResult VKAPI vkDbgSetMessageFilter(
+ VkDevice device,
int32_t msgCode,
VK_DBG_MSG_FILTER filter)
{
@@ -1253,8 +1253,8 @@ ICD_EXPORT VK_RESULT VKAPI vkDbgSetMessageFilter(
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkDbgSetDeviceOption(
- VK_DEVICE device,
+ICD_EXPORT VkResult VKAPI vkDbgSetDeviceOption(
+ VkDevice device,
VK_DBG_DEVICE_OPTION dbgOption,
size_t dataSize,
const void* pData)
@@ -1263,40 +1263,40 @@ ICD_EXPORT VK_RESULT VKAPI vkDbgSetDeviceOption(
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkCreateEvent(
- VK_DEVICE device,
- const VK_EVENT_CREATE_INFO* pCreateInfo,
- VK_EVENT* pEvent)
+ICD_EXPORT VkResult VKAPI vkCreateEvent(
+ VkDevice device,
+ const VkEventCreateInfo* pCreateInfo,
+ VkEvent* pEvent)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkGetEventStatus(
- VK_EVENT event_)
+ICD_EXPORT VkResult VKAPI vkGetEventStatus(
+ VkEvent event_)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkSetEvent(
- VK_EVENT event_)
+ICD_EXPORT VkResult VKAPI vkSetEvent(
+ VkEvent event_)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkResetEvent(
- VK_EVENT event_)
+ICD_EXPORT VkResult VKAPI vkResetEvent(
+ VkEvent event_)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkCreateFence(
- VK_DEVICE device,
- const VK_FENCE_CREATE_INFO* pCreateInfo,
- VK_FENCE* pFence)
+ICD_EXPORT VkResult VKAPI vkCreateFence(
+ VkDevice device,
+ const VkFenceCreateInfo* pCreateInfo,
+ VkFence* pFence)
{
NULLDRV_LOG_FUNC;
struct nulldrv_dev *dev = nulldrv_dev(device);
@@ -1305,26 +1305,26 @@ ICD_EXPORT VK_RESULT VKAPI vkCreateFence(
(struct nulldrv_fence **) pFence);
}
-ICD_EXPORT VK_RESULT VKAPI vkGetFenceStatus(
- VK_FENCE fence_)
+ICD_EXPORT VkResult VKAPI vkGetFenceStatus(
+ VkFence fence_)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkResetFences(
- VK_DEVICE device,
+ICD_EXPORT VkResult VKAPI vkResetFences(
+ VkDevice device,
uint32_t fenceCount,
- VK_FENCE* pFences)
+ VkFence* pFences)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkWaitForFences(
- VK_DEVICE device,
+ICD_EXPORT VkResult VKAPI vkWaitForFences(
+ VkDevice device,
uint32_t fenceCount,
- const VK_FENCE* pFences,
+ const VkFence* pFences,
bool32_t waitAll,
uint64_t timeout)
{
@@ -1332,10 +1332,10 @@ ICD_EXPORT VK_RESULT VKAPI vkWaitForFences(
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkGetFormatInfo(
- VK_DEVICE device,
- VK_FORMAT format,
- VK_FORMAT_INFO_TYPE infoType,
+ICD_EXPORT VkResult VKAPI vkGetFormatInfo(
+ VkDevice device,
+ VkFormat format,
+ VkFormatInfoType infoType,
size_t* pDataSize,
void* pData)
{
@@ -1343,9 +1343,9 @@ ICD_EXPORT VK_RESULT VKAPI vkGetFormatInfo(
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkGetGpuInfo(
- VK_PHYSICAL_GPU gpu_,
- VK_PHYSICAL_GPU_INFO_TYPE infoType,
+ICD_EXPORT VkResult VKAPI vkGetGpuInfo(
+ VkPhysicalGpu gpu_,
+ VkPhysicalGpuInfoType infoType,
size_t* pDataSize,
void* pData)
{
@@ -1353,37 +1353,37 @@ ICD_EXPORT VK_RESULT VKAPI vkGetGpuInfo(
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkGetExtensionSupport(
- VK_PHYSICAL_GPU gpu_,
+ICD_EXPORT VkResult VKAPI vkGetExtensionSupport(
+ VkPhysicalGpu gpu_,
const char* pExtName)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkGetMultiGpuCompatibility(
- VK_PHYSICAL_GPU gpu0_,
- VK_PHYSICAL_GPU gpu1_,
- VK_GPU_COMPATIBILITY_INFO* pInfo)
+ICD_EXPORT VkResult VKAPI vkGetMultiGpuCompatibility(
+ VkPhysicalGpu gpu0_,
+ VkPhysicalGpu gpu1_,
+ VkGpuCompatibilityInfo* pInfo)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkOpenPeerImage(
- VK_DEVICE device,
- const VK_PEER_IMAGE_OPEN_INFO* pOpenInfo,
- VK_IMAGE* pImage,
- VK_GPU_MEMORY* pMem)
+ICD_EXPORT VkResult VKAPI vkOpenPeerImage(
+ VkDevice device,
+ const VkPeerImageOpenInfo* pOpenInfo,
+ VkImage* pImage,
+ VkGpuMemory* pMem)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkCreateImage(
- VK_DEVICE device,
- const VK_IMAGE_CREATE_INFO* pCreateInfo,
- VK_IMAGE* pImage)
+ICD_EXPORT VkResult VKAPI vkCreateImage(
+ VkDevice device,
+ const VkImageCreateInfo* pCreateInfo,
+ VkImage* pImage)
{
NULLDRV_LOG_FUNC;
struct nulldrv_dev *dev = nulldrv_dev(device);
@@ -1392,22 +1392,22 @@ ICD_EXPORT VK_RESULT VKAPI vkCreateImage(
(struct nulldrv_img **) pImage);
}
-ICD_EXPORT VK_RESULT VKAPI vkGetImageSubresourceInfo(
- VK_IMAGE image,
- const VK_IMAGE_SUBRESOURCE* pSubresource,
- VK_SUBRESOURCE_INFO_TYPE infoType,
+ICD_EXPORT VkResult VKAPI vkGetImageSubresourceInfo(
+ VkImage image,
+ const VkImageSubresource* pSubresource,
+ VkSubresourceInfoType infoType,
size_t* pDataSize,
void* pData)
{
NULLDRV_LOG_FUNC;
- VK_RESULT ret = VK_SUCCESS;
+ VkResult ret = VK_SUCCESS;
switch (infoType) {
case VK_INFO_TYPE_SUBRESOURCE_LAYOUT:
{
- VK_SUBRESOURCE_LAYOUT *layout = (VK_SUBRESOURCE_LAYOUT *) pData;
+ VkSubresourceLayout *layout = (VkSubresourceLayout *) pData;
- *pDataSize = sizeof(VK_SUBRESOURCE_LAYOUT);
+ *pDataSize = sizeof(VkSubresourceLayout);
if (pData == NULL)
return ret;
@@ -1425,10 +1425,10 @@ ICD_EXPORT VK_RESULT VKAPI vkGetImageSubresourceInfo(
return ret;
}
-ICD_EXPORT VK_RESULT VKAPI vkAllocMemory(
- VK_DEVICE device,
+ICD_EXPORT VkResult VKAPI vkAllocMemory(
+ VkDevice device,
const VkMemoryAllocInfo* pAllocInfo,
- VK_GPU_MEMORY* pMem)
+ VkGpuMemory* pMem)
{
NULLDRV_LOG_FUNC;
struct nulldrv_dev *dev = nulldrv_dev(device);
@@ -1436,24 +1436,24 @@ ICD_EXPORT VK_RESULT VKAPI vkAllocMemory(
return nulldrv_mem_alloc(dev, pAllocInfo, (struct nulldrv_mem **) pMem);
}
-ICD_EXPORT VK_RESULT VKAPI vkFreeMemory(
- VK_GPU_MEMORY mem_)
+ICD_EXPORT VkResult VKAPI vkFreeMemory(
+ VkGpuMemory mem_)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkSetMemoryPriority(
- VK_GPU_MEMORY mem_,
- VK_MEMORY_PRIORITY priority)
+ICD_EXPORT VkResult VKAPI vkSetMemoryPriority(
+ VkGpuMemory mem_,
+ VkMemoryPriority priority)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkMapMemory(
- VK_GPU_MEMORY mem_,
- VK_FLAGS flags,
+ICD_EXPORT VkResult VKAPI vkMapMemory(
+ VkGpuMemory mem_,
+ VkFlags flags,
void** ppData)
{
NULLDRV_LOG_FUNC;
@@ -1465,44 +1465,44 @@ ICD_EXPORT VK_RESULT VKAPI vkMapMemory(
return (ptr) ? VK_SUCCESS : VK_ERROR_UNKNOWN;
}
-ICD_EXPORT VK_RESULT VKAPI vkUnmapMemory(
- VK_GPU_MEMORY mem_)
+ICD_EXPORT VkResult VKAPI vkUnmapMemory(
+ VkGpuMemory mem_)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkPinSystemMemory(
- VK_DEVICE device,
+ICD_EXPORT VkResult VKAPI vkPinSystemMemory(
+ VkDevice device,
const void* pSysMem,
size_t memSize,
- VK_GPU_MEMORY* pMem)
+ VkGpuMemory* pMem)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkOpenSharedMemory(
- VK_DEVICE device,
- const VK_MEMORY_OPEN_INFO* pOpenInfo,
- VK_GPU_MEMORY* pMem)
+ICD_EXPORT VkResult VKAPI vkOpenSharedMemory(
+ VkDevice device,
+ const VkMemoryOpenInfo* pOpenInfo,
+ VkGpuMemory* pMem)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkOpenPeerMemory(
- VK_DEVICE device,
- const VK_PEER_MEMORY_OPEN_INFO* pOpenInfo,
- VK_GPU_MEMORY* pMem)
+ICD_EXPORT VkResult VKAPI vkOpenPeerMemory(
+ VkDevice device,
+ const VkPeerMemoryOpenInfo* pOpenInfo,
+ VkGpuMemory* pMem)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkCreateInstance(
+ICD_EXPORT VkResult VKAPI vkCreateInstance(
const VkInstanceCreateInfo* pCreateInfo,
- VK_INSTANCE* pInstance)
+ VkInstance* pInstance)
{
NULLDRV_LOG_FUNC;
struct nulldrv_instance *inst;
@@ -1514,36 +1514,36 @@ ICD_EXPORT VK_RESULT VKAPI vkCreateInstance(
inst->obj.base.get_info = NULL;
- *pInstance = (VK_INSTANCE*)inst;
+ *pInstance = (VkInstance*)inst;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkDestroyInstance(
- VK_INSTANCE pInstance)
+ICD_EXPORT VkResult VKAPI vkDestroyInstance(
+ VkInstance pInstance)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkEnumerateGpus(
- VK_INSTANCE instance,
+ICD_EXPORT VkResult VKAPI vkEnumerateGpus(
+ VkInstance instance,
uint32_t maxGpus,
uint32_t* pGpuCount,
- VK_PHYSICAL_GPU* pGpus)
+ VkPhysicalGpu* pGpus)
{
NULLDRV_LOG_FUNC;
- VK_RESULT ret;
+ VkResult ret;
struct nulldrv_gpu *gpu;
*pGpuCount = 1;
ret = nulldrv_gpu_add(0, 0, 0, &gpu);
if (ret == VK_SUCCESS)
- pGpus[0] = (VK_PHYSICAL_GPU) gpu;
+ pGpus[0] = (VkPhysicalGpu) gpu;
return ret;
}
-ICD_EXPORT VK_RESULT VKAPI vkEnumerateLayers(
- VK_PHYSICAL_GPU gpu,
+ICD_EXPORT VkResult VKAPI vkEnumerateLayers(
+ VkPhysicalGpu gpu,
size_t maxLayerCount,
size_t maxStringSize,
size_t* pOutLayerCount,
@@ -1554,8 +1554,8 @@ ICD_EXPORT VK_RESULT VKAPI vkEnumerateLayers(
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkDbgRegisterMsgCallback(
- VK_INSTANCE instance,
+ICD_EXPORT VkResult VKAPI vkDbgRegisterMsgCallback(
+ VkInstance instance,
VK_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback,
void* pUserData)
{
@@ -1563,16 +1563,16 @@ ICD_EXPORT VK_RESULT VKAPI vkDbgRegisterMsgCallback(
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkDbgUnregisterMsgCallback(
- VK_INSTANCE instance,
+ICD_EXPORT VkResult VKAPI vkDbgUnregisterMsgCallback(
+ VkInstance instance,
VK_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkDbgSetGlobalOption(
- VK_INSTANCE instance,
+ICD_EXPORT VkResult VKAPI vkDbgSetGlobalOption(
+ VkInstance instance,
VK_DBG_GLOBAL_OPTION dbgOption,
size_t dataSize,
const void* pData)
@@ -1581,16 +1581,16 @@ ICD_EXPORT VK_RESULT VKAPI vkDbgSetGlobalOption(
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkDestroyObject(
- VK_OBJECT object)
+ICD_EXPORT VkResult VKAPI vkDestroyObject(
+ VkObject object)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkGetObjectInfo(
- VK_BASE_OBJECT object,
- VK_OBJECT_INFO_TYPE infoType,
+ICD_EXPORT VkResult VKAPI vkGetObjectInfo(
+ VkBaseObject object,
+ VkObjectInfoType infoType,
size_t* pDataSize,
void* pData)
{
@@ -1600,41 +1600,41 @@ ICD_EXPORT VK_RESULT VKAPI vkGetObjectInfo(
return base->get_info(base, infoType, pDataSize, pData);
}
-ICD_EXPORT VK_RESULT VKAPI vkBindObjectMemory(
- VK_OBJECT object,
+ICD_EXPORT VkResult VKAPI vkBindObjectMemory(
+ VkObject object,
uint32_t allocationIdx,
- VK_GPU_MEMORY mem_,
- VK_GPU_SIZE memOffset)
+ VkGpuMemory mem_,
+ VkGpuSize memOffset)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkBindObjectMemoryRange(
- VK_OBJECT object,
+ICD_EXPORT VkResult VKAPI vkBindObjectMemoryRange(
+ VkObject object,
uint32_t allocationIdx,
- VK_GPU_SIZE rangeOffset,
- VK_GPU_SIZE rangeSize,
- VK_GPU_MEMORY mem,
- VK_GPU_SIZE memOffset)
+ VkGpuSize rangeOffset,
+ VkGpuSize rangeSize,
+ VkGpuMemory mem,
+ VkGpuSize memOffset)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkBindImageMemoryRange(
- VK_IMAGE image,
+ICD_EXPORT VkResult VKAPI vkBindImageMemoryRange(
+ VkImage image,
uint32_t allocationIdx,
- const VK_IMAGE_MEMORY_BIND_INFO* bindInfo,
- VK_GPU_MEMORY mem,
- VK_GPU_SIZE memOffset)
+ const VkImageMemoryBindInfo* bindInfo,
+ VkGpuMemory mem,
+ VkGpuSize memOffset)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkDbgSetObjectTag(
- VK_BASE_OBJECT object,
+ICD_EXPORT VkResult VKAPI vkDbgSetObjectTag(
+ VkBaseObject object,
size_t tagSize,
const void* pTag)
{
@@ -1642,10 +1642,10 @@ ICD_EXPORT VK_RESULT VKAPI vkDbgSetObjectTag(
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkCreateGraphicsPipeline(
- VK_DEVICE device,
- const VK_GRAPHICS_PIPELINE_CREATE_INFO* pCreateInfo,
- VK_PIPELINE* pPipeline)
+ICD_EXPORT VkResult VKAPI vkCreateGraphicsPipeline(
+ VkDevice device,
+ const VkGraphicsPipelineCreateInfo* pCreateInfo,
+ VkPipeline* pPipeline)
{
NULLDRV_LOG_FUNC;
struct nulldrv_dev *dev = nulldrv_dev(device);
@@ -1654,11 +1654,11 @@ ICD_EXPORT VK_RESULT VKAPI vkCreateGraphicsPipeline(
(struct nulldrv_pipeline **) pPipeline);
}
-ICD_EXPORT VK_RESULT VKAPI vkCreateGraphicsPipelineDerivative(
- VK_DEVICE device,
- const VK_GRAPHICS_PIPELINE_CREATE_INFO* pCreateInfo,
- VK_PIPELINE basePipeline,
- VK_PIPELINE* pPipeline)
+ICD_EXPORT VkResult VKAPI vkCreateGraphicsPipelineDerivative(
+ VkDevice device,
+ const VkGraphicsPipelineCreateInfo* pCreateInfo,
+ VkPipeline basePipeline,
+ VkPipeline* pPipeline)
{
NULLDRV_LOG_FUNC;
struct nulldrv_dev *dev = nulldrv_dev(device);
@@ -1667,17 +1667,17 @@ ICD_EXPORT VK_RESULT VKAPI vkCreateGraphicsPipelineDerivative(
(struct nulldrv_pipeline **) pPipeline);
}
-ICD_EXPORT VK_RESULT VKAPI vkCreateComputePipeline(
- VK_DEVICE device,
- const VK_COMPUTE_PIPELINE_CREATE_INFO* pCreateInfo,
- VK_PIPELINE* pPipeline)
+ICD_EXPORT VkResult VKAPI vkCreateComputePipeline(
+ VkDevice device,
+ const VkComputePipelineCreateInfo* pCreateInfo,
+ VkPipeline* pPipeline)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkStorePipeline(
- VK_PIPELINE pipeline,
+ICD_EXPORT VkResult VKAPI vkStorePipeline(
+ VkPipeline pipeline,
size_t* pDataSize,
void* pData)
{
@@ -1685,38 +1685,38 @@ ICD_EXPORT VK_RESULT VKAPI vkStorePipeline(
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkLoadPipeline(
- VK_DEVICE device,
+ICD_EXPORT VkResult VKAPI vkLoadPipeline(
+ VkDevice device,
size_t dataSize,
const void* pData,
- VK_PIPELINE* pPipeline)
+ VkPipeline* pPipeline)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkLoadPipelineDerivative(
- VK_DEVICE device,
+ICD_EXPORT VkResult VKAPI vkLoadPipelineDerivative(
+ VkDevice device,
size_t dataSize,
const void* pData,
- VK_PIPELINE basePipeline,
- VK_PIPELINE* pPipeline)
+ VkPipeline basePipeline,
+ VkPipeline* pPipeline)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkCreateQueryPool(
- VK_DEVICE device,
- const VK_QUERY_POOL_CREATE_INFO* pCreateInfo,
- VK_QUERY_POOL* pQueryPool)
+ICD_EXPORT VkResult VKAPI vkCreateQueryPool(
+ VkDevice device,
+ const VkQueryPoolCreateInfo* pCreateInfo,
+ VkQueryPool* pQueryPool)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkGetQueryPoolResults(
- VK_QUERY_POOL queryPool,
+ICD_EXPORT VkResult VKAPI vkGetQueryPoolResults(
+ VkQueryPool queryPool,
uint32_t startQuery,
uint32_t queryCount,
size_t* pDataSize,
@@ -1726,77 +1726,77 @@ ICD_EXPORT VK_RESULT VKAPI vkGetQueryPoolResults(
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkQueueAddMemReference(
- VK_QUEUE queue,
- VK_GPU_MEMORY mem)
+ICD_EXPORT VkResult VKAPI vkQueueAddMemReference(
+ VkQueue queue,
+ VkGpuMemory mem)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkQueueRemoveMemReference(
- VK_QUEUE queue,
- VK_GPU_MEMORY mem)
+ICD_EXPORT VkResult VKAPI vkQueueRemoveMemReference(
+ VkQueue queue,
+ VkGpuMemory mem)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkQueueWaitIdle(
- VK_QUEUE queue_)
+ICD_EXPORT VkResult VKAPI vkQueueWaitIdle(
+ VkQueue queue_)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkQueueSubmit(
- VK_QUEUE queue_,
+ICD_EXPORT VkResult VKAPI vkQueueSubmit(
+ VkQueue queue_,
uint32_t cmdBufferCount,
- const VK_CMD_BUFFER* pCmdBuffers,
- VK_FENCE fence_)
+ const VkCmdBuffer* pCmdBuffers,
+ VkFence fence_)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkOpenSharedSemaphore(
- VK_DEVICE device,
- const VK_SEMAPHORE_OPEN_INFO* pOpenInfo,
- VK_SEMAPHORE* pSemaphore)
+ICD_EXPORT VkResult VKAPI vkOpenSharedSemaphore(
+ VkDevice device,
+ const VkSemaphoreOpenInfo* pOpenInfo,
+ VkSemaphore* pSemaphore)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkCreateSemaphore(
- VK_DEVICE device,
- const VK_SEMAPHORE_CREATE_INFO* pCreateInfo,
- VK_SEMAPHORE* pSemaphore)
+ICD_EXPORT VkResult VKAPI vkCreateSemaphore(
+ VkDevice device,
+ const VkSemaphoreCreateInfo* pCreateInfo,
+ VkSemaphore* pSemaphore)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkQueueSignalSemaphore(
- VK_QUEUE queue,
- VK_SEMAPHORE semaphore)
+ICD_EXPORT VkResult VKAPI vkQueueSignalSemaphore(
+ VkQueue queue,
+ VkSemaphore semaphore)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkQueueWaitSemaphore(
- VK_QUEUE queue,
- VK_SEMAPHORE semaphore)
+ICD_EXPORT VkResult VKAPI vkQueueWaitSemaphore(
+ VkQueue queue,
+ VkSemaphore semaphore)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkCreateSampler(
- VK_DEVICE device,
- const VK_SAMPLER_CREATE_INFO* pCreateInfo,
- VK_SAMPLER* pSampler)
+ICD_EXPORT VkResult VKAPI vkCreateSampler(
+ VkDevice device,
+ const VkSamplerCreateInfo* pCreateInfo,
+ VkSampler* pSampler)
{
NULLDRV_LOG_FUNC;
struct nulldrv_dev *dev = nulldrv_dev(device);
@@ -1805,10 +1805,10 @@ ICD_EXPORT VK_RESULT VKAPI vkCreateSampler(
(struct nulldrv_sampler **) pSampler);
}
-ICD_EXPORT VK_RESULT VKAPI vkCreateShader(
- VK_DEVICE device,
- const VK_SHADER_CREATE_INFO* pCreateInfo,
- VK_SHADER* pShader)
+ICD_EXPORT VkResult VKAPI vkCreateShader(
+ VkDevice device,
+ const VkShaderCreateInfo* pCreateInfo,
+ VkShader* pShader)
{
NULLDRV_LOG_FUNC;
struct nulldrv_dev *dev = nulldrv_dev(device);
@@ -1816,10 +1816,10 @@ ICD_EXPORT VK_RESULT VKAPI vkCreateShader(
return shader_create(dev, pCreateInfo, (struct nulldrv_shader **) pShader);
}
-ICD_EXPORT VK_RESULT VKAPI vkCreateDynamicViewportState(
- VK_DEVICE device,
- const VK_DYNAMIC_VP_STATE_CREATE_INFO* pCreateInfo,
- VK_DYNAMIC_VP_STATE_OBJECT* pState)
+ICD_EXPORT VkResult VKAPI vkCreateDynamicViewportState(
+ VkDevice device,
+ const VkDynamicVpStateCreateInfo* pCreateInfo,
+ VkDynamicVpStateObject* pState)
{
NULLDRV_LOG_FUNC;
struct nulldrv_dev *dev = nulldrv_dev(device);
@@ -1828,10 +1828,10 @@ ICD_EXPORT VK_RESULT VKAPI vkCreateDynamicViewportState(
(struct nulldrv_dynamic_vp **) pState);
}
-ICD_EXPORT VK_RESULT VKAPI vkCreateDynamicRasterState(
- VK_DEVICE device,
- const VK_DYNAMIC_RS_STATE_CREATE_INFO* pCreateInfo,
- VK_DYNAMIC_RS_STATE_OBJECT* pState)
+ICD_EXPORT VkResult VKAPI vkCreateDynamicRasterState(
+ VkDevice device,
+ const VkDynamicRsStateCreateInfo* pCreateInfo,
+ VkDynamicRsStateObject* pState)
{
NULLDRV_LOG_FUNC;
struct nulldrv_dev *dev = nulldrv_dev(device);
@@ -1840,10 +1840,10 @@ ICD_EXPORT VK_RESULT VKAPI vkCreateDynamicRasterState(
(struct nulldrv_dynamic_rs **) pState);
}
-ICD_EXPORT VK_RESULT VKAPI vkCreateDynamicColorBlendState(
- VK_DEVICE device,
- const VK_DYNAMIC_CB_STATE_CREATE_INFO* pCreateInfo,
- VK_DYNAMIC_CB_STATE_OBJECT* pState)
+ICD_EXPORT VkResult VKAPI vkCreateDynamicColorBlendState(
+ VkDevice device,
+ const VkDynamicCbStateCreateInfo* pCreateInfo,
+ VkDynamicCbStateObject* pState)
{
NULLDRV_LOG_FUNC;
struct nulldrv_dev *dev = nulldrv_dev(device);
@@ -1852,10 +1852,10 @@ ICD_EXPORT VK_RESULT VKAPI vkCreateDynamicColorBlendState(
(struct nulldrv_dynamic_cb **) pState);
}
-ICD_EXPORT VK_RESULT VKAPI vkCreateDynamicDepthStencilState(
- VK_DEVICE device,
- const VK_DYNAMIC_DS_STATE_CREATE_INFO* pCreateInfo,
- VK_DYNAMIC_DS_STATE_OBJECT* pState)
+ICD_EXPORT VkResult VKAPI vkCreateDynamicDepthStencilState(
+ VkDevice device,
+ const VkDynamicDsStateCreateInfo* pCreateInfo,
+ VkDynamicDsStateObject* pState)
{
NULLDRV_LOG_FUNC;
struct nulldrv_dev *dev = nulldrv_dev(device);
@@ -1864,10 +1864,10 @@ ICD_EXPORT VK_RESULT VKAPI vkCreateDynamicDepthStencilState(
(struct nulldrv_dynamic_ds **) pState);
}
-ICD_EXPORT VK_RESULT VKAPI vkCreateBufferView(
- VK_DEVICE device,
+ICD_EXPORT VkResult VKAPI vkCreateBufferView(
+ VkDevice device,
const VkBufferViewCreateInfo* pCreateInfo,
- VK_BUFFER_VIEW* pView)
+ VkBufferView* pView)
{
NULLDRV_LOG_FUNC;
struct nulldrv_dev *dev = nulldrv_dev(device);
@@ -1876,10 +1876,10 @@ ICD_EXPORT VK_RESULT VKAPI vkCreateBufferView(
(struct nulldrv_buf_view **) pView);
}
-ICD_EXPORT VK_RESULT VKAPI vkCreateImageView(
- VK_DEVICE device,
- const VK_IMAGE_VIEW_CREATE_INFO* pCreateInfo,
- VK_IMAGE_VIEW* pView)
+ICD_EXPORT VkResult VKAPI vkCreateImageView(
+ VkDevice device,
+ const VkImageViewCreateInfo* pCreateInfo,
+ VkImageView* pView)
{
NULLDRV_LOG_FUNC;
struct nulldrv_dev *dev = nulldrv_dev(device);
@@ -1888,10 +1888,10 @@ ICD_EXPORT VK_RESULT VKAPI vkCreateImageView(
(struct nulldrv_img_view **) pView);
}
-ICD_EXPORT VK_RESULT VKAPI vkCreateColorAttachmentView(
- VK_DEVICE device,
- const VK_COLOR_ATTACHMENT_VIEW_CREATE_INFO* pCreateInfo,
- VK_COLOR_ATTACHMENT_VIEW* pView)
+ICD_EXPORT VkResult VKAPI vkCreateColorAttachmentView(
+ VkDevice device,
+ const VkColorAttachmentViewCreateInfo* pCreateInfo,
+ VkColorAttachmentView* pView)
{
NULLDRV_LOG_FUNC;
struct nulldrv_dev *dev = nulldrv_dev(device);
@@ -1900,10 +1900,10 @@ ICD_EXPORT VK_RESULT VKAPI vkCreateColorAttachmentView(
(struct nulldrv_rt_view **) pView);
}
-ICD_EXPORT VK_RESULT VKAPI vkCreateDepthStencilView(
- VK_DEVICE device,
- const VK_DEPTH_STENCIL_VIEW_CREATE_INFO* pCreateInfo,
- VK_DEPTH_STENCIL_VIEW* pView)
+ICD_EXPORT VkResult VKAPI vkCreateDepthStencilView(
+ VkDevice device,
+ const VkDepthStencilViewCreateInfo* pCreateInfo,
+ VkDepthStencilView* pView)
{
NULLDRV_LOG_FUNC;
struct nulldrv_dev *dev = nulldrv_dev(device);
@@ -1913,10 +1913,10 @@ ICD_EXPORT VK_RESULT VKAPI vkCreateDepthStencilView(
}
-ICD_EXPORT VK_RESULT VKAPI vkCreateDescriptorSetLayout(
- VK_DEVICE device,
- const VK_DESCRIPTOR_SET_LAYOUT_CREATE_INFO* pCreateInfo,
- VK_DESCRIPTOR_SET_LAYOUT* pSetLayout)
+ICD_EXPORT VkResult VKAPI vkCreateDescriptorSetLayout(
+ VkDevice device,
+ const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
+ VkDescriptorSetLayout* pSetLayout)
{
NULLDRV_LOG_FUNC;
struct nulldrv_dev *dev = nulldrv_dev(device);
@@ -1925,11 +1925,11 @@ ICD_EXPORT VK_RESULT VKAPI vkCreateDescriptorSetLayout(
(struct nulldrv_desc_layout **) pSetLayout);
}
-ICD_EXPORT VK_RESULT VKAPI vkCreateDescriptorSetLayoutChain(
- VK_DEVICE device,
+ICD_EXPORT VkResult VKAPI vkCreateDescriptorSetLayoutChain(
+ VkDevice device,
uint32_t setLayoutArrayCount,
- const VK_DESCRIPTOR_SET_LAYOUT* pSetLayoutArray,
- VK_DESCRIPTOR_SET_LAYOUT_CHAIN* pLayoutChain)
+ const VkDescriptorSetLayout* pSetLayoutArray,
+ VkDescriptorSetLayoutChain* pLayoutChain)
{
NULLDRV_LOG_FUNC;
struct nulldrv_dev *dev = nulldrv_dev(device);
@@ -1939,28 +1939,28 @@ ICD_EXPORT VK_RESULT VKAPI vkCreateDescriptorSetLayoutChain(
(struct nulldrv_desc_layout_chain **) pLayoutChain);
}
-ICD_EXPORT VK_RESULT VKAPI vkBeginDescriptorPoolUpdate(
- VK_DEVICE device,
- VK_DESCRIPTOR_UPDATE_MODE updateMode)
+ICD_EXPORT VkResult VKAPI vkBeginDescriptorPoolUpdate(
+ VkDevice device,
+ VkDescriptorUpdateMode updateMode)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkEndDescriptorPoolUpdate(
- VK_DEVICE device,
- VK_CMD_BUFFER cmd_)
+ICD_EXPORT VkResult VKAPI vkEndDescriptorPoolUpdate(
+ VkDevice device,
+ VkCmdBuffer cmd_)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkCreateDescriptorPool(
- VK_DEVICE device,
- VK_DESCRIPTOR_POOL_USAGE poolUsage,
+ICD_EXPORT VkResult VKAPI vkCreateDescriptorPool(
+ VkDevice device,
+ VkDescriptorPoolUsage poolUsage,
uint32_t maxSets,
- const VK_DESCRIPTOR_POOL_CREATE_INFO* pCreateInfo,
- VK_DESCRIPTOR_POOL* pDescriptorPool)
+ const VkDescriptorPoolCreateInfo* pCreateInfo,
+ VkDescriptorPool* pDescriptorPool)
{
NULLDRV_LOG_FUNC;
struct nulldrv_dev *dev = nulldrv_dev(device);
@@ -1969,30 +1969,30 @@ ICD_EXPORT VK_RESULT VKAPI vkCreateDescriptorPool(
(struct nulldrv_desc_pool **) pDescriptorPool);
}
-ICD_EXPORT VK_RESULT VKAPI vkResetDescriptorPool(
- VK_DESCRIPTOR_POOL descriptorPool)
+ICD_EXPORT VkResult VKAPI vkResetDescriptorPool(
+ VkDescriptorPool descriptorPool)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
-ICD_EXPORT VK_RESULT VKAPI vkAllocDescriptorSets(
- VK_DESCRIPTOR_POOL descriptorPool,
- VK_DESCRIPTOR_SET_USAGE setUsage,
+ICD_EXPORT VkResult VKAPI vkAllocDescriptorSets(
+ VkDescriptorPool descriptorPool,
+ VkDescriptorSetUsage setUsage,
uint32_t count,
- const VK_DESCRIPTOR_SET_LAYOUT* pSetLayouts,
- VK_DESCRIPTOR_SET* pDescriptorSets,
+ const VkDescriptorSetLayout* pSetLayouts,
+ VkDescriptorSet* pDescriptorSets,
uint32_t* pCount)
{
NULLDRV_LOG_FUNC;
struct nulldrv_desc_pool *pool = nulldrv_desc_pool(descriptorPool);
struct nulldrv_dev *dev = pool->dev;
- VK_RESULT ret = VK_SUCCESS;
+ VkResult ret = VK_SUCCESS;
uint32_t i;
for (i = 0; i < count; i++) {
const struct nulldrv_desc_layout *layout =
- nulldrv_desc_layout((VK_DESCRIPTOR_SET_LAYOUT) pSetLayouts[i]);
+ nulldrv_desc_layout((VkDescriptorSetLayout) pSetLayouts[i]);
ret = nulldrv_desc_set_create(dev, pool, setUsage, layout,
(struct nulldrv_desc_set **) &pDescriptorSets[i]);
@@ -2007,25 +2007,25 @@ ICD_EXPORT VK_RESULT VKAPI vkAllocDescriptorSets(
}
ICD_EXPORT void VKAPI vkClearDescriptorSets(
- VK_DESCRIPTOR_POOL descriptorPool,
+ VkDescriptorPool descriptorPool,
uint32_t count,
- const VK_DESCRIPTOR_SET* pDescriptorSets)
+ const VkDescriptorSet* pDescriptorSets)
{
NULLDRV_LOG_FUNC;
}
ICD_EXPORT void VKAPI vkUpdateDescriptors(
- VK_DESCRIPTOR_SET descriptorSet,
+ VkDescriptorSet descriptorSet,
uint32_t updateCount,
const void** ppUpdateArray)
{
NULLDRV_LOG_FUNC;
}
-ICD_EXPORT VK_RESULT VKAPI vkCreateFramebuffer(
- VK_DEVICE device,
- const VK_FRAMEBUFFER_CREATE_INFO* info,
- VK_FRAMEBUFFER* fb_ret)
+ICD_EXPORT VkResult VKAPI vkCreateFramebuffer(
+ VkDevice device,
+ const VkFramebufferCreateInfo* info,
+ VkFramebuffer* fb_ret)
{
NULLDRV_LOG_FUNC;
struct nulldrv_dev *dev = nulldrv_dev(device);
@@ -2034,10 +2034,10 @@ ICD_EXPORT VK_RESULT VKAPI vkCreateFramebuffer(
}
-ICD_EXPORT VK_RESULT VKAPI vkCreateRenderPass(
- VK_DEVICE device,
- const VK_RENDER_PASS_CREATE_INFO* info,
- VK_RENDER_PASS* rp_ret)
+ICD_EXPORT VkResult VKAPI vkCreateRenderPass(
+ VkDevice device,
+ const VkRenderPassCreateInfo* info,
+ VkRenderPass* rp_ret)
{
NULLDRV_LOG_FUNC;
struct nulldrv_dev *dev = nulldrv_dev(device);
@@ -2046,15 +2046,15 @@ ICD_EXPORT VK_RESULT VKAPI vkCreateRenderPass(
}
ICD_EXPORT void VKAPI vkCmdBeginRenderPass(
- VK_CMD_BUFFER cmdBuffer,
- const VK_RENDER_PASS_BEGIN* pRenderPassBegin)
+ VkCmdBuffer cmdBuffer,
+ const VkRenderPassBegin* pRenderPassBegin)
{
NULLDRV_LOG_FUNC;
}
ICD_EXPORT void VKAPI vkCmdEndRenderPass(
- VK_CMD_BUFFER cmdBuffer,
- VK_RENDER_PASS renderPass)
+ VkCmdBuffer cmdBuffer,
+ VkRenderPass renderPass)
{
NULLDRV_LOG_FUNC;
}
@@ -2080,7 +2080,7 @@ ICD_EXPORT int xcbGetMessage(void *msg)
return 0;
}
-ICD_EXPORT VK_RESULT xcbQueuePresent(void *queue, void *image, void* fence)
+ICD_EXPORT VkResult xcbQueuePresent(void *queue, void *image, void* fence)
{
return VK_SUCCESS;
}
diff --git a/icd/nulldrv/nulldrv.h b/icd/nulldrv/nulldrv.h
index 22af6c6c..daf45be9 100644
--- a/icd/nulldrv/nulldrv.h
+++ b/icd/nulldrv/nulldrv.h
@@ -48,7 +48,7 @@
struct nulldrv_base {
void *loader_data;
uint32_t magic;
- VK_RESULT (*get_info)(struct nulldrv_base *base, int type1,
+ VkResult (*get_info)(struct nulldrv_base *base, int type1,
size_t *size, void *data);
};
@@ -100,12 +100,12 @@ struct nulldrv_fence {
struct nulldrv_img {
struct nulldrv_obj obj;
- VK_IMAGE_TYPE type;
+ VkImageType type;
int32_t depth;
uint32_t mip_levels;
uint32_t array_size;
- VK_FLAGS usage;
- VK_IMAGE_FORMAT_CLASS format_class;
+ VkFlags usage;
+ VkImageFormatClass format_class;
uint32_t samples;
size_t total_size;
};
@@ -113,7 +113,7 @@ struct nulldrv_img {
struct nulldrv_mem {
struct nulldrv_base base;
struct nulldrv_bo *bo;
- VK_GPU_SIZE size;
+ VkGpuSize size;
};
struct nulldrv_ds_view {
@@ -135,8 +135,8 @@ struct nulldrv_img_view {
struct nulldrv_buf {
struct nulldrv_obj obj;
- VK_GPU_SIZE size;
- VK_FLAGS usage;
+ VkGpuSize size;
+ VkFlags usage;
};
struct nulldrv_desc_layout {
diff --git a/include/vkDbg.h b/include/vkDbg.h
index fb224829..fcfdf318 100644
--- a/include/vkDbg.h
+++ b/include/vkDbg.h
@@ -101,66 +101,66 @@ typedef enum _VK_DBG_OBJECT_TYPE
typedef void (VKAPI *VK_DBG_MSG_CALLBACK_FUNCTION)(
VK_DBG_MSG_TYPE msgType,
- VK_VALIDATION_LEVEL validationLevel,
- VK_BASE_OBJECT srcObject,
+ VkValidationLevel validationLevel,
+ VkBaseObject srcObject,
size_t location,
int32_t msgCode,
const char* pMsg,
void* pUserData);
// Debug functions
-typedef VK_RESULT (VKAPI *vkDbgSetValidationLevelType)(VK_DEVICE device, VK_VALIDATION_LEVEL validationLevel);
-typedef VK_RESULT (VKAPI *vkDbgRegisterMsgCallbackType)(VK_INSTANCE instance, VK_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback, void* pUserData);
-typedef VK_RESULT (VKAPI *vkDbgUnregisterMsgCallbackType)(VK_INSTANCE instance, VK_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback);
-typedef VK_RESULT (VKAPI *vkDbgSetMessageFilterType)(VK_DEVICE device, int32_t msgCode, VK_DBG_MSG_FILTER filter);
-typedef VK_RESULT (VKAPI *vkDbgSetObjectTagType)(VK_BASE_OBJECT object, size_t tagSize, const void* pTag);
-typedef VK_RESULT (VKAPI *vkDbgSetGlobalOptionType)(VK_INSTANCE instance, VK_DBG_GLOBAL_OPTION dbgOption, size_t dataSize, const void* pData);
-typedef VK_RESULT (VKAPI *vkDbgSetDeviceOptionType)(VK_DEVICE device, VK_DBG_DEVICE_OPTION dbgOption, size_t dataSize, const void* pData);
-typedef void (VKAPI *vkCmdDbgMarkerBeginType)(VK_CMD_BUFFER cmdBuffer, const char* pMarker);
-typedef void (VKAPI *vkCmdDbgMarkerEndType)(VK_CMD_BUFFER cmdBuffer);
+typedef VkResult (VKAPI *PFN_vkDbgSetValidationLevel)(VkDevice device, VkValidationLevel validationLevel);
+typedef VkResult (VKAPI *PFN_vkDbgRegisterMsgCallback)(VkInstance instance, VK_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback, void* pUserData);
+typedef VkResult (VKAPI *PFN_vkDbgUnregisterMsgCallback)(VkInstance instance, VK_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback);
+typedef VkResult (VKAPI *PFN_vkDbgSetMessageFilter)(VkDevice device, int32_t msgCode, VK_DBG_MSG_FILTER filter);
+typedef VkResult (VKAPI *PFN_vkDbgSetObjectTag)(VkBaseObject object, size_t tagSize, const void* pTag);
+typedef VkResult (VKAPI *PFN_vkDbgSetGlobalOption)(VkInstance instance, VK_DBG_GLOBAL_OPTION dbgOption, size_t dataSize, const void* pData);
+typedef VkResult (VKAPI *PFN_vkDbgSetDeviceOption)(VkDevice device, VK_DBG_DEVICE_OPTION dbgOption, size_t dataSize, const void* pData);
+typedef void (VKAPI *PFN_vkCmdDbgMarkerBegin)(VkCmdBuffer cmdBuffer, const char* pMarker);
+typedef void (VKAPI *PFN_vkCmdDbgMarkerEnd)(VkCmdBuffer cmdBuffer);
#ifdef VK_PROTOTYPES
-VK_RESULT VKAPI vkDbgSetValidationLevel(
- VK_DEVICE device,
- VK_VALIDATION_LEVEL validationLevel);
+VkResult VKAPI vkDbgSetValidationLevel(
+ VkDevice device,
+ VkValidationLevel validationLevel);
-VK_RESULT VKAPI vkDbgRegisterMsgCallback(
- VK_INSTANCE instance,
+VkResult VKAPI vkDbgRegisterMsgCallback(
+ VkInstance instance,
VK_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback,
void* pUserData);
-VK_RESULT VKAPI vkDbgUnregisterMsgCallback(
- VK_INSTANCE instance,
+VkResult VKAPI vkDbgUnregisterMsgCallback(
+ VkInstance instance,
VK_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback);
-VK_RESULT VKAPI vkDbgSetMessageFilter(
- VK_DEVICE device,
+VkResult VKAPI vkDbgSetMessageFilter(
+ VkDevice device,
int32_t msgCode,
VK_DBG_MSG_FILTER filter);
-VK_RESULT VKAPI vkDbgSetObjectTag(
- VK_BASE_OBJECT object,
+VkResult VKAPI vkDbgSetObjectTag(
+ VkBaseObject object,
size_t tagSize,
const void* pTag);
-VK_RESULT VKAPI vkDbgSetGlobalOption(
- VK_INSTANCE instance,
+VkResult VKAPI vkDbgSetGlobalOption(
+ VkInstance instance,
VK_DBG_GLOBAL_OPTION dbgOption,
size_t dataSize,
const void* pData);
-VK_RESULT VKAPI vkDbgSetDeviceOption(
- VK_DEVICE device,
+VkResult VKAPI vkDbgSetDeviceOption(
+ VkDevice device,
VK_DBG_DEVICE_OPTION dbgOption,
size_t dataSize,
const void* pData);
void VKAPI vkCmdDbgMarkerBegin(
- VK_CMD_BUFFER cmdBuffer,
+ VkCmdBuffer cmdBuffer,
const char* pMarker);
void VKAPI vkCmdDbgMarkerEnd(
- VK_CMD_BUFFER cmdBuffer);
+ VkCmdBuffer cmdBuffer);
#endif // VK_PROTOTYPES
diff --git a/include/vkLayer.h b/include/vkLayer.h
index c3679a00..f5b6fac3 100644
--- a/include/vkLayer.h
+++ b/include/vkLayer.h
@@ -20,141 +20,141 @@
typedef struct _VK_BASE_LAYER_OBJECT
{
- vkGetProcAddrType pGPA;
- VK_BASE_OBJECT nextObject;
- VK_BASE_OBJECT baseObject;
+ PFN_vkGetProcAddr pGPA;
+ VkBaseObject nextObject;
+ VkBaseObject baseObject;
} VK_BASE_LAYER_OBJECT;
typedef struct _VK_LAYER_DISPATCH_TABLE
{
- vkGetProcAddrType GetProcAddr;
- vkCreateInstanceType CreateInstance;
- vkDestroyInstanceType DestroyInstance;
- vkEnumerateGpusType EnumerateGpus;
- vkGetGpuInfoType GetGpuInfo;
- vkCreateDeviceType CreateDevice;
- vkDestroyDeviceType DestroyDevice;
- vkGetExtensionSupportType GetExtensionSupport;
- vkEnumerateLayersType EnumerateLayers;
- vkGetDeviceQueueType GetDeviceQueue;
- vkQueueSubmitType QueueSubmit;
- vkQueueAddMemReferenceType QueueAddMemReference;
- vkQueueRemoveMemReferenceType QueueRemoveMemReference;
- vkQueueWaitIdleType QueueWaitIdle;
- vkDeviceWaitIdleType DeviceWaitIdle;
- vkAllocMemoryType AllocMemory;
- vkFreeMemoryType FreeMemory;
- vkSetMemoryPriorityType SetMemoryPriority;
- vkMapMemoryType MapMemory;
- vkUnmapMemoryType UnmapMemory;
- vkPinSystemMemoryType PinSystemMemory;
- vkGetMultiGpuCompatibilityType GetMultiGpuCompatibility;
- vkOpenSharedMemoryType OpenSharedMemory;
- vkOpenSharedSemaphoreType OpenSharedSemaphore;
- vkOpenPeerMemoryType OpenPeerMemory;
- vkOpenPeerImageType OpenPeerImage;
- vkDestroyObjectType DestroyObject;
- vkGetObjectInfoType GetObjectInfo;
- vkBindObjectMemoryType BindObjectMemory;
- vkBindObjectMemoryRangeType BindObjectMemoryRange;
- vkBindImageMemoryRangeType BindImageMemoryRange;
- vkCreateFenceType CreateFence;
- vkGetFenceStatusType GetFenceStatus;
- vkResetFencesType ResetFences;
- vkWaitForFencesType WaitForFences;
- vkCreateSemaphoreType CreateSemaphore;
- vkQueueSignalSemaphoreType QueueSignalSemaphore;
- vkQueueWaitSemaphoreType QueueWaitSemaphore;
- vkCreateEventType CreateEvent;
- vkGetEventStatusType GetEventStatus;
- vkSetEventType SetEvent;
- vkResetEventType ResetEvent;
- vkCreateQueryPoolType CreateQueryPool;
- vkGetQueryPoolResultsType GetQueryPoolResults;
- vkGetFormatInfoType GetFormatInfo;
- vkCreateBufferType CreateBuffer;
- vkCreateBufferViewType CreateBufferView;
- vkCreateImageType CreateImage;
- vkGetImageSubresourceInfoType GetImageSubresourceInfo;
- vkCreateImageViewType CreateImageView;
- vkCreateColorAttachmentViewType CreateColorAttachmentView;
- vkCreateDepthStencilViewType CreateDepthStencilView;
- vkCreateShaderType CreateShader;
- vkCreateGraphicsPipelineType CreateGraphicsPipeline;
- vkCreateGraphicsPipelineDerivativeType CreateGraphicsPipelineDerivative;
- vkCreateComputePipelineType CreateComputePipeline;
- vkStorePipelineType StorePipeline;
- vkLoadPipelineType LoadPipeline;
- vkLoadPipelineDerivativeType LoadPipelineDerivative;
- vkCreateSamplerType CreateSampler;
- vkCreateDescriptorSetLayoutType CreateDescriptorSetLayout;
- vkCreateDescriptorSetLayoutChainType CreateDescriptorSetLayoutChain;
- vkBeginDescriptorPoolUpdateType BeginDescriptorPoolUpdate;
- vkEndDescriptorPoolUpdateType EndDescriptorPoolUpdate;
- vkCreateDescriptorPoolType CreateDescriptorPool;
- vkResetDescriptorPoolType ResetDescriptorPool;
- vkAllocDescriptorSetsType AllocDescriptorSets;
- vkClearDescriptorSetsType ClearDescriptorSets;
- vkUpdateDescriptorsType UpdateDescriptors;
- vkCreateDynamicViewportStateType CreateDynamicViewportState;
- vkCreateDynamicRasterStateType CreateDynamicRasterState;
- vkCreateDynamicColorBlendStateType CreateDynamicColorBlendState;
- vkCreateDynamicDepthStencilStateType CreateDynamicDepthStencilState;
- vkCreateCommandBufferType CreateCommandBuffer;
- vkBeginCommandBufferType BeginCommandBuffer;
- vkEndCommandBufferType EndCommandBuffer;
- vkResetCommandBufferType ResetCommandBuffer;
- vkCmdBindPipelineType CmdBindPipeline;
- vkCmdBindDynamicStateObjectType CmdBindDynamicStateObject;
- vkCmdBindDescriptorSetsType CmdBindDescriptorSets;
- vkCmdBindVertexBufferType CmdBindVertexBuffer;
- vkCmdBindIndexBufferType CmdBindIndexBuffer;
- vkCmdDrawType CmdDraw;
- vkCmdDrawIndexedType CmdDrawIndexed;
- vkCmdDrawIndirectType CmdDrawIndirect;
- vkCmdDrawIndexedIndirectType CmdDrawIndexedIndirect;
- vkCmdDispatchType CmdDispatch;
- vkCmdDispatchIndirectType CmdDispatchIndirect;
- vkCmdCopyBufferType CmdCopyBuffer;
- vkCmdCopyImageType CmdCopyImage;
- vkCmdBlitImageType CmdBlitImage;
- vkCmdCopyBufferToImageType CmdCopyBufferToImage;
- vkCmdCopyImageToBufferType CmdCopyImageToBuffer;
- vkCmdCloneImageDataType CmdCloneImageData;
- vkCmdUpdateBufferType CmdUpdateBuffer;
- vkCmdFillBufferType CmdFillBuffer;
- vkCmdClearColorImageType CmdClearColorImage;
- vkCmdClearDepthStencilType CmdClearDepthStencil;
- vkCmdResolveImageType CmdResolveImage;
- vkCmdSetEventType CmdSetEvent;
- vkCmdResetEventType CmdResetEvent;
- vkCmdWaitEventsType CmdWaitEvents;
- vkCmdPipelineBarrierType CmdPipelineBarrier;
- vkCmdBeginQueryType CmdBeginQuery;
- vkCmdEndQueryType CmdEndQuery;
- vkCmdResetQueryPoolType CmdResetQueryPool;
- vkCmdWriteTimestampType CmdWriteTimestamp;
- vkCmdInitAtomicCountersType CmdInitAtomicCounters;
- vkCmdLoadAtomicCountersType CmdLoadAtomicCounters;
- vkCmdSaveAtomicCountersType CmdSaveAtomicCounters;
- vkCreateFramebufferType CreateFramebuffer;
- vkCreateRenderPassType CreateRenderPass;
- vkCmdBeginRenderPassType CmdBeginRenderPass;
- vkCmdEndRenderPassType CmdEndRenderPass;
- vkDbgSetValidationLevelType DbgSetValidationLevel;
- vkDbgRegisterMsgCallbackType DbgRegisterMsgCallback;
- vkDbgUnregisterMsgCallbackType DbgUnregisterMsgCallback;
- vkDbgSetMessageFilterType DbgSetMessageFilter;
- vkDbgSetObjectTagType DbgSetObjectTag;
- vkDbgSetGlobalOptionType DbgSetGlobalOption;
- vkDbgSetDeviceOptionType DbgSetDeviceOption;
- vkCmdDbgMarkerBeginType CmdDbgMarkerBegin;
- vkCmdDbgMarkerEndType CmdDbgMarkerEnd;
+ PFN_vkGetProcAddr GetProcAddr;
+ PFN_vkCreateInstance CreateInstance;
+ PFN_vkDestroyInstance DestroyInstance;
+ PFN_vkEnumerateGpus EnumerateGpus;
+ PFN_vkGetGpuInfo GetGpuInfo;
+ PFN_vkCreateDevice CreateDevice;
+ PFN_vkDestroyDevice DestroyDevice;
+ PFN_vkGetExtensionSupport GetExtensionSupport;
+ PFN_vkEnumerateLayers EnumerateLayers;
+ PFN_vkGetDeviceQueue GetDeviceQueue;
+ PFN_vkQueueSubmit QueueSubmit;
+ PFN_vkQueueAddMemReference QueueAddMemReference;
+ PFN_vkQueueRemoveMemReference QueueRemoveMemReference;
+ PFN_vkQueueWaitIdle QueueWaitIdle;
+ PFN_vkDeviceWaitIdle DeviceWaitIdle;
+ PFN_vkAllocMemory AllocMemory;
+ PFN_vkFreeMemory FreeMemory;
+ PFN_vkSetMemoryPriority SetMemoryPriority;
+ PFN_vkMapMemory MapMemory;
+ PFN_vkUnmapMemory UnmapMemory;
+ PFN_vkPinSystemMemory PinSystemMemory;
+ PFN_vkGetMultiGpuCompatibility GetMultiGpuCompatibility;
+ PFN_vkOpenSharedMemory OpenSharedMemory;
+ PFN_vkOpenSharedSemaphore OpenSharedSemaphore;
+ PFN_vkOpenPeerMemory OpenPeerMemory;
+ PFN_vkOpenPeerImage OpenPeerImage;
+ PFN_vkDestroyObject DestroyObject;
+ PFN_vkGetObjectInfo GetObjectInfo;
+ PFN_vkBindObjectMemory BindObjectMemory;
+ PFN_vkBindObjectMemoryRange BindObjectMemoryRange;
+ PFN_vkBindImageMemoryRange BindImageMemoryRange;
+ PFN_vkCreateFence CreateFence;
+ PFN_vkGetFenceStatus GetFenceStatus;
+ PFN_vkResetFences ResetFences;
+ PFN_vkWaitForFences WaitForFences;
+ PFN_vkCreateSemaphore CreateSemaphore;
+ PFN_vkQueueSignalSemaphore QueueSignalSemaphore;
+ PFN_vkQueueWaitSemaphore QueueWaitSemaphore;
+ PFN_vkCreateEvent CreateEvent;
+ PFN_vkGetEventStatus GetEventStatus;
+ PFN_vkSetEvent SetEvent;
+ PFN_vkResetEvent ResetEvent;
+ PFN_vkCreateQueryPool CreateQueryPool;
+ PFN_vkGetQueryPoolResults GetQueryPoolResults;
+ PFN_vkGetFormatInfo GetFormatInfo;
+ PFN_vkCreateBuffer CreateBuffer;
+ PFN_vkCreateBufferView CreateBufferView;
+ PFN_vkCreateImage CreateImage;
+ PFN_vkGetImageSubresourceInfo GetImageSubresourceInfo;
+ PFN_vkCreateImageView CreateImageView;
+ PFN_vkCreateColorAttachmentView CreateColorAttachmentView;
+ PFN_vkCreateDepthStencilView CreateDepthStencilView;
+ PFN_vkCreateShader CreateShader;
+ PFN_vkCreateGraphicsPipeline CreateGraphicsPipeline;
+ PFN_vkCreateGraphicsPipelineDerivative CreateGraphicsPipelineDerivative;
+ PFN_vkCreateComputePipeline CreateComputePipeline;
+ PFN_vkStorePipeline StorePipeline;
+ PFN_vkLoadPipeline LoadPipeline;
+ PFN_vkLoadPipelineDerivative LoadPipelineDerivative;
+ PFN_vkCreateSampler CreateSampler;
+ PFN_vkCreateDescriptorSetLayout CreateDescriptorSetLayout;
+ PFN_vkCreateDescriptorSetLayoutChain CreateDescriptorSetLayoutChain;
+ PFN_vkBeginDescriptorPoolUpdate BeginDescriptorPoolUpdate;
+ PFN_vkEndDescriptorPoolUpdate EndDescriptorPoolUpdate;
+ PFN_vkCreateDescriptorPool CreateDescriptorPool;
+ PFN_vkResetDescriptorPool ResetDescriptorPool;
+ PFN_vkAllocDescriptorSets AllocDescriptorSets;
+ PFN_vkClearDescriptorSets ClearDescriptorSets;
+ PFN_vkUpdateDescriptors UpdateDescriptors;
+ PFN_vkCreateDynamicViewportState CreateDynamicViewportState;
+ PFN_vkCreateDynamicRasterState CreateDynamicRasterState;
+ PFN_vkCreateDynamicColorBlendState CreateDynamicColorBlendState;
+ PFN_vkCreateDynamicDepthStencilState CreateDynamicDepthStencilState;
+ PFN_vkCreateCommandBuffer CreateCommandBuffer;
+ PFN_vkBeginCommandBuffer BeginCommandBuffer;
+ PFN_vkEndCommandBuffer EndCommandBuffer;
+ PFN_vkResetCommandBuffer ResetCommandBuffer;
+ PFN_vkCmdBindPipeline CmdBindPipeline;
+ PFN_vkCmdBindDynamicStateObject CmdBindDynamicStateObject;
+ PFN_vkCmdBindDescriptorSets CmdBindDescriptorSets;
+ PFN_vkCmdBindVertexBuffer CmdBindVertexBuffer;
+ PFN_vkCmdBindIndexBuffer CmdBindIndexBuffer;
+ PFN_vkCmdDraw CmdDraw;
+ PFN_vkCmdDrawIndexed CmdDrawIndexed;
+ PFN_vkCmdDrawIndirect CmdDrawIndirect;
+ PFN_vkCmdDrawIndexedIndirect CmdDrawIndexedIndirect;
+ PFN_vkCmdDispatch CmdDispatch;
+ PFN_vkCmdDispatchIndirect CmdDispatchIndirect;
+ PFN_vkCmdCopyBuffer CmdCopyBuffer;
+ PFN_vkCmdCopyImage CmdCopyImage;
+ PFN_vkCmdBlitImage CmdBlitImage;
+ PFN_vkCmdCopyBufferToImage CmdCopyBufferToImage;
+ PFN_vkCmdCopyImageToBuffer CmdCopyImageToBuffer;
+ PFN_vkCmdCloneImageData CmdCloneImageData;
+ PFN_vkCmdUpdateBuffer CmdUpdateBuffer;
+ PFN_vkCmdFillBuffer CmdFillBuffer;
+ PFN_vkCmdClearColorImage CmdClearColorImage;
+ PFN_vkCmdClearDepthStencil CmdClearDepthStencil;
+ PFN_vkCmdResolveImage CmdResolveImage;
+ PFN_vkCmdSetEvent CmdSetEvent;
+ PFN_vkCmdResetEvent CmdResetEvent;
+ PFN_vkCmdWaitEvents CmdWaitEvents;
+ PFN_vkCmdPipelineBarrier CmdPipelineBarrier;
+ PFN_vkCmdBeginQuery CmdBeginQuery;
+ PFN_vkCmdEndQuery CmdEndQuery;
+ PFN_vkCmdResetQueryPool CmdResetQueryPool;
+ PFN_vkCmdWriteTimestamp CmdWriteTimestamp;
+ PFN_vkCmdInitAtomicCounters CmdInitAtomicCounters;
+ PFN_vkCmdLoadAtomicCounters CmdLoadAtomicCounters;
+ PFN_vkCmdSaveAtomicCounters CmdSaveAtomicCounters;
+ PFN_vkCreateFramebuffer CreateFramebuffer;
+ PFN_vkCreateRenderPass CreateRenderPass;
+ PFN_vkCmdBeginRenderPass CmdBeginRenderPass;
+ PFN_vkCmdEndRenderPass CmdEndRenderPass;
+ PFN_vkDbgSetValidationLevel DbgSetValidationLevel;
+ PFN_vkDbgRegisterMsgCallback DbgRegisterMsgCallback;
+ PFN_vkDbgUnregisterMsgCallback DbgUnregisterMsgCallback;
+ PFN_vkDbgSetMessageFilter DbgSetMessageFilter;
+ PFN_vkDbgSetObjectTag DbgSetObjectTag;
+ PFN_vkDbgSetGlobalOption DbgSetGlobalOption;
+ PFN_vkDbgSetDeviceOption DbgSetDeviceOption;
+ PFN_vkCmdDbgMarkerBegin CmdDbgMarkerBegin;
+ PFN_vkCmdDbgMarkerEnd CmdDbgMarkerEnd;
#if defined(__linux__) || defined(XCB_NVIDIA)
- vkWsiX11AssociateConnectionType WsiX11AssociateConnection;
- vkWsiX11GetMSCType WsiX11GetMSC;
- vkWsiX11CreatePresentableImageType WsiX11CreatePresentableImage;
- vkWsiX11QueuePresentType WsiX11QueuePresent;
+ PFN_vkWsiX11AssociateConnection WsiX11AssociateConnection;
+ PFN_vkWsiX11GetMSC WsiX11GetMSC;
+ PFN_vkWsiX11CreatePresentableImage WsiX11CreatePresentableImage;
+ PFN_vkWsiX11QueuePresent WsiX11QueuePresent;
-#endif // WIN32
+#endif // defined(__linux__) || defined(XCB_NVIDIA)
} VK_LAYER_DISPATCH_TABLE;
diff --git a/include/vkWsiX11Ext.h b/include/vkWsiX11Ext.h
index f7046375..2f1d6239 100644
--- a/include/vkWsiX11Ext.h
+++ b/include/vkWsiX11Ext.h
@@ -20,17 +20,17 @@ typedef struct _VK_WSI_X11_CONNECTION_INFO {
typedef struct _VK_WSI_X11_PRESENTABLE_IMAGE_CREATE_INFO
{
- VK_FORMAT format;
- VK_FLAGS usage; // VK_IMAGE_USAGE_FLAGS
- VK_EXTENT2D extent;
- VK_FLAGS flags;
+ VkFormat format;
+ VkFlags usage; // VkImageUsageFlags
+ VkExtent2D extent;
+ VkFlags flags;
} VK_WSI_X11_PRESENTABLE_IMAGE_CREATE_INFO;
typedef struct _VK_WSI_X11_PRESENT_INFO
{
/* which window to present to */
xcb_window_t destWindow;
- VK_IMAGE srcImage;
+ VkImage srcImage;
/**
* After the command buffers in the queue have been completed, if the MSC
@@ -80,10 +80,10 @@ typedef struct _VK_WSI_X11_PRESENT_INFO
bool32_t flip;
} VK_WSI_X11_PRESENT_INFO;
-typedef VK_RESULT (VKAPI *vkWsiX11AssociateConnectionType)(VK_PHYSICAL_GPU gpu, const VK_WSI_X11_CONNECTION_INFO* pConnectionInfo);
-typedef VK_RESULT (VKAPI *vkWsiX11GetMSCType)(VK_DEVICE device, xcb_window_t window, xcb_randr_crtc_t crtc, uint64_t* pMsc);
-typedef VK_RESULT (VKAPI *vkWsiX11CreatePresentableImageType)(VK_DEVICE device, const VK_WSI_X11_PRESENTABLE_IMAGE_CREATE_INFO* pCreateInfo, VK_IMAGE* pImage, VK_GPU_MEMORY* pMem);
-typedef VK_RESULT (VKAPI *vkWsiX11QueuePresentType)(VK_QUEUE queue, const VK_WSI_X11_PRESENT_INFO* pPresentInfo, VK_FENCE fence);
+typedef VkResult (VKAPI *PFN_vkWsiX11AssociateConnection)(VkPhysicalGpu gpu, const VK_WSI_X11_CONNECTION_INFO* pConnectionInfo);
+typedef VkResult (VKAPI *PFN_vkWsiX11GetMSC)(VkDevice device, xcb_window_t window, xcb_randr_crtc_t crtc, uint64_t* pMsc);
+typedef VkResult (VKAPI *PFN_vkWsiX11CreatePresentableImage)(VkDevice device, const VK_WSI_X11_PRESENTABLE_IMAGE_CREATE_INFO* pCreateInfo, VkImage* pImage, VkGpuMemory* pMem);
+typedef VkResult (VKAPI *PFN_vkWsiX11QueuePresent)(VkQueue queue, const VK_WSI_X11_PRESENT_INFO* pPresentInfo, VkFence fence);
/**
* Associate an X11 connection with a GPU. This should be done before device
@@ -97,8 +97,8 @@ typedef VK_RESULT (VKAPI *vkWsiX11QueuePresentType)(VK_QUEUE queue, const VK_WSI
* This function is available when vkGetExtensionSupport says "VK_WSI_X11"
* is supported.
*/
-VK_RESULT VKAPI vkWsiX11AssociateConnection(
- VK_PHYSICAL_GPU gpu,
+VkResult VKAPI vkWsiX11AssociateConnection(
+ VkPhysicalGpu gpu,
const VK_WSI_X11_CONNECTION_INFO* pConnectionInfo);
/**
@@ -106,23 +106,23 @@ VK_RESULT VKAPI vkWsiX11AssociateConnection(
* of \p crtc. If crtc is \p XCB_NONE, a suitable CRTC is picked based on \p
* win.
*/
-VK_RESULT VKAPI vkWsiX11GetMSC(
- VK_DEVICE device,
+VkResult VKAPI vkWsiX11GetMSC(
+ VkDevice device,
xcb_window_t window,
xcb_randr_crtc_t crtc,
uint64_t* pMsc);
/**
- * Create an VK_IMAGE that can be presented. An VK_GPU_MEMORY is created
+ * Create a VkImage that can be presented. A VkGpuMemory is created
* and bound automatically. The memory returned can only be used in
* vkQueue[Add|Remove]MemReference. Destroying the memory or binding another memory to the
* image is not allowed.
*/
-VK_RESULT VKAPI vkWsiX11CreatePresentableImage(
- VK_DEVICE device,
+VkResult VKAPI vkWsiX11CreatePresentableImage(
+ VkDevice device,
const VK_WSI_X11_PRESENTABLE_IMAGE_CREATE_INFO* pCreateInfo,
- VK_IMAGE* pImage,
- VK_GPU_MEMORY* pMem);
+ VkImage* pImage,
+ VkGpuMemory* pMem);
/**
* Present an image to an X11 window. The presentation always occurs after
@@ -131,10 +131,10 @@ VK_RESULT VKAPI vkWsiX11CreatePresentableImage(
*
* Fence is reached when the presentation occurs.
*/
-VK_RESULT VKAPI vkWsiX11QueuePresent(
- VK_QUEUE queue,
+VkResult VKAPI vkWsiX11QueuePresent(
+ VkQueue queue,
const VK_WSI_X11_PRESENT_INFO* pPresentInfo,
- VK_FENCE fence);
+ VkFence fence);
#ifdef __cplusplus
} // extern "C"
diff --git a/include/vk_platform.h b/include/vk_platform.h
index fc73dd16..0f4927c7 100644
--- a/include/vk_platform.h
+++ b/include/vk_platform.h
@@ -76,11 +76,11 @@ extern "C"
#endif
#endif // !defined(VK_NO_STDINT_H)
-typedef uint64_t VK_GPU_SIZE;
+typedef uint64_t VkGpuSize;
typedef uint32_t bool32_t;
-typedef uint32_t VK_SAMPLE_MASK;
-typedef uint32_t VK_FLAGS;
+typedef uint32_t VkSampleMask;
+typedef uint32_t VkFlags;
typedef int32_t VK_ENUM;
#ifdef __cplusplus
diff --git a/include/vulkan.h b/include/vulkan.h
index ef5ce640..bea82b06 100644
--- a/include/vulkan.h
+++ b/include/vulkan.h
@@ -54,38 +54,38 @@ extern "C"
#define VK_DEFINE_SUBCLASS_HANDLE(_obj, _base) typedef void* _obj;
#endif // __cplusplus
-VK_DEFINE_HANDLE(VK_INSTANCE)
-VK_DEFINE_HANDLE(VK_PHYSICAL_GPU)
-VK_DEFINE_HANDLE(VK_BASE_OBJECT)
-VK_DEFINE_SUBCLASS_HANDLE(VK_DEVICE, VK_BASE_OBJECT)
-VK_DEFINE_SUBCLASS_HANDLE(VK_QUEUE, VK_BASE_OBJECT)
-VK_DEFINE_SUBCLASS_HANDLE(VK_GPU_MEMORY, VK_BASE_OBJECT)
-VK_DEFINE_SUBCLASS_HANDLE(VK_OBJECT, VK_BASE_OBJECT)
-VK_DEFINE_SUBCLASS_HANDLE(VK_BUFFER, VK_OBJECT)
-VK_DEFINE_SUBCLASS_HANDLE(VK_BUFFER_VIEW, VK_OBJECT)
-VK_DEFINE_SUBCLASS_HANDLE(VK_IMAGE, VK_OBJECT)
-VK_DEFINE_SUBCLASS_HANDLE(VK_IMAGE_VIEW, VK_OBJECT)
-VK_DEFINE_SUBCLASS_HANDLE(VK_COLOR_ATTACHMENT_VIEW, VK_OBJECT)
-VK_DEFINE_SUBCLASS_HANDLE(VK_DEPTH_STENCIL_VIEW, VK_OBJECT)
-VK_DEFINE_SUBCLASS_HANDLE(VK_SHADER, VK_OBJECT)
-VK_DEFINE_SUBCLASS_HANDLE(VK_PIPELINE, VK_OBJECT)
-VK_DEFINE_SUBCLASS_HANDLE(VK_SAMPLER, VK_OBJECT)
-VK_DEFINE_SUBCLASS_HANDLE(VK_DESCRIPTOR_SET, VK_OBJECT)
-VK_DEFINE_SUBCLASS_HANDLE(VK_DESCRIPTOR_SET_LAYOUT, VK_OBJECT)
-VK_DEFINE_SUBCLASS_HANDLE(VK_DESCRIPTOR_SET_LAYOUT_CHAIN, VK_OBJECT)
-VK_DEFINE_SUBCLASS_HANDLE(VK_DESCRIPTOR_POOL, VK_OBJECT)
-VK_DEFINE_SUBCLASS_HANDLE(VK_DYNAMIC_STATE_OBJECT, VK_OBJECT)
-VK_DEFINE_SUBCLASS_HANDLE(VK_DYNAMIC_VP_STATE_OBJECT, VK_DYNAMIC_STATE_OBJECT)
-VK_DEFINE_SUBCLASS_HANDLE(VK_DYNAMIC_RS_STATE_OBJECT, VK_DYNAMIC_STATE_OBJECT)
-VK_DEFINE_SUBCLASS_HANDLE(VK_DYNAMIC_CB_STATE_OBJECT, VK_DYNAMIC_STATE_OBJECT)
-VK_DEFINE_SUBCLASS_HANDLE(VK_DYNAMIC_DS_STATE_OBJECT, VK_DYNAMIC_STATE_OBJECT)
-VK_DEFINE_SUBCLASS_HANDLE(VK_CMD_BUFFER, VK_OBJECT)
-VK_DEFINE_SUBCLASS_HANDLE(VK_FENCE, VK_OBJECT)
-VK_DEFINE_SUBCLASS_HANDLE(VK_SEMAPHORE, VK_OBJECT)
-VK_DEFINE_SUBCLASS_HANDLE(VK_EVENT, VK_OBJECT)
-VK_DEFINE_SUBCLASS_HANDLE(VK_QUERY_POOL, VK_OBJECT)
-VK_DEFINE_SUBCLASS_HANDLE(VK_FRAMEBUFFER, VK_OBJECT)
-VK_DEFINE_SUBCLASS_HANDLE(VK_RENDER_PASS, VK_OBJECT)
+VK_DEFINE_HANDLE(VkInstance)
+VK_DEFINE_HANDLE(VkPhysicalGpu)
+VK_DEFINE_HANDLE(VkBaseObject)
+VK_DEFINE_SUBCLASS_HANDLE(VkDevice, VkBaseObject)
+VK_DEFINE_SUBCLASS_HANDLE(VkQueue, VkBaseObject)
+VK_DEFINE_SUBCLASS_HANDLE(VkGpuMemory, VkBaseObject)
+VK_DEFINE_SUBCLASS_HANDLE(VkObject, VkBaseObject)
+VK_DEFINE_SUBCLASS_HANDLE(VkBuffer, VkObject)
+VK_DEFINE_SUBCLASS_HANDLE(VkBufferView, VkObject)
+VK_DEFINE_SUBCLASS_HANDLE(VkImage, VkObject)
+VK_DEFINE_SUBCLASS_HANDLE(VkImageView, VkObject)
+VK_DEFINE_SUBCLASS_HANDLE(VkColorAttachmentView, VkObject)
+VK_DEFINE_SUBCLASS_HANDLE(VkDepthStencilView, VkObject)
+VK_DEFINE_SUBCLASS_HANDLE(VkShader, VkObject)
+VK_DEFINE_SUBCLASS_HANDLE(VkPipeline, VkObject)
+VK_DEFINE_SUBCLASS_HANDLE(VkSampler, VkObject)
+VK_DEFINE_SUBCLASS_HANDLE(VkDescriptorSet, VkObject)
+VK_DEFINE_SUBCLASS_HANDLE(VkDescriptorSetLayout, VkObject)
+VK_DEFINE_SUBCLASS_HANDLE(VkDescriptorSetLayoutChain, VkObject)
+VK_DEFINE_SUBCLASS_HANDLE(VkDescriptorPool, VkObject)
+VK_DEFINE_SUBCLASS_HANDLE(VkDynamicStateObject, VkObject)
+VK_DEFINE_SUBCLASS_HANDLE(VkDynamicVpStateObject, VkDynamicStateObject)
+VK_DEFINE_SUBCLASS_HANDLE(VkDynamicRsStateObject, VkDynamicStateObject)
+VK_DEFINE_SUBCLASS_HANDLE(VkDynamicCbStateObject, VkDynamicStateObject)
+VK_DEFINE_SUBCLASS_HANDLE(VkDynamicDsStateObject, VkDynamicStateObject)
+VK_DEFINE_SUBCLASS_HANDLE(VkCmdBuffer, VkObject)
+VK_DEFINE_SUBCLASS_HANDLE(VkFence, VkObject)
+VK_DEFINE_SUBCLASS_HANDLE(VkSemaphore, VkObject)
+VK_DEFINE_SUBCLASS_HANDLE(VkEvent, VkObject)
+VK_DEFINE_SUBCLASS_HANDLE(VkQueryPool, VkObject)
+VK_DEFINE_SUBCLASS_HANDLE(VkFramebuffer, VkObject)
+VK_DEFINE_SUBCLASS_HANDLE(VkRenderPass, VkObject)
#define VK_MAX_PHYSICAL_GPUS 16
#define VK_MAX_PHYSICAL_GPU_NAME 256
@@ -108,7 +108,7 @@ VK_DEFINE_SUBCLASS_HANDLE(VK_RENDER_PASS, VK_OBJECT)
// Enumerations
-typedef enum _VK_MEMORY_PRIORITY
+typedef enum VkMemoryPriority_
{
VK_MEMORY_PRIORITY_UNUSED = 0x0,
VK_MEMORY_PRIORITY_VERY_LOW = 0x1,
@@ -120,10 +120,10 @@ typedef enum _VK_MEMORY_PRIORITY
VK_MEMORY_PRIORITY_BEGIN_RANGE = VK_MEMORY_PRIORITY_UNUSED,
VK_MEMORY_PRIORITY_END_RANGE = VK_MEMORY_PRIORITY_VERY_HIGH,
VK_NUM_MEMORY_PRIORITY = (VK_MEMORY_PRIORITY_END_RANGE - VK_MEMORY_PRIORITY_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_MEMORY_PRIORITY)
-} VK_MEMORY_PRIORITY;
+ VK_MAX_ENUM(VkMemoryPriority_)
+} VkMemoryPriority;
-typedef enum _VK_IMAGE_LAYOUT
+typedef enum VkImageLayout_
{
VK_IMAGE_LAYOUT_UNDEFINED = 0x00000000, // Implicit layout an image is when its contents are undefined due to various reasons (e.g. right after creation)
VK_IMAGE_LAYOUT_GENERAL = 0x00000001, // General layout when image can be used for any kind of access
@@ -138,10 +138,10 @@ typedef enum _VK_IMAGE_LAYOUT
VK_IMAGE_LAYOUT_BEGIN_RANGE = VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_END_RANGE = VK_IMAGE_LAYOUT_TRANSFER_DESTINATION_OPTIMAL,
VK_NUM_IMAGE_LAYOUT = (VK_IMAGE_LAYOUT_END_RANGE - VK_IMAGE_LAYOUT_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_IMAGE_LAYOUT)
-} VK_IMAGE_LAYOUT;
+ VK_MAX_ENUM(VkImageLayout_)
+} VkImageLayout;
-typedef enum _VK_PIPE_EVENT
+typedef enum VkPipeEvent_
{
VK_PIPE_EVENT_TOP_OF_PIPE = 0x00000001, // Set event before the GPU starts processing subsequent command
VK_PIPE_EVENT_VERTEX_PROCESSING_COMPLETE = 0x00000002, // Set event when all pending vertex processing is complete
@@ -155,10 +155,10 @@ typedef enum _VK_PIPE_EVENT
VK_PIPE_EVENT_BEGIN_RANGE = VK_PIPE_EVENT_TOP_OF_PIPE,
VK_PIPE_EVENT_END_RANGE = VK_PIPE_EVENT_GPU_COMMANDS_COMPLETE,
VK_NUM_PIPE_EVENT = (VK_PIPE_EVENT_END_RANGE - VK_PIPE_EVENT_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_PIPE_EVENT)
-} VK_PIPE_EVENT;
+ VK_MAX_ENUM(VkPipeEvent_)
+} VkPipeEvent;
-typedef enum _VK_WAIT_EVENT
+typedef enum VkWaitEvent_
{
VK_WAIT_EVENT_TOP_OF_PIPE = 0x00000001, // Wait event before the GPU starts processing subsequent commands
VK_WAIT_EVENT_BEFORE_RASTERIZATION = 0x00000002, // Wait event before rasterizing subsequent primitives
@@ -166,20 +166,20 @@ typedef enum _VK_WAIT_EVENT
VK_WAIT_EVENT_BEGIN_RANGE = VK_WAIT_EVENT_TOP_OF_PIPE,
VK_WAIT_EVENT_END_RANGE = VK_WAIT_EVENT_BEFORE_RASTERIZATION,
VK_NUM_WAIT_EVENT = (VK_WAIT_EVENT_END_RANGE - VK_WAIT_EVENT_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_WAIT_EVENT)
-} VK_WAIT_EVENT;
+ VK_MAX_ENUM(VkWaitEvent_)
+} VkWaitEvent;
-typedef enum _VK_MEMORY_OUTPUT_FLAGS
+typedef enum VkMemoryOutputFlags_
{
VK_MEMORY_OUTPUT_CPU_WRITE_BIT = 0x00000001, // Controls output coherency of CPU writes
VK_MEMORY_OUTPUT_SHADER_WRITE_BIT = 0x00000002, // Controls output coherency of generic shader writes
VK_MEMORY_OUTPUT_COLOR_ATTACHMENT_BIT = 0x00000004, // Controls output coherency of color attachment writes
VK_MEMORY_OUTPUT_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000008, // Controls output coherency of depth/stencil attachment writes
VK_MEMORY_OUTPUT_COPY_BIT = 0x00000010, // Controls output coherency of copy operations
- VK_MAX_ENUM(_VK_MEMORY_OUTPUT_FLAGS)
-} VK_MEMORY_OUTPUT_FLAGS;
+ VK_MAX_ENUM(VkMemoryOutputFlags_)
+} VkMemoryOutputFlags;
-typedef enum _VK_MEMORY_INPUT_FLAGS
+typedef enum VkMemoryInputFlags_
{
VK_MEMORY_INPUT_CPU_READ_BIT = 0x00000001, // Controls input coherency of CPU reads
VK_MEMORY_INPUT_INDIRECT_COMMAND_BIT = 0x00000002, // Controls input coherency of indirect command reads
@@ -190,10 +190,10 @@ typedef enum _VK_MEMORY_INPUT_FLAGS
VK_MEMORY_INPUT_COLOR_ATTACHMENT_BIT = 0x00000040, // Controls input coherency of color attachment reads
VK_MEMORY_INPUT_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000080, // Controls input coherency of depth/stencil attachment reads
VK_MEMORY_INPUT_COPY_BIT = 0x00000100, // Controls input coherency of copy operations
- VK_MAX_ENUM(_VK_MEMORY_INPUT_FLAGS)
-} VK_MEMORY_INPUT_FLAGS;
+ VK_MAX_ENUM(VkMemoryInputFlags_)
+} VkMemoryInputFlags;
-typedef enum _VK_ATTACHMENT_LOAD_OP
+typedef enum VkAttachmentLoadOp_
{
VK_ATTACHMENT_LOAD_OP_LOAD = 0x00000000,
VK_ATTACHMENT_LOAD_OP_CLEAR = 0x00000001,
@@ -202,10 +202,10 @@ typedef enum _VK_ATTACHMENT_LOAD_OP
VK_ATTACHMENT_LOAD_OP_BEGIN_RANGE = VK_ATTACHMENT_LOAD_OP_LOAD,
VK_ATTACHMENT_LOAD_OP_END_RANGE = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_NUM_ATTACHMENT_LOAD_OP = (VK_ATTACHMENT_LOAD_OP_END_RANGE - VK_ATTACHMENT_LOAD_OP_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_ATTACHMENT_LOAD_OP)
-} VK_ATTACHMENT_LOAD_OP;
+ VK_MAX_ENUM(VkAttachmentLoadOp_)
+} VkAttachmentLoadOp;
-typedef enum _VK_ATTACHMENT_STORE_OP
+typedef enum VkAttachmentStoreOp_
{
VK_ATTACHMENT_STORE_OP_STORE = 0x00000000,
VK_ATTACHMENT_STORE_OP_RESOLVE_MSAA = 0x00000001,
@@ -214,10 +214,10 @@ typedef enum _VK_ATTACHMENT_STORE_OP
VK_ATTACHMENT_STORE_OP_BEGIN_RANGE = VK_ATTACHMENT_STORE_OP_STORE,
VK_ATTACHMENT_STORE_OP_END_RANGE = VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_NUM_ATTACHMENT_STORE_OP = (VK_ATTACHMENT_STORE_OP_END_RANGE - VK_ATTACHMENT_STORE_OP_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_ATTACHMENT_STORE_OP)
-} VK_ATTACHMENT_STORE_OP;
+ VK_MAX_ENUM(VkAttachmentStoreOp_)
+} VkAttachmentStoreOp;
-typedef enum _VK_IMAGE_TYPE
+typedef enum VkImageType_
{
VK_IMAGE_1D = 0x00000000,
VK_IMAGE_2D = 0x00000001,
@@ -226,10 +226,10 @@ typedef enum _VK_IMAGE_TYPE
VK_IMAGE_TYPE_BEGIN_RANGE = VK_IMAGE_1D,
VK_IMAGE_TYPE_END_RANGE = VK_IMAGE_3D,
VK_NUM_IMAGE_TYPE = (VK_IMAGE_TYPE_END_RANGE - VK_IMAGE_TYPE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_IMAGE_TYPE)
-} VK_IMAGE_TYPE;
+ VK_MAX_ENUM(VkImageType_)
+} VkImageType;
-typedef enum _VK_IMAGE_TILING
+typedef enum VkImageTiling_
{
VK_LINEAR_TILING = 0x00000000,
VK_OPTIMAL_TILING = 0x00000001,
@@ -237,10 +237,10 @@ typedef enum _VK_IMAGE_TILING
VK_IMAGE_TILING_BEGIN_RANGE = VK_LINEAR_TILING,
VK_IMAGE_TILING_END_RANGE = VK_OPTIMAL_TILING,
VK_NUM_IMAGE_TILING = (VK_IMAGE_TILING_END_RANGE - VK_IMAGE_TILING_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_IMAGE_TILING)
-} VK_IMAGE_TILING;
+ VK_MAX_ENUM(VkImageTiling_)
+} VkImageTiling;
-typedef enum _VK_IMAGE_VIEW_TYPE
+typedef enum VkImageViewType_
{
VK_IMAGE_VIEW_1D = 0x00000000,
VK_IMAGE_VIEW_2D = 0x00000001,
@@ -250,10 +250,10 @@ typedef enum _VK_IMAGE_VIEW_TYPE
VK_IMAGE_VIEW_TYPE_BEGIN_RANGE = VK_IMAGE_VIEW_1D,
VK_IMAGE_VIEW_TYPE_END_RANGE = VK_IMAGE_VIEW_CUBE,
VK_NUM_IMAGE_VIEW_TYPE = (VK_IMAGE_VIEW_TYPE_END_RANGE - VK_IMAGE_VIEW_TYPE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_IMAGE_VIEW_TYPE)
-} VK_IMAGE_VIEW_TYPE;
+ VK_MAX_ENUM(VkImageViewType_)
+} VkImageViewType;
-typedef enum _VK_IMAGE_ASPECT
+typedef enum VkImageAspect_
{
VK_IMAGE_ASPECT_COLOR = 0x00000000,
VK_IMAGE_ASPECT_DEPTH = 0x00000001,
@@ -262,10 +262,10 @@ typedef enum _VK_IMAGE_ASPECT
VK_IMAGE_ASPECT_BEGIN_RANGE = VK_IMAGE_ASPECT_COLOR,
VK_IMAGE_ASPECT_END_RANGE = VK_IMAGE_ASPECT_STENCIL,
VK_NUM_IMAGE_ASPECT = (VK_IMAGE_ASPECT_END_RANGE - VK_IMAGE_ASPECT_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_IMAGE_ASPECT)
-} VK_IMAGE_ASPECT;
+ VK_MAX_ENUM(VkImageAspect_)
+} VkImageAspect;
-typedef enum _VK_CHANNEL_SWIZZLE
+typedef enum VkChannelSwizzle_
{
VK_CHANNEL_SWIZZLE_ZERO = 0x00000000,
VK_CHANNEL_SWIZZLE_ONE = 0x00000001,
@@ -277,10 +277,10 @@ typedef enum _VK_CHANNEL_SWIZZLE
VK_CHANNEL_SWIZZLE_BEGIN_RANGE = VK_CHANNEL_SWIZZLE_ZERO,
VK_CHANNEL_SWIZZLE_END_RANGE = VK_CHANNEL_SWIZZLE_A,
VK_NUM_CHANNEL_SWIZZLE = (VK_CHANNEL_SWIZZLE_END_RANGE - VK_CHANNEL_SWIZZLE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_CHANNEL_SWIZZLE)
-} VK_CHANNEL_SWIZZLE;
+ VK_MAX_ENUM(VkChannelSwizzle_)
+} VkChannelSwizzle;
-typedef enum _VK_DESCRIPTOR_TYPE
+typedef enum VkDescriptorType_
{
VK_DESCRIPTOR_TYPE_SAMPLER = 0x00000000,
VK_DESCRIPTOR_TYPE_SAMPLER_TEXTURE = 0x00000001,
@@ -296,10 +296,10 @@ typedef enum _VK_DESCRIPTOR_TYPE
VK_DESCRIPTOR_TYPE_BEGIN_RANGE = VK_DESCRIPTOR_TYPE_SAMPLER,
VK_DESCRIPTOR_TYPE_END_RANGE = VK_DESCRIPTOR_TYPE_SHADER_STORAGE_BUFFER_DYNAMIC,
VK_NUM_DESCRIPTOR_TYPE = (VK_DESCRIPTOR_TYPE_END_RANGE - VK_DESCRIPTOR_TYPE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_DESCRIPTOR_TYPE)
-} VK_DESCRIPTOR_TYPE;
+ VK_MAX_ENUM(VkDescriptorType_)
+} VkDescriptorType;
-typedef enum _VK_DESCRIPTOR_POOL_USAGE
+typedef enum VkDescriptorPoolUsage_
{
VK_DESCRIPTOR_POOL_USAGE_ONE_SHOT = 0x00000000,
VK_DESCRIPTOR_POOL_USAGE_DYNAMIC = 0x00000001,
@@ -307,10 +307,10 @@ typedef enum _VK_DESCRIPTOR_POOL_USAGE
VK_DESCRIPTOR_POOL_USAGE_BEGIN_RANGE = VK_DESCRIPTOR_POOL_USAGE_ONE_SHOT,
VK_DESCRIPTOR_POOL_USAGE_END_RANGE = VK_DESCRIPTOR_POOL_USAGE_DYNAMIC,
VK_NUM_DESCRIPTOR_POOL_USAGE = (VK_DESCRIPTOR_POOL_USAGE_END_RANGE - VK_DESCRIPTOR_POOL_USAGE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_DESCRIPTOR_POOL_USAGE)
-} VK_DESCRIPTOR_POOL_USAGE;
+ VK_MAX_ENUM(VkDescriptorPoolUsage_)
+} VkDescriptorPoolUsage;
-typedef enum _VK_DESCRIPTOR_UPDATE_MODE
+typedef enum VkDescriptorUpdateMode_
{
VK_DESCRIPTOR_UDPATE_MODE_COPY = 0x00000000,
VK_DESCRIPTOR_UPDATE_MODE_FASTEST = 0x00000001,
@@ -318,10 +318,10 @@ typedef enum _VK_DESCRIPTOR_UPDATE_MODE
VK_DESCRIPTOR_UPDATE_MODE_BEGIN_RANGE = VK_DESCRIPTOR_UDPATE_MODE_COPY,
VK_DESCRIPTOR_UPDATE_MODE_END_RANGE = VK_DESCRIPTOR_UPDATE_MODE_FASTEST,
VK_NUM_DESCRIPTOR_UPDATE_MODE = (VK_DESCRIPTOR_UPDATE_MODE_END_RANGE - VK_DESCRIPTOR_UPDATE_MODE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_DESCRIPTOR_UPDATE_MODE)
-} VK_DESCRIPTOR_UPDATE_MODE;
+ VK_MAX_ENUM(VkDescriptorUpdateMode_)
+} VkDescriptorUpdateMode;
-typedef enum _VK_DESCRIPTOR_SET_USAGE
+typedef enum VkDescriptorSetUsage_
{
VK_DESCRIPTOR_SET_USAGE_ONE_SHOT = 0x00000000,
VK_DESCRIPTOR_SET_USAGE_STATIC = 0x00000001,
@@ -329,10 +329,10 @@ typedef enum _VK_DESCRIPTOR_SET_USAGE
VK_DESCRIPTOR_SET_USAGE_BEGIN_RANGE = VK_DESCRIPTOR_SET_USAGE_ONE_SHOT,
VK_DESCRIPTOR_SET_USAGE_END_RANGE = VK_DESCRIPTOR_SET_USAGE_STATIC,
VK_NUM_DESCRIPTOR_SET_USAGE = (VK_DESCRIPTOR_SET_USAGE_END_RANGE - VK_DESCRIPTOR_SET_USAGE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_DESCRIPTOR_SET_USAGE)
-} VK_DESCRIPTOR_SET_USAGE;
+ VK_MAX_ENUM(VkDescriptorSetUsage_)
+} VkDescriptorSetUsage;
-typedef enum _VK_QUERY_TYPE
+typedef enum VkQueryType_
{
VK_QUERY_OCCLUSION = 0x00000000,
VK_QUERY_PIPELINE_STATISTICS = 0x00000001,
@@ -340,10 +340,10 @@ typedef enum _VK_QUERY_TYPE
VK_QUERY_TYPE_BEGIN_RANGE = VK_QUERY_OCCLUSION,
VK_QUERY_TYPE_END_RANGE = VK_QUERY_PIPELINE_STATISTICS,
VK_NUM_QUERY_TYPE = (VK_QUERY_TYPE_END_RANGE - VK_QUERY_TYPE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_QUERY_TYPE)
-} VK_QUERY_TYPE;
+ VK_MAX_ENUM(VkQueryType_)
+} VkQueryType;
-typedef enum _VK_TIMESTAMP_TYPE
+typedef enum VkTimestampType_
{
VK_TIMESTAMP_TOP = 0x00000000,
VK_TIMESTAMP_BOTTOM = 0x00000001,
@@ -352,9 +352,9 @@ typedef enum _VK_TIMESTAMP_TYPE
VK_TIMESTAMP_TYPE_END_RANGE = VK_TIMESTAMP_BOTTOM,
VK_NUM_TIMESTAMP_TYPE = (VK_TIMESTAMP_TYPE_END_RANGE - VK_TIMESTAMP_TYPE_BEGIN_RANGE + 1),
-    VK_MAX_ENUM(_VK_TIMESTEAMP_TYPE)
-} VK_TIMESTAMP_TYPE;
+    VK_MAX_ENUM(VkTimestampType_)
+} VkTimestampType;
-typedef enum _VK_BORDER_COLOR_TYPE
+typedef enum VkBorderColorType_
{
VK_BORDER_COLOR_OPAQUE_WHITE = 0x00000000,
VK_BORDER_COLOR_TRANSPARENT_BLACK = 0x00000001,
@@ -363,10 +363,10 @@ typedef enum _VK_BORDER_COLOR_TYPE
VK_BORDER_COLOR_TYPE_BEGIN_RANGE = VK_BORDER_COLOR_OPAQUE_WHITE,
VK_BORDER_COLOR_TYPE_END_RANGE = VK_BORDER_COLOR_OPAQUE_BLACK,
VK_NUM_BORDER_COLOR_TYPE = (VK_BORDER_COLOR_TYPE_END_RANGE - VK_BORDER_COLOR_TYPE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_BORDER_COLOR_TYPE)
-} VK_BORDER_COLOR_TYPE;
+ VK_MAX_ENUM(VkBorderColorType_)
+} VkBorderColorType;
-typedef enum _VK_PIPELINE_BIND_POINT
+typedef enum VkPipelineBindPoint_
{
VK_PIPELINE_BIND_POINT_COMPUTE = 0x00000000,
VK_PIPELINE_BIND_POINT_GRAPHICS = 0x00000001,
@@ -374,10 +374,10 @@ typedef enum _VK_PIPELINE_BIND_POINT
VK_PIPELINE_BIND_POINT_BEGIN_RANGE = VK_PIPELINE_BIND_POINT_COMPUTE,
VK_PIPELINE_BIND_POINT_END_RANGE = VK_PIPELINE_BIND_POINT_GRAPHICS,
VK_NUM_PIPELINE_BIND_POINT = (VK_PIPELINE_BIND_POINT_END_RANGE - VK_PIPELINE_BIND_POINT_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_PIPELINE_BIND_POINT)
-} VK_PIPELINE_BIND_POINT;
+ VK_MAX_ENUM(VkPipelineBindPoint_)
+} VkPipelineBindPoint;
-typedef enum _VK_STATE_BIND_POINT
+typedef enum VkStateBindPoint_
{
VK_STATE_BIND_VIEWPORT = 0x00000000,
VK_STATE_BIND_RASTER = 0x00000001,
@@ -387,10 +387,10 @@ typedef enum _VK_STATE_BIND_POINT
VK_STATE_BIND_POINT_BEGIN_RANGE = VK_STATE_BIND_VIEWPORT,
VK_STATE_BIND_POINT_END_RANGE = VK_STATE_BIND_DEPTH_STENCIL,
VK_NUM_STATE_BIND_POINT = (VK_STATE_BIND_POINT_END_RANGE - VK_STATE_BIND_POINT_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_STATE_BIND_POINT)
-} VK_STATE_BIND_POINT;
+ VK_MAX_ENUM(VkStateBindPoint_)
+} VkStateBindPoint;
-typedef enum _VK_PRIMITIVE_TOPOLOGY
+typedef enum VkPrimitiveTopology_
{
VK_TOPOLOGY_POINT_LIST = 0x00000000,
VK_TOPOLOGY_LINE_LIST = 0x00000001,
@@ -407,10 +407,10 @@ typedef enum _VK_PRIMITIVE_TOPOLOGY
VK_PRIMITIVE_TOPOLOGY_BEGIN_RANGE = VK_TOPOLOGY_POINT_LIST,
VK_PRIMITIVE_TOPOLOGY_END_RANGE = VK_TOPOLOGY_PATCH,
VK_NUM_PRIMITIVE_TOPOLOGY = (VK_PRIMITIVE_TOPOLOGY_END_RANGE - VK_PRIMITIVE_TOPOLOGY_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_PRIMITIVE_TOPOLOGY)
-} VK_PRIMITIVE_TOPOLOGY;
+ VK_MAX_ENUM(VkPrimitiveTopology_)
+} VkPrimitiveTopology;
-typedef enum _VK_INDEX_TYPE
+typedef enum VkIndexType_
{
VK_INDEX_8 = 0x00000000,
VK_INDEX_16 = 0x00000001,
@@ -419,10 +419,10 @@ typedef enum _VK_INDEX_TYPE
VK_INDEX_TYPE_BEGIN_RANGE = VK_INDEX_8,
VK_INDEX_TYPE_END_RANGE = VK_INDEX_32,
VK_NUM_INDEX_TYPE = (VK_INDEX_TYPE_END_RANGE - VK_INDEX_TYPE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_INDEX_TYPE)
-} VK_INDEX_TYPE;
+ VK_MAX_ENUM(VkIndexType_)
+} VkIndexType;
-typedef enum _VK_TEX_FILTER
+typedef enum VkTexFilter_
{
VK_TEX_FILTER_NEAREST = 0,
VK_TEX_FILTER_LINEAR = 1,
@@ -430,10 +430,10 @@ typedef enum _VK_TEX_FILTER
VK_TEX_FILTER_BEGIN_RANGE = VK_TEX_FILTER_NEAREST,
VK_TEX_FILTER_END_RANGE = VK_TEX_FILTER_LINEAR,
VK_NUM_TEX_FILTER = (VK_TEX_FILTER_END_RANGE - VK_TEX_FILTER_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_TEX_FILTER)
-} VK_TEX_FILTER;
+ VK_MAX_ENUM(VkTexFilter_)
+} VkTexFilter;
-typedef enum _VK_TEX_MIPMAP_MODE
+typedef enum VkTexMipmapMode_
{
VK_TEX_MIPMAP_BASE = 0, // Always choose base level
VK_TEX_MIPMAP_NEAREST = 1, // Choose nearest mip level
@@ -442,10 +442,10 @@ typedef enum _VK_TEX_MIPMAP_MODE
VK_TEX_MIPMAP_BEGIN_RANGE = VK_TEX_MIPMAP_BASE,
VK_TEX_MIPMAP_END_RANGE = VK_TEX_MIPMAP_LINEAR,
VK_NUM_TEX_MIPMAP = (VK_TEX_MIPMAP_END_RANGE - VK_TEX_MIPMAP_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_TEX_MIPMAP_MODE)
-} VK_TEX_MIPMAP_MODE;
+ VK_MAX_ENUM(VkTexMipmapMode_)
+} VkTexMipmapMode;
-typedef enum _VK_TEX_ADDRESS
+typedef enum VkTexAddress_
{
VK_TEX_ADDRESS_WRAP = 0x00000000,
VK_TEX_ADDRESS_MIRROR = 0x00000001,
@@ -456,10 +456,10 @@ typedef enum _VK_TEX_ADDRESS
VK_TEX_ADDRESS_BEGIN_RANGE = VK_TEX_ADDRESS_WRAP,
VK_TEX_ADDRESS_END_RANGE = VK_TEX_ADDRESS_CLAMP_BORDER,
VK_NUM_TEX_ADDRESS = (VK_TEX_ADDRESS_END_RANGE - VK_TEX_ADDRESS_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_TEX_ADDRESS)
-} VK_TEX_ADDRESS;
+ VK_MAX_ENUM(VkTexAddress_)
+} VkTexAddress;
-typedef enum _VK_COMPARE_FUNC
+typedef enum VkCompareFunc_
{
VK_COMPARE_NEVER = 0x00000000,
VK_COMPARE_LESS = 0x00000001,
@@ -473,10 +473,10 @@ typedef enum _VK_COMPARE_FUNC
VK_COMPARE_FUNC_BEGIN_RANGE = VK_COMPARE_NEVER,
VK_COMPARE_FUNC_END_RANGE = VK_COMPARE_ALWAYS,
VK_NUM_COMPARE_FUNC = (VK_COMPARE_FUNC_END_RANGE - VK_COMPARE_FUNC_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_COMPARE_FUNC)
-} VK_COMPARE_FUNC;
+ VK_MAX_ENUM(VkCompareFunc_)
+} VkCompareFunc;
-typedef enum _VK_FILL_MODE
+typedef enum VkFillMode_
{
VK_FILL_POINTS = 0x00000000,
VK_FILL_WIREFRAME = 0x00000001,
@@ -485,10 +485,10 @@ typedef enum _VK_FILL_MODE
VK_FILL_MODE_BEGIN_RANGE = VK_FILL_POINTS,
VK_FILL_MODE_END_RANGE = VK_FILL_SOLID,
VK_NUM_FILL_MODE = (VK_FILL_MODE_END_RANGE - VK_FILL_MODE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_FILL_MODE)
-} VK_FILL_MODE;
+ VK_MAX_ENUM(VkFillMode_)
+} VkFillMode;
-typedef enum _VK_CULL_MODE
+typedef enum VkCullMode_
{
VK_CULL_NONE = 0x00000000,
VK_CULL_FRONT = 0x00000001,
@@ -498,10 +498,10 @@ typedef enum _VK_CULL_MODE
VK_CULL_MODE_BEGIN_RANGE = VK_CULL_NONE,
VK_CULL_MODE_END_RANGE = VK_CULL_FRONT_AND_BACK,
VK_NUM_CULL_MODE = (VK_CULL_MODE_END_RANGE - VK_CULL_MODE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_CULL_MODE)
-} VK_CULL_MODE;
+ VK_MAX_ENUM(VkCullMode_)
+} VkCullMode;
-typedef enum _VK_FACE_ORIENTATION
+typedef enum VkFaceOrientation_
{
VK_FRONT_FACE_CCW = 0x00000000,
VK_FRONT_FACE_CW = 0x00000001,
@@ -509,10 +509,10 @@ typedef enum _VK_FACE_ORIENTATION
VK_FACE_ORIENTATION_BEGIN_RANGE = VK_FRONT_FACE_CCW,
VK_FACE_ORIENTATION_END_RANGE = VK_FRONT_FACE_CW,
VK_NUM_FACE_ORIENTATION = (VK_FACE_ORIENTATION_END_RANGE - VK_FACE_ORIENTATION_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_FACE_ORIENTATION)
-} VK_FACE_ORIENTATION;
+ VK_MAX_ENUM(VkFaceOrientation_)
+} VkFaceOrientation;
-typedef enum _VK_PROVOKING_VERTEX_CONVENTION
+typedef enum VkProvokingVertexConvention_
{
VK_PROVOKING_VERTEX_FIRST = 0x00000000,
VK_PROVOKING_VERTEX_LAST = 0x00000001,
@@ -520,10 +520,10 @@ typedef enum _VK_PROVOKING_VERTEX_CONVENTION
VK_PROVOKING_VERTEX_BEGIN_RANGE = VK_PROVOKING_VERTEX_FIRST,
VK_PROVOKING_VERTEX_END_RANGE = VK_PROVOKING_VERTEX_LAST,
VK_NUM_PROVOKING_VERTEX_CONVENTION = (VK_PROVOKING_VERTEX_END_RANGE - VK_PROVOKING_VERTEX_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_PROVOKING_VERTEX_CONVENTION)
-} VK_PROVOKING_VERTEX_CONVENTION;
+ VK_MAX_ENUM(VkProvokingVertexConvention_)
+} VkProvokingVertexConvention;
-typedef enum _VK_COORDINATE_ORIGIN
+typedef enum VkCoordinateOrigin_
{
VK_COORDINATE_ORIGIN_UPPER_LEFT = 0x00000000,
VK_COORDINATE_ORIGIN_LOWER_LEFT = 0x00000001,
@@ -531,10 +531,10 @@ typedef enum _VK_COORDINATE_ORIGIN
VK_COORDINATE_ORIGIN_BEGIN_RANGE = VK_COORDINATE_ORIGIN_UPPER_LEFT,
VK_COORDINATE_ORIGIN_END_RANGE = VK_COORDINATE_ORIGIN_LOWER_LEFT,
-    VK_NUM_COORDINATE_ORIGIN = (VK_COORDINATE_ORIGIN_END_RANGE - VK_COORDINATE_ORIGIN_END_RANGE + 1),
+    VK_NUM_COORDINATE_ORIGIN = (VK_COORDINATE_ORIGIN_END_RANGE - VK_COORDINATE_ORIGIN_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_COORDINATE_ORIGIN)
-} VK_COORDINATE_ORIGIN;
+ VK_MAX_ENUM(VkCoordinateOrigin_)
+} VkCoordinateOrigin;
-typedef enum _VK_DEPTH_MODE
+typedef enum VkDepthMode_
{
VK_DEPTH_MODE_ZERO_TO_ONE = 0x00000000,
VK_DEPTH_MODE_NEGATIVE_ONE_TO_ONE = 0x00000001,
@@ -542,10 +542,10 @@ typedef enum _VK_DEPTH_MODE
VK_DEPTH_MODE_BEGIN_RANGE = VK_DEPTH_MODE_ZERO_TO_ONE,
VK_DEPTH_MODE_END_RANGE = VK_DEPTH_MODE_NEGATIVE_ONE_TO_ONE,
VK_NUM_DEPTH_MODE = (VK_DEPTH_MODE_END_RANGE - VK_DEPTH_MODE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_DEPTH_MODE)
-} VK_DEPTH_MODE;
+ VK_MAX_ENUM(VkDepthMode_)
+} VkDepthMode;
-typedef enum _VK_BLEND
+typedef enum VkBlend_
{
VK_BLEND_ZERO = 0x00000000,
VK_BLEND_ONE = 0x00000001,
@@ -570,10 +570,10 @@ typedef enum _VK_BLEND
VK_BLEND_BEGIN_RANGE = VK_BLEND_ZERO,
VK_BLEND_END_RANGE = VK_BLEND_ONE_MINUS_SRC1_ALPHA,
VK_NUM_BLEND = (VK_BLEND_END_RANGE - VK_BLEND_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_BLEND)
-} VK_BLEND;
+ VK_MAX_ENUM(VkBlend_)
+} VkBlend;
-typedef enum _VK_BLEND_FUNC
+typedef enum VkBlendFunc_
{
VK_BLEND_FUNC_ADD = 0x00000000,
VK_BLEND_FUNC_SUBTRACT = 0x00000001,
@@ -584,10 +584,10 @@ typedef enum _VK_BLEND_FUNC
VK_BLEND_FUNC_BEGIN_RANGE = VK_BLEND_FUNC_ADD,
VK_BLEND_FUNC_END_RANGE = VK_BLEND_FUNC_MAX,
VK_NUM_BLEND_FUNC = (VK_BLEND_FUNC_END_RANGE - VK_BLEND_FUNC_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_BLEND_FUNC)
-} VK_BLEND_FUNC;
+ VK_MAX_ENUM(VkBlendFunc_)
+} VkBlendFunc;
-typedef enum _VK_STENCIL_OP
+typedef enum VkStencilOp_
{
VK_STENCIL_OP_KEEP = 0x00000000,
VK_STENCIL_OP_ZERO = 0x00000001,
@@ -601,10 +601,10 @@ typedef enum _VK_STENCIL_OP
VK_STENCIL_OP_BEGIN_RANGE = VK_STENCIL_OP_KEEP,
VK_STENCIL_OP_END_RANGE = VK_STENCIL_OP_DEC_WRAP,
VK_NUM_STENCIL_OP = (VK_STENCIL_OP_END_RANGE - VK_STENCIL_OP_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_STENCIL_OP)
-} VK_STENCIL_OP;
+ VK_MAX_ENUM(VkStencilOp_)
+} VkStencilOp;
-typedef enum _VK_LOGIC_OP
+typedef enum VkLogicOp_
{
VK_LOGIC_OP_COPY = 0x00000000,
VK_LOGIC_OP_CLEAR = 0x00000001,
@@ -626,10 +626,10 @@ typedef enum _VK_LOGIC_OP
VK_LOGIC_OP_BEGIN_RANGE = VK_LOGIC_OP_COPY,
VK_LOGIC_OP_END_RANGE = VK_LOGIC_OP_SET,
VK_NUM_LOGIC_OP = (VK_LOGIC_OP_END_RANGE - VK_LOGIC_OP_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_LOGIC_OP)
-} VK_LOGIC_OP;
+ VK_MAX_ENUM(VkLogicOp_)
+} VkLogicOp;
-typedef enum _VK_SYSTEM_ALLOC_TYPE
+typedef enum VkSystemAllocType_
{
VK_SYSTEM_ALLOC_API_OBJECT = 0x00000000,
VK_SYSTEM_ALLOC_INTERNAL = 0x00000001,
@@ -640,10 +640,10 @@ typedef enum _VK_SYSTEM_ALLOC_TYPE
VK_SYSTEM_ALLOC_BEGIN_RANGE = VK_SYSTEM_ALLOC_API_OBJECT,
VK_SYSTEM_ALLOC_END_RANGE = VK_SYSTEM_ALLOC_DEBUG,
VK_NUM_SYSTEM_ALLOC_TYPE = (VK_SYSTEM_ALLOC_END_RANGE - VK_SYSTEM_ALLOC_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_SYSTEM_ALLOC_TYPE)
-} VK_SYSTEM_ALLOC_TYPE;
+ VK_MAX_ENUM(VkSystemAllocType_)
+} VkSystemAllocType;
-typedef enum _VK_PHYSICAL_GPU_TYPE
+typedef enum VkPhysicalGpuType_
{
VK_GPU_TYPE_OTHER = 0x00000000,
VK_GPU_TYPE_INTEGRATED = 0x00000001,
@@ -653,10 +653,10 @@ typedef enum _VK_PHYSICAL_GPU_TYPE
VK_PHYSICAL_GPU_TYPE_BEGIN_RANGE = VK_GPU_TYPE_OTHER,
VK_PHYSICAL_GPU_TYPE_END_RANGE = VK_GPU_TYPE_VIRTUAL,
VK_NUM_PHYSICAL_GPU_TYPE = (VK_PHYSICAL_GPU_TYPE_END_RANGE - VK_PHYSICAL_GPU_TYPE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_PHYSICAL_GPU_TYPE)
-} VK_PHYSICAL_GPU_TYPE;
+ VK_MAX_ENUM(VkPhysicalGpuType_)
+} VkPhysicalGpuType;
-typedef enum _VK_PHYSICAL_GPU_INFO_TYPE
+typedef enum VkPhysicalGpuInfoType_
{
// Info type for vkGetGpuInfo()
VK_INFO_TYPE_PHYSICAL_GPU_PROPERTIES = 0x00000000,
@@ -667,10 +667,10 @@ typedef enum _VK_PHYSICAL_GPU_INFO_TYPE
VK_INFO_TYPE_PHYSICAL_GPU_BEGIN_RANGE = VK_INFO_TYPE_PHYSICAL_GPU_PROPERTIES,
VK_INFO_TYPE_PHYSICAL_GPU_END_RANGE = VK_INFO_TYPE_PHYSICAL_GPU_MEMORY_PROPERTIES,
VK_NUM_INFO_TYPE_PHYSICAL_GPU = (VK_INFO_TYPE_PHYSICAL_GPU_END_RANGE - VK_INFO_TYPE_PHYSICAL_GPU_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_PHYSICAL_GPU_INFO_TYPE)
-} VK_PHYSICAL_GPU_INFO_TYPE;
+ VK_MAX_ENUM(VkPhysicalGpuInfoType_)
+} VkPhysicalGpuInfoType;
-typedef enum _VK_FORMAT_INFO_TYPE
+typedef enum VkFormatInfoType_
{
// Info type for vkGetFormatInfo()
VK_INFO_TYPE_FORMAT_PROPERTIES = 0x00000000,
@@ -678,10 +678,10 @@ typedef enum _VK_FORMAT_INFO_TYPE
VK_INFO_TYPE_FORMAT_BEGIN_RANGE = VK_INFO_TYPE_FORMAT_PROPERTIES,
VK_INFO_TYPE_FORMAT_END_RANGE = VK_INFO_TYPE_FORMAT_PROPERTIES,
VK_NUM_INFO_TYPE_FORMAT = (VK_INFO_TYPE_FORMAT_END_RANGE - VK_INFO_TYPE_FORMAT_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_FORMAT_INFO_TYPE)
-} VK_FORMAT_INFO_TYPE;
+ VK_MAX_ENUM(VkFormatInfoType_)
+} VkFormatInfoType;
-typedef enum _VK_SUBRESOURCE_INFO_TYPE
+typedef enum VkSubresourceInfoType_
{
// Info type for vkGetImageSubresourceInfo()
VK_INFO_TYPE_SUBRESOURCE_LAYOUT = 0x00000000,
@@ -689,10 +689,10 @@ typedef enum _VK_SUBRESOURCE_INFO_TYPE
VK_INFO_TYPE_SUBRESOURCE_BEGIN_RANGE = VK_INFO_TYPE_SUBRESOURCE_LAYOUT,
VK_INFO_TYPE_SUBRESOURCE_END_RANGE = VK_INFO_TYPE_SUBRESOURCE_LAYOUT,
VK_NUM_INFO_TYPE_SUBRESOURCE = (VK_INFO_TYPE_SUBRESOURCE_END_RANGE - VK_INFO_TYPE_SUBRESOURCE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_SUBRESOURCE_INFO_TYPE)
-} VK_SUBRESOURCE_INFO_TYPE;
+ VK_MAX_ENUM(VkSubresourceInfoType_)
+} VkSubresourceInfoType;
-typedef enum _VK_OBJECT_INFO_TYPE
+typedef enum VkObjectInfoType_
{
// Info type for vkGetObjectInfo()
VK_INFO_TYPE_MEMORY_ALLOCATION_COUNT = 0x00000000,
@@ -703,10 +703,10 @@ typedef enum _VK_OBJECT_INFO_TYPE
VK_INFO_TYPE_BEGIN_RANGE = VK_INFO_TYPE_MEMORY_ALLOCATION_COUNT,
VK_INFO_TYPE_END_RANGE = VK_INFO_TYPE_IMAGE_MEMORY_REQUIREMENTS,
VK_NUM_INFO_TYPE = (VK_INFO_TYPE_END_RANGE - VK_INFO_TYPE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_OBJECT_INFO_TYPE)
-} VK_OBJECT_INFO_TYPE;
+ VK_MAX_ENUM(VkObjectInfoType_)
+} VkObjectInfoType;
-typedef enum _VK_VALIDATION_LEVEL
+typedef enum VkValidationLevel_
{
VK_VALIDATION_LEVEL_0 = 0x00000000,
VK_VALIDATION_LEVEL_1 = 0x00000001,
@@ -718,13 +718,13 @@ typedef enum _VK_VALIDATION_LEVEL
VK_VALIDATION_LEVEL_END_RANGE = VK_VALIDATION_LEVEL_4,
VK_NUM_VALIDATION_LEVEL = (VK_VALIDATION_LEVEL_END_RANGE - VK_VALIDATION_LEVEL_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_VALIDATION_LEVEL)
-} VK_VALIDATION_LEVEL;
+ VK_MAX_ENUM(VkValidationLevel_)
+} VkValidationLevel;
// ------------------------------------------------------------------------------------------------
// Error and return codes
-typedef enum _VK_RESULT
+typedef enum VkResult_
{
// Return codes for successful operation execution (> = 0)
VK_SUCCESS = 0x0000000,
@@ -770,12 +770,12 @@ typedef enum _VK_RESULT
VK_ERROR_INCOMPATIBLE_QUEUE = -(0x00000021),
VK_ERROR_NOT_SHAREABLE = -(0x00000022),
-    VK_MAX_ENUM(_VK_RESULT_CODE)
+    VK_MAX_ENUM(VkResult_)
-} VK_RESULT;
+} VkResult;
// ------------------------------------------------------------------------------------------------
// Vulkan format definitions
-typedef enum _VK_VERTEX_INPUT_STEP_RATE
+typedef enum VkVertexInputStepRate_
{
VK_VERTEX_INPUT_STEP_RATE_VERTEX = 0x0,
VK_VERTEX_INPUT_STEP_RATE_INSTANCE = 0x1,
@@ -784,10 +784,10 @@ typedef enum _VK_VERTEX_INPUT_STEP_RATE
VK_VERTEX_INPUT_STEP_RATE_BEGIN_RANGE = VK_VERTEX_INPUT_STEP_RATE_VERTEX,
VK_VERTEX_INPUT_STEP_RATE_END_RANGE = VK_VERTEX_INPUT_STEP_RATE_DRAW,
VK_NUM_VERTEX_INPUT_STEP_RATE = (VK_VERTEX_INPUT_STEP_RATE_END_RANGE - VK_VERTEX_INPUT_STEP_RATE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_VERTEX_INPUT_STEP_RATE)
-} VK_VERTEX_INPUT_STEP_RATE;
+ VK_MAX_ENUM(VkVertexInputStepRate_)
+} VkVertexInputStepRate;
-typedef enum _VK_FORMAT
+typedef enum VkFormat_
{
VK_FMT_UNDEFINED = 0x00000000,
VK_FMT_R4G4_UNORM = 0x00000001,
@@ -967,11 +967,11 @@ typedef enum _VK_FORMAT
VK_FMT_BEGIN_RANGE = VK_FMT_UNDEFINED,
VK_FMT_END_RANGE = VK_FMT_B10G10R10A2_SINT,
VK_NUM_FMT = (VK_FMT_END_RANGE - VK_FMT_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_FORMAT)
-} VK_FORMAT;
+ VK_MAX_ENUM(VkFormat_)
+} VkFormat;
// Shader stage enumerant
-typedef enum _VK_PIPELINE_SHADER_STAGE
+typedef enum VkPipelineShaderStage_
{
VK_SHADER_STAGE_VERTEX = 0,
VK_SHADER_STAGE_TESS_CONTROL = 1,
@@ -983,10 +983,10 @@ typedef enum _VK_PIPELINE_SHADER_STAGE
VK_SHADER_STAGE_BEGIN_RANGE = VK_SHADER_STAGE_VERTEX,
VK_SHADER_STAGE_END_RANGE = VK_SHADER_STAGE_COMPUTE,
VK_NUM_SHADER_STAGE = (VK_SHADER_STAGE_END_RANGE - VK_SHADER_STAGE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_PIPELINE_SHADER_STAGE)
-} VK_PIPELINE_SHADER_STAGE;
+ VK_MAX_ENUM(VkPipelineShaderStage_)
+} VkPipelineShaderStage;
-typedef enum _VK_SHADER_STAGE_FLAGS
+typedef enum VkShaderStageFlags_
{
VK_SHADER_STAGE_FLAGS_VERTEX_BIT = 0x00000001,
VK_SHADER_STAGE_FLAGS_TESS_CONTROL_BIT = 0x00000002,
@@ -996,11 +996,11 @@ typedef enum _VK_SHADER_STAGE_FLAGS
VK_SHADER_STAGE_FLAGS_COMPUTE_BIT = 0x00000020,
VK_SHADER_STAGE_FLAGS_ALL = 0x7FFFFFFF,
- VK_MAX_ENUM(_VK_SHADER_STAGE_FLAGS)
-} VK_SHADER_STAGE_FLAGS;
+ VK_MAX_ENUM(VkShaderStageFlags_)
+} VkShaderStageFlags;
// Structure type enumerant
-typedef enum _VK_STRUCTURE_TYPE
+typedef enum VkStructureType_
{
VK_STRUCTURE_TYPE_APPLICATION_INFO = 0,
VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO = 1,
@@ -1063,32 +1063,32 @@ typedef enum _VK_STRUCTURE_TYPE
VK_STRUCTURE_TYPE_END_RANGE = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
VK_NUM_STRUCTURE_TYPE = (VK_STRUCTURE_TYPE_END_RANGE - VK_STRUCTURE_TYPE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_STRUCTURE_TYPE)
-} VK_STRUCTURE_TYPE;
+ VK_MAX_ENUM(VkStructureType_)
+} VkStructureType;
// ------------------------------------------------------------------------------------------------
// Flags
// Device creation flags
-typedef enum _VK_DEVICE_CREATE_FLAGS
+typedef enum VkDeviceCreateFlags_
{
VK_DEVICE_CREATE_VALIDATION_BIT = 0x00000001,
VK_DEVICE_CREATE_MGPU_IQ_MATCH_BIT = 0x00000002,
- VK_MAX_ENUM(_VK_DEVICE_CREATE_FLAGS)
-} VK_DEVICE_CREATE_FLAGS;
+ VK_MAX_ENUM(VkDeviceCreateFlags_)
+} VkDeviceCreateFlags;
// Queue capabilities
-typedef enum _VK_QUEUE_FLAGS
+typedef enum VkQueueFlags_
{
VK_QUEUE_GRAPHICS_BIT = 0x00000001, // Queue supports graphics operations
VK_QUEUE_COMPUTE_BIT = 0x00000002, // Queue supports compute operations
VK_QUEUE_DMA_BIT = 0x00000004, // Queue supports DMA operations
VK_QUEUE_EXTENDED_BIT = 0x40000000, // Extended queue
- VK_MAX_ENUM(_VK_QUEUE_FLAGS)
-} VK_QUEUE_FLAGS;
+ VK_MAX_ENUM(VkQueueFlags_)
+} VkQueueFlags;
// memory properties passed into vkAllocMemory().
-typedef enum _VK_MEMORY_PROPERTY_FLAGS
+typedef enum VkMemoryPropertyFlags_
{
VK_MEMORY_PROPERTY_GPU_ONLY = 0x00000000, // If not set, then allocate memory on device (GPU)
VK_MEMORY_PROPERTY_CPU_VISIBLE_BIT = 0x00000001,
@@ -1097,10 +1097,10 @@ typedef enum _VK_MEMORY_PROPERTY_FLAGS
VK_MEMORY_PROPERTY_CPU_WRITE_COMBINED_BIT = 0x00000008,
VK_MEMORY_PROPERTY_PREFER_CPU_LOCAL = 0x00000010, // all else being equal, prefer CPU access
VK_MEMORY_PROPERTY_SHAREABLE_BIT = 0x00000020,
- VK_MAX_ENUM(_VK_MEMORY_PROPERTY_FLAGS)
-} VK_MEMORY_PROPERTY_FLAGS;
+ VK_MAX_ENUM(VkMemoryPropertyFlags_)
+} VkMemoryPropertyFlags;
-typedef enum _VK_MEMORY_TYPE
+typedef enum VkMemoryType_
{
VK_MEMORY_TYPE_OTHER = 0x00000000, // device memory that is not any of the others
VK_MEMORY_TYPE_BUFFER = 0x00000001, // memory for buffers and associated information
@@ -1109,11 +1109,11 @@ typedef enum _VK_MEMORY_TYPE
VK_MEMORY_TYPE_BEGIN_RANGE = VK_MEMORY_TYPE_OTHER,
VK_MEMORY_TYPE_END_RANGE = VK_MEMORY_TYPE_IMAGE,
VK_NUM_MEMORY_TYPE = (VK_MEMORY_TYPE_END_RANGE - VK_MEMORY_TYPE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_MEMORY_TYPE)
-} VK_MEMORY_TYPE;
+ VK_MAX_ENUM(VkMemoryType_)
+} VkMemoryType;
// Buffer and buffer allocation usage flags
-typedef enum _VK_BUFFER_USAGE_FLAGS
+typedef enum VkBufferUsageFlags_
{
VK_BUFFER_USAGE_GENERAL = 0x00000000, // no special usage
VK_BUFFER_USAGE_SHADER_ACCESS_READ_BIT = 0x00000001, // Shader read (e.g. TBO, image buffer, UBO, SSBO)
@@ -1128,18 +1128,18 @@ typedef enum _VK_BUFFER_USAGE_FLAGS
VK_BUFFER_USAGE_INDIRECT_PARAMETER_FETCH_BIT = 0x00000200, // Can be the source of indirect parameters (e.g. indirect buffer, parameter buffer)
VK_BUFFER_USAGE_TEXTURE_BUFFER_BIT = 0x00000400, // texture buffer (TBO)
VK_BUFFER_USAGE_IMAGE_BUFFER_BIT = 0x00000800, // image buffer (load/store)
- VK_MAX_ENUM(_VK_BUFFER_USAGE_FLAGS)
-} VK_BUFFER_USAGE_FLAGS;
+ VK_MAX_ENUM(VkBufferUsageFlags_)
+} VkBufferUsageFlags;
// Buffer flags
-typedef enum _VK_BUFFER_CREATE_FLAGS
+typedef enum VkBufferCreateFlags_
{
VK_BUFFER_CREATE_SHAREABLE_BIT = 0x00000001,
VK_BUFFER_CREATE_SPARSE_BIT = 0x00000002,
- VK_MAX_ENUM(_VK_BUFFER_CREATE_FLAGS)
-} VK_BUFFER_CREATE_FLAGS;
+ VK_MAX_ENUM(VkBufferCreateFlags_)
+} VkBufferCreateFlags;
-typedef enum _VK_BUFFER_VIEW_TYPE
+typedef enum VkBufferViewType_
{
VK_BUFFER_VIEW_RAW = 0x00000000, // Raw buffer without special structure (e.g. UBO, SSBO, indirect and parameter buffers)
VK_BUFFER_VIEW_TYPED = 0x00000001, // Typed buffer, format and channels are used (TBO, image buffer)
@@ -1147,12 +1147,12 @@ typedef enum _VK_BUFFER_VIEW_TYPE
VK_BUFFER_VIEW_TYPE_BEGIN_RANGE = VK_BUFFER_VIEW_RAW,
VK_BUFFER_VIEW_TYPE_END_RANGE = VK_BUFFER_VIEW_TYPED,
VK_NUM_BUFFER_VIEW_TYPE = (VK_BUFFER_VIEW_TYPE_END_RANGE - VK_BUFFER_VIEW_TYPE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_BUFFER_VIEW_TYPE)
-} VK_BUFFER_VIEW_TYPE;
+ VK_MAX_ENUM(VkBufferViewType_)
+} VkBufferViewType;
// Images memory allocations can be used for resources of a given format class.
-typedef enum _VK_IMAGE_FORMAT_CLASS
+typedef enum VkImageFormatClass_
{
VK_IMAGE_FORMAT_CLASS_128_BITS = 1, // color formats
VK_IMAGE_FORMAT_CLASS_96_BITS = 2,
@@ -1176,11 +1176,11 @@ typedef enum _VK_IMAGE_FORMAT_CLASS
VK_IMAGE_FORMAT_CLASS_BEGIN_RANGE = VK_IMAGE_FORMAT_CLASS_128_BITS,
VK_IMAGE_FORMAT_CLASS_END_RANGE = VK_IMAGE_FORMAT_CLASS_LINEAR,
VK_NUM_IMAGE_FORMAT_CLASS = (VK_IMAGE_FORMAT_CLASS_END_RANGE - VK_IMAGE_FORMAT_CLASS_BEGIN_RANGE + 1),
- VK_MAX_ENUM(_VK_IMAGE_FORMAT_CLASS)
-} VK_IMAGE_FORMAT_CLASS;
+ VK_MAX_ENUM(VkImageFormatClass_)
+} VkImageFormatClass;
// Image and image allocation usage flags
-typedef enum _VK_IMAGE_USAGE_FLAGS
+typedef enum VkImageUsageFlags_
{
VK_IMAGE_USAGE_GENERAL = 0x00000000, // no special usage
VK_IMAGE_USAGE_SHADER_ACCESS_READ_BIT = 0x00000001, // shader read (e.g. texture, image)
@@ -1193,52 +1193,52 @@ typedef enum _VK_IMAGE_USAGE_FLAGS
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT = 0x00000080, // framebuffer color attachment
VK_IMAGE_USAGE_DEPTH_STENCIL_BIT = 0x00000100, // framebuffer depth/stencil
VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT = 0x00000200, // image data not needed outside of rendering.
- VK_MAX_ENUM(_VK_IMAGE_USAGE_FLAGS)
-} VK_IMAGE_USAGE_FLAGS;
+ VK_MAX_ENUM(VkImageUsageFlags_)
+} VkImageUsageFlags;
// Image flags
-typedef enum _VK_IMAGE_CREATE_FLAGS
+typedef enum VkImageCreateFlags_
{
VK_IMAGE_CREATE_INVARIANT_DATA_BIT = 0x00000001,
VK_IMAGE_CREATE_CLONEABLE_BIT = 0x00000002,
VK_IMAGE_CREATE_SHAREABLE_BIT = 0x00000004,
VK_IMAGE_CREATE_SPARSE_BIT = 0x00000008,
VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT = 0x00000010, // Allows image views to have different format than the base image
- VK_MAX_ENUM(_VK_IMAGE_CREATE_FLAGS)
-} VK_IMAGE_CREATE_FLAGS;
+ VK_MAX_ENUM(VkImageCreateFlags_)
+} VkImageCreateFlags;
// Depth-stencil view creation flags
-typedef enum _VK_DEPTH_STENCIL_VIEW_CREATE_FLAGS
+typedef enum VkDepthStencilViewCreateFlags_
{
VK_DEPTH_STENCIL_VIEW_CREATE_READ_ONLY_DEPTH_BIT = 0x00000001,
VK_DEPTH_STENCIL_VIEW_CREATE_READ_ONLY_STENCIL_BIT = 0x00000002,
- VK_MAX_ENUM(_VK_DEPTH_STENCIL_VIEW_CREATE_FLAGS)
-} VK_DEPTH_STENCIL_VIEW_CREATE_FLAGS;
+ VK_MAX_ENUM(VkDepthStencilViewCreateFlags_)
+} VkDepthStencilViewCreateFlags;
// Pipeline creation flags
-typedef enum _VK_PIPELINE_CREATE_FLAGS
+typedef enum VkPipelineCreateFlags_
{
VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT = 0x00000001,
VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT = 0x00000002,
- VK_MAX_ENUM(_VK_PIPELINE_CREATE_FLAGS)
-} VK_PIPELINE_CREATE_FLAGS;
+ VK_MAX_ENUM(VkPipelineCreateFlags_)
+} VkPipelineCreateFlags;
// Fence creation flags
-typedef enum _VK_FENCE_CREATE_FLAGS
+typedef enum VkFenceCreateFlags_
{
VK_FENCE_CREATE_SIGNALED_BIT = 0x00000001,
- VK_MAX_ENUM(_VK_FENCE_CREATE_FLAGS)
-} VK_FENCE_CREATE_FLAGS;
+ VK_MAX_ENUM(VkFenceCreateFlags_)
+} VkFenceCreateFlags;
// Semaphore creation flags
-typedef enum _VK_SEMAPHORE_CREATE_FLAGS
+typedef enum VkSemaphoreCreateFlags_
{
VK_SEMAPHORE_CREATE_SHAREABLE_BIT = 0x00000001,
- VK_MAX_ENUM(_VK_SEMAPHORE_CREATE_FLAGS)
-} VK_SEMAPHORE_CREATE_FLAGS;
+ VK_MAX_ENUM(VkSemaphoreCreateFlags_)
+} VkSemaphoreCreateFlags;
// Format capability flags
-typedef enum _VK_FORMAT_FEATURE_FLAGS
+typedef enum VkFormatFeatureFlags_
{
VK_FORMAT_IMAGE_SHADER_READ_BIT = 0x00000001,
VK_FORMAT_IMAGE_SHADER_WRITE_BIT = 0x00000002,
@@ -1250,18 +1250,18 @@ typedef enum _VK_FORMAT_FEATURE_FLAGS
VK_FORMAT_STENCIL_ATTACHMENT_BIT = 0x00000080,
VK_FORMAT_MSAA_ATTACHMENT_BIT = 0x00000100,
VK_FORMAT_CONVERSION_BIT = 0x00000200,
- VK_MAX_ENUM(_VK_FORMAT_FEATURE_FLAGS)
-} VK_FORMAT_FEATURE_FLAGS;
+ VK_MAX_ENUM(VkFormatFeatureFlags_)
+} VkFormatFeatureFlags;
// Query flags
-typedef enum _VK_QUERY_CONTROL_FLAGS
+typedef enum VkQueryControlFlags_
{
VK_QUERY_IMPRECISE_DATA_BIT = 0x00000001,
- VK_MAX_ENUM(_VK_QUERY_CONTROL_FLAGS)
-} VK_QUERY_CONTROL_FLAGS;
+ VK_MAX_ENUM(VkQueryControlFlags_)
+} VkQueryControlFlags;
// GPU compatibility flags
-typedef enum _VK_GPU_COMPATIBILITY_FLAGS
+typedef enum VkGpuCompatibilityFlags_
{
VK_GPU_COMPAT_ASIC_FEATURES_BIT = 0x00000001,
VK_GPU_COMPAT_IQ_MATCH_BIT = 0x00000002,
@@ -1270,49 +1270,49 @@ typedef enum _VK_GPU_COMPATIBILITY_FLAGS
VK_GPU_COMPAT_SHARED_SYNC_BIT = 0x00000010,
VK_GPU_COMPAT_SHARED_GPU0_DISPLAY_BIT = 0x00000020,
VK_GPU_COMPAT_SHARED_GPU1_DISPLAY_BIT = 0x00000040,
- VK_MAX_ENUM(_VK_GPU_COMPATIBILITY_FLAGS)
-} VK_GPU_COMPATIBILITY_FLAGS;
+ VK_MAX_ENUM(VkGpuCompatibilityFlags_)
+} VkGpuCompatibilityFlags;
// Command buffer building flags
-typedef enum _VK_CMD_BUFFER_BUILD_FLAGS
+typedef enum VkCmdBufferBuildFlags_
{
VK_CMD_BUFFER_OPTIMIZE_GPU_SMALL_BATCH_BIT = 0x00000001,
VK_CMD_BUFFER_OPTIMIZE_PIPELINE_SWITCH_BIT = 0x00000002,
VK_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT = 0x00000004,
VK_CMD_BUFFER_OPTIMIZE_DESCRIPTOR_SET_SWITCH_BIT = 0x00000008,
- VK_MAX_ENUM(_VK_CMD_BUFFER_BUILD_FLAGS)
-} VK_CMD_BUFFER_BUILD_FLAGS;
+ VK_MAX_ENUM(VkCmdBufferBuildFlags_)
+} VkCmdBufferBuildFlags;
// ------------------------------------------------------------------------------------------------
// Vulkan structures
-typedef struct _VK_OFFSET2D
+typedef struct VkOffset2D_
{
int32_t x;
int32_t y;
-} VK_OFFSET2D;
+} VkOffset2D;
-typedef struct _VK_OFFSET3D
+typedef struct VkOffset3D_
{
int32_t x;
int32_t y;
int32_t z;
-} VK_OFFSET3D;
+} VkOffset3D;
-typedef struct _VK_EXTENT2D
+typedef struct VkExtent2D_
{
int32_t width;
int32_t height;
-} VK_EXTENT2D;
+} VkExtent2D;
-typedef struct _VK_EXTENT3D
+typedef struct VkExtent3D_
{
int32_t width;
int32_t height;
int32_t depth;
-} VK_EXTENT3D;
+} VkExtent3D;
-typedef struct _VK_VIEWPORT
+typedef struct VkViewport_
{
float originX;
float originY;
@@ -1320,31 +1320,31 @@ typedef struct _VK_VIEWPORT
float height;
float minDepth;
float maxDepth;
-} VK_VIEWPORT;
+} VkViewport;
-typedef struct _VK_RECT
+typedef struct VkRect_
{
- VK_OFFSET2D offset;
- VK_EXTENT2D extent;
-} VK_RECT;
+ VkOffset2D offset;
+ VkExtent2D extent;
+} VkRect;
-typedef struct _VK_CHANNEL_MAPPING
+typedef struct VkChannelMapping_
{
- VK_CHANNEL_SWIZZLE r;
- VK_CHANNEL_SWIZZLE g;
- VK_CHANNEL_SWIZZLE b;
- VK_CHANNEL_SWIZZLE a;
-} VK_CHANNEL_MAPPING;
+ VkChannelSwizzle r;
+ VkChannelSwizzle g;
+ VkChannelSwizzle b;
+ VkChannelSwizzle a;
+} VkChannelMapping;
-typedef struct _VK_PHYSICAL_GPU_PROPERTIES
+typedef struct VkPhysicalGpuProperties_
{
uint32_t apiVersion;
uint32_t driverVersion;
uint32_t vendorId;
uint32_t deviceId;
- VK_PHYSICAL_GPU_TYPE gpuType;
+ VkPhysicalGpuType gpuType;
char gpuName[VK_MAX_PHYSICAL_GPU_NAME];
- VK_GPU_SIZE maxInlineMemoryUpdateSize;
+ VkGpuSize maxInlineMemoryUpdateSize;
uint32_t maxBoundDescriptorSets;
uint32_t maxThreadGroupSize;
uint64_t timestampFrequency;
@@ -1352,49 +1352,49 @@ typedef struct _VK_PHYSICAL_GPU_PROPERTIES
uint32_t maxDescriptorSets; // at least 2?
uint32_t maxViewports; // at least 16?
uint32_t maxColorAttachments; // at least 8?
-} VK_PHYSICAL_GPU_PROPERTIES;
+} VkPhysicalGpuProperties;
-typedef struct _VK_PHYSICAL_GPU_PERFORMANCE
+typedef struct VkPhysicalGpuPerformance_
{
float maxGpuClock;
float aluPerClock;
float texPerClock;
float primsPerClock;
float pixelsPerClock;
-} VK_PHYSICAL_GPU_PERFORMANCE;
+} VkPhysicalGpuPerformance;
-typedef struct _VK_GPU_COMPATIBILITY_INFO
+typedef struct VkGpuCompatibilityInfo_
{
- VK_FLAGS compatibilityFlags; // VK_GPU_COMPATIBILITY_FLAGS
-} VK_GPU_COMPATIBILITY_INFO;
+ VkFlags compatibilityFlags; // VkGpuCompatibilityFlags
+} VkGpuCompatibilityInfo;
-typedef struct _VK_APPLICATION_INFO
+typedef struct VkApplicationInfo_
{
- VK_STRUCTURE_TYPE sType; // Type of structure. Should be VK_STRUCTURE_TYPE_APPLICATION_INFO
+ VkStructureType sType; // Type of structure. Should be VK_STRUCTURE_TYPE_APPLICATION_INFO
const void* pNext; // Next structure in chain
const char* pAppName;
uint32_t appVersion;
const char* pEngineName;
uint32_t engineVersion;
uint32_t apiVersion;
-} VK_APPLICATION_INFO;
+} VkApplicationInfo;
-typedef void* (VKAPI *VK_ALLOC_FUNCTION)(
+typedef void* (VKAPI *PFN_vkAllocFunction)(
void* pUserData,
size_t size,
size_t alignment,
- VK_SYSTEM_ALLOC_TYPE allocType);
+ VkSystemAllocType allocType);
-typedef void (VKAPI *VK_FREE_FUNCTION)(
+typedef void (VKAPI *PFN_vkFreeFunction)(
void* pUserData,
void* pMem);
-typedef struct _VK_ALLOC_CALLBACKS
+typedef struct VkAllocCallbacks_
{
void* pUserData;
- VK_ALLOC_FUNCTION pfnAlloc;
- VK_FREE_FUNCTION pfnFree;
-} VK_ALLOC_CALLBACKS;
+ PFN_vkAllocFunction pfnAlloc;
+ PFN_vkFreeFunction pfnFree;
+} VkAllocCallbacks;
typedef struct _VkDeviceQueueCreateInfo
{
@@ -1404,22 +1404,22 @@ typedef struct _VkDeviceQueueCreateInfo
typedef struct _VkDeviceCreateInfo
{
- VK_STRUCTURE_TYPE sType; // Should be VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO
+ VkStructureType sType; // Should be VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO
const void* pNext; // Pointer to next structure
uint32_t queueRecordCount;
const VkDeviceQueueCreateInfo* pRequestedQueues;
uint32_t extensionCount;
const char*const* ppEnabledExtensionNames;
- VK_VALIDATION_LEVEL maxValidationLevel;
- VK_FLAGS flags; // VK_DEVICE_CREATE_FLAGS
+ VkValidationLevel maxValidationLevel;
+ VkFlags flags; // VkDeviceCreateFlags
} VkDeviceCreateInfo;
typedef struct _VkInstanceCreateInfo
{
- VK_STRUCTURE_TYPE sType; // Should be VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO
+ VkStructureType sType; // Should be VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO
const void* pNext; // Pointer to next structure
- const VK_APPLICATION_INFO* pAppInfo;
- const VK_ALLOC_CALLBACKS* pAllocCb;
+ const VkApplicationInfo* pAppInfo;
+ const VkAllocCallbacks* pAllocCb;
uint32_t extensionCount;
const char*const* ppEnabledExtensionNames; // layer or extension name to be enabled
} VkInstanceCreateInfo;
@@ -1427,656 +1427,656 @@ typedef struct _VkInstanceCreateInfo
// can be added to VkDeviceCreateInfo or VkInstanceCreateInfo via pNext
typedef struct _VkLayerCreateInfo
{
- VK_STRUCTURE_TYPE sType; // Should be VK_STRUCTURE_TYPE_LAYER_CREATE_INFO
+ VkStructureType sType; // Should be VK_STRUCTURE_TYPE_LAYER_CREATE_INFO
const void* pNext; // Pointer to next structure
uint32_t layerCount;
const char *const* ppActiveLayerNames; // layer name from the layer's vkEnumerateLayers())
} VkLayerCreateInfo;
-typedef struct _VK_PHYSICAL_GPU_QUEUE_PROPERTIES
+typedef struct VkPhysicalGpuQueueProperties_
{
- VK_FLAGS queueFlags; // VK_QUEUE_FLAGS
+ VkFlags queueFlags; // VkQueueFlags
uint32_t queueCount;
uint32_t maxAtomicCounters;
bool32_t supportsTimestamps;
uint32_t maxMemReferences; // Tells how many memory references can be active for the given queue
-} VK_PHYSICAL_GPU_QUEUE_PROPERTIES;
+} VkPhysicalGpuQueueProperties;
-typedef struct _VK_PHYSICAL_GPU_MEMORY_PROPERTIES
+typedef struct VkPhysicalGpuMemoryProperties_
{
bool32_t supportsMigration;
bool32_t supportsPinning;
-} VK_PHYSICAL_GPU_MEMORY_PROPERTIES;
+} VkPhysicalGpuMemoryProperties;
typedef struct _VkMemoryAllocInfo
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO
const void* pNext; // Pointer to next structure
- VK_GPU_SIZE allocationSize; // Size of memory allocation
- VK_FLAGS memProps; // VK_MEMORY_PROPERTY_FLAGS
- VK_MEMORY_TYPE memType;
- VK_MEMORY_PRIORITY memPriority;
+ VkGpuSize allocationSize; // Size of memory allocation
+ VkFlags memProps; // VkMemoryPropertyFlags
+ VkMemoryType memType;
+ VkMemoryPriority memPriority;
} VkMemoryAllocInfo;
// This structure is included in the VkMemoryAllocInfo chain
// for memory regions allocated for buffer usage.
typedef struct _VkMemoryAllocBufferInfo
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_MEMORY_ALLOC_BUFFER_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_MEMORY_ALLOC_BUFFER_INFO
const void* pNext; // Pointer to next structure
- VK_FLAGS usage; // VK_BUFFER_USAGE_FLAGS
+ VkFlags usage; // VkBufferUsageFlags
} VkMemoryAllocBufferInfo;
// This structure is included in the VkMemoryAllocInfo chain
// for memory regions allocated for image usage.
typedef struct _VkMemoryAllocImageInfo
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_MEMORY_ALLOC_IMAGE_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_MEMORY_ALLOC_IMAGE_INFO
const void* pNext; // Pointer to next structure
- VK_FLAGS usage; // VK_IMAGE_USAGE_FLAGS
- VK_IMAGE_FORMAT_CLASS formatClass;
+ VkFlags usage; // VkImageUsageFlags
+ VkImageFormatClass formatClass;
uint32_t samples;
} VkMemoryAllocImageInfo;
-typedef struct _VK_MEMORY_OPEN_INFO
+typedef struct VkMemoryOpenInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_MEMORY_OPEN_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_MEMORY_OPEN_INFO
const void* pNext; // Pointer to next structure
- VK_GPU_MEMORY sharedMem;
-} VK_MEMORY_OPEN_INFO;
+ VkGpuMemory sharedMem;
+} VkMemoryOpenInfo;
-typedef struct _VK_PEER_MEMORY_OPEN_INFO
+typedef struct VkPeerMemoryOpenInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_PEER_MEMORY_OPEN_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_PEER_MEMORY_OPEN_INFO
const void* pNext; // Pointer to next structure
- VK_GPU_MEMORY originalMem;
-} VK_PEER_MEMORY_OPEN_INFO;
+ VkGpuMemory originalMem;
+} VkPeerMemoryOpenInfo;
-typedef struct _VK_MEMORY_REQUIREMENTS
+typedef struct VkMemoryRequirements_
{
- VK_GPU_SIZE size; // Specified in bytes
- VK_GPU_SIZE alignment; // Specified in bytes
- VK_GPU_SIZE granularity; // Granularity on which vkBindObjectMemoryRange can bind sub-ranges of memory specified in bytes (usually the page size)
- VK_FLAGS memProps; // VK_MEMORY_PROPERTY_FLAGS
- VK_MEMORY_TYPE memType;
-} VK_MEMORY_REQUIREMENTS;
+ VkGpuSize size; // Specified in bytes
+ VkGpuSize alignment; // Specified in bytes
+ VkGpuSize granularity; // Granularity on which vkBindObjectMemoryRange can bind sub-ranges of memory specified in bytes (usually the page size)
+ VkFlags memProps; // VkMemoryPropertyFlags
+ VkMemoryType memType;
+} VkMemoryRequirements;
-typedef struct _VK_BUFFER_MEMORY_REQUIREMENTS
+typedef struct VkBufferMemoryRequirements_
{
- VK_FLAGS usage; // VK_BUFFER_USAGE_FLAGS
-} VK_BUFFER_MEMORY_REQUIREMENTS;
+ VkFlags usage; // VkBufferUsageFlags
+} VkBufferMemoryRequirements;
-typedef struct _VK_IMAGE_MEMORY_REQUIREMENTS
+typedef struct VkImageMemoryRequirements_
{
- VK_FLAGS usage; // VK_IMAGE_USAGE_FLAGS
- VK_IMAGE_FORMAT_CLASS formatClass;
+ VkFlags usage; // VkImageUsageFlags
+ VkImageFormatClass formatClass;
uint32_t samples;
-} VK_IMAGE_MEMORY_REQUIREMENTS;
+} VkImageMemoryRequirements;
-typedef struct _VK_FORMAT_PROPERTIES
+typedef struct VkFormatProperties_
{
- VK_FLAGS linearTilingFeatures; // VK_FORMAT_FEATURE_FLAGS
- VK_FLAGS optimalTilingFeatures; // VK_FORMAT_FEATURE_FLAGS
-} VK_FORMAT_PROPERTIES;
+ VkFlags linearTilingFeatures; // VkFormatFeatureFlags
+ VkFlags optimalTilingFeatures; // VkFormatFeatureFlags
+} VkFormatProperties;
-typedef struct _VK_BUFFER_VIEW_ATTACH_INFO
+typedef struct VkBufferViewAttachInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_BUFFER_VIEW_ATTACH_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_BUFFER_VIEW_ATTACH_INFO
const void* pNext; // Pointer to next structure
- VK_BUFFER_VIEW view;
-} VK_BUFFER_VIEW_ATTACH_INFO;
+ VkBufferView view;
+} VkBufferViewAttachInfo;
-typedef struct _VK_IMAGE_VIEW_ATTACH_INFO
+typedef struct VkImageViewAttachInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_IMAGE_VIEW_ATTACH_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_IMAGE_VIEW_ATTACH_INFO
const void* pNext; // Pointer to next structure
- VK_IMAGE_VIEW view;
- VK_IMAGE_LAYOUT layout;
-} VK_IMAGE_VIEW_ATTACH_INFO;
+ VkImageView view;
+ VkImageLayout layout;
+} VkImageViewAttachInfo;
-typedef struct _VK_UPDATE_SAMPLERS
+typedef struct VkUpdateSamplers_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_UPDATE_SAMPLERS
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_UPDATE_SAMPLERS
const void* pNext; // Pointer to next structure
uint32_t binding; // Binding of the sampler (array)
uint32_t arrayIndex; // First element of the array to update or zero otherwise
uint32_t count; // Number of elements to update
- const VK_SAMPLER* pSamplers;
-} VK_UPDATE_SAMPLERS;
+ const VkSampler* pSamplers;
+} VkUpdateSamplers;
-typedef struct _VK_SAMPLER_IMAGE_VIEW_INFO
+typedef struct VkSamplerImageViewInfo_
{
- VK_SAMPLER sampler;
- const VK_IMAGE_VIEW_ATTACH_INFO* pImageView;
-} VK_SAMPLER_IMAGE_VIEW_INFO;
+ VkSampler sampler;
+ const VkImageViewAttachInfo* pImageView;
+} VkSamplerImageViewInfo;
-typedef struct _VK_UPDATE_SAMPLER_TEXTURES
+typedef struct VkUpdateSamplerTextures_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_UPDATE_SAMPLER_TEXTURES
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_UPDATE_SAMPLER_TEXTURES
const void* pNext; // Pointer to next structure
uint32_t binding; // Binding of the combined texture sampler (array)
uint32_t arrayIndex; // First element of the array to update or zero otherwise
uint32_t count; // Number of elements to update
- const VK_SAMPLER_IMAGE_VIEW_INFO* pSamplerImageViews;
-} VK_UPDATE_SAMPLER_TEXTURES;
+ const VkSamplerImageViewInfo* pSamplerImageViews;
+} VkUpdateSamplerTextures;
-typedef struct _VK_UPDATE_IMAGES
+typedef struct VkUpdateImages_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_UPDATE_IMAGES
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_UPDATE_IMAGES
const void* pNext; // Pointer to next structure
- VK_DESCRIPTOR_TYPE descriptorType;
+ VkDescriptorType descriptorType;
uint32_t binding; // Binding of the image (array)
uint32_t arrayIndex; // First element of the array to update or zero otherwise
uint32_t count; // Number of elements to update
- const VK_IMAGE_VIEW_ATTACH_INFO* pImageViews;
-} VK_UPDATE_IMAGES;
+ const VkImageViewAttachInfo* pImageViews;
+} VkUpdateImages;
-typedef struct _VK_UPDATE_BUFFERS
+typedef struct VkUpdateBuffers_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_UPDATE_BUFFERS
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_UPDATE_BUFFERS
const void* pNext; // Pointer to next structure
- VK_DESCRIPTOR_TYPE descriptorType;
+ VkDescriptorType descriptorType;
uint32_t binding; // Binding of the buffer (array)
uint32_t arrayIndex; // First element of the array to update or zero otherwise
uint32_t count; // Number of elements to update
- const VK_BUFFER_VIEW_ATTACH_INFO* pBufferViews;
-} VK_UPDATE_BUFFERS;
+ const VkBufferViewAttachInfo* pBufferViews;
+} VkUpdateBuffers;
-typedef struct _VK_UPDATE_AS_COPY
+typedef struct VkUpdateAsCopy_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_UPDATE_AS_COPY
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_UPDATE_AS_COPY
const void* pNext; // Pointer to next structure
- VK_DESCRIPTOR_TYPE descriptorType;
- VK_DESCRIPTOR_SET descriptorSet;
+ VkDescriptorType descriptorType;
+ VkDescriptorSet descriptorSet;
uint32_t binding;
uint32_t arrayElement;
uint32_t count;
-} VK_UPDATE_AS_COPY;
+} VkUpdateAsCopy;
typedef struct _VkBufferCreateInfo
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO
const void* pNext; // Pointer to next structure.
- VK_GPU_SIZE size; // Specified in bytes
- VK_FLAGS usage; // VK_BUFFER_USAGE_FLAGS
- VK_FLAGS flags; // VK_BUFFER_CREATE_FLAGS
+ VkGpuSize size; // Specified in bytes
+ VkFlags usage; // VkBufferUsageFlags
+ VkFlags flags; // VkBufferCreateFlags
} VkBufferCreateInfo;
typedef struct _VkBufferViewCreateInfo
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO
const void* pNext; // Pointer to next structure.
- VK_BUFFER buffer;
- VK_BUFFER_VIEW_TYPE viewType;
- VK_FORMAT format; // Optionally specifies format of elements
- VK_GPU_SIZE offset; // Specified in bytes
- VK_GPU_SIZE range; // View size specified in bytes
+ VkBuffer buffer;
+ VkBufferViewType viewType;
+ VkFormat format; // Optionally specifies format of elements
+ VkGpuSize offset; // Specified in bytes
+ VkGpuSize range; // View size specified in bytes
} VkBufferViewCreateInfo;
-typedef struct _VK_IMAGE_SUBRESOURCE
+typedef struct VkImageSubresource_
{
- VK_IMAGE_ASPECT aspect;
+ VkImageAspect aspect;
uint32_t mipLevel;
uint32_t arraySlice;
-} VK_IMAGE_SUBRESOURCE;
+} VkImageSubresource;
-typedef struct _VK_IMAGE_SUBRESOURCE_RANGE
+typedef struct VkImageSubresourceRange_
{
- VK_IMAGE_ASPECT aspect;
+ VkImageAspect aspect;
uint32_t baseMipLevel;
uint32_t mipLevels;
uint32_t baseArraySlice;
uint32_t arraySize;
-} VK_IMAGE_SUBRESOURCE_RANGE;
+} VkImageSubresourceRange;
-typedef struct _VK_EVENT_WAIT_INFO
+typedef struct VkEventWaitInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_EVENT_WAIT_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_EVENT_WAIT_INFO
const void* pNext; // Pointer to next structure.
uint32_t eventCount; // Number of events to wait on
- const VK_EVENT* pEvents; // Array of event objects to wait on
+ const VkEvent* pEvents; // Array of event objects to wait on
- VK_WAIT_EVENT waitEvent; // Pipeline event where the wait should happen
+ VkWaitEvent waitEvent; // Pipeline event where the wait should happen
uint32_t memBarrierCount; // Number of memory barriers
- const void** ppMemBarriers; // Array of pointers to memory barriers (any of them can be either VK_MEMORY_BARRIER, VK_BUFFER_MEMORY_BARRIER, or VK_IMAGE_MEMORY_BARRIER)
-} VK_EVENT_WAIT_INFO;
+ const void** ppMemBarriers; // Array of pointers to memory barriers (any of them can be either VkMemoryBarrier, VkBufferMemoryBarrier, or VkImageMemoryBarrier)
+} VkEventWaitInfo;
-typedef struct _VK_PIPELINE_BARRIER
+typedef struct VkPipelineBarrier_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_PIPELINE_BARRIER
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_PIPELINE_BARRIER
const void* pNext; // Pointer to next structure.
uint32_t eventCount; // Number of events to wait on
- const VK_PIPE_EVENT* pEvents; // Array of pipeline events to wait on
+ const VkPipeEvent* pEvents; // Array of pipeline events to wait on
- VK_WAIT_EVENT waitEvent; // Pipeline event where the wait should happen
+ VkWaitEvent waitEvent; // Pipeline event where the wait should happen
uint32_t memBarrierCount; // Number of memory barriers
- const void** ppMemBarriers; // Array of pointers to memory barriers (any of them can be either VK_MEMORY_BARRIER, VK_BUFFER_MEMORY_BARRIER, or VK_IMAGE_MEMORY_BARRIER)
-} VK_PIPELINE_BARRIER;
+ const void** ppMemBarriers; // Array of pointers to memory barriers (any of them can be either VkMemoryBarrier, VkBufferMemoryBarrier, or VkImageMemoryBarrier)
+} VkPipelineBarrier;
-typedef struct _VK_MEMORY_BARRIER
+typedef struct VkMemoryBarrier_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_MEMORY_BARRIER
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_MEMORY_BARRIER
const void* pNext; // Pointer to next structure.
- VK_FLAGS outputMask; // Outputs the barrier should sync (see VK_MEMORY_OUTPUT_FLAGS)
- VK_FLAGS inputMask; // Inputs the barrier should sync to (see VK_MEMORY_INPUT_FLAGS)
-} VK_MEMORY_BARRIER;
+ VkFlags outputMask; // Outputs the barrier should sync (see VkMemoryOutputFlags)
+ VkFlags inputMask; // Inputs the barrier should sync to (see VkMemoryInputFlags)
+} VkMemoryBarrier;
-typedef struct _VK_BUFFER_MEMORY_BARRIER
+typedef struct VkBufferMemoryBarrier_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER
const void* pNext; // Pointer to next structure.
- VK_FLAGS outputMask; // Outputs the barrier should sync (see VK_MEMORY_OUTPUT_FLAGS)
- VK_FLAGS inputMask; // Inputs the barrier should sync to (see VK_MEMORY_INPUT_FLAGS)
+ VkFlags outputMask; // Outputs the barrier should sync (see VkMemoryOutputFlags)
+ VkFlags inputMask; // Inputs the barrier should sync to (see VkMemoryInputFlags)
- VK_BUFFER buffer; // Buffer to sync
+ VkBuffer buffer; // Buffer to sync
- VK_GPU_SIZE offset; // Offset within the buffer to sync
- VK_GPU_SIZE size; // Amount of bytes to sync
-} VK_BUFFER_MEMORY_BARRIER;
+ VkGpuSize offset; // Offset within the buffer to sync
+ VkGpuSize size; // Amount of bytes to sync
+} VkBufferMemoryBarrier;
-typedef struct _VK_IMAGE_MEMORY_BARRIER
+typedef struct VkImageMemoryBarrier_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER
const void* pNext; // Pointer to next structure.
- VK_FLAGS outputMask; // Outputs the barrier should sync (see VK_MEMORY_OUTPUT_FLAGS)
- VK_FLAGS inputMask; // Inputs the barrier should sync to (see VK_MEMORY_INPUT_FLAGS)
+ VkFlags outputMask; // Outputs the barrier should sync (see VkMemoryOutputFlags)
+ VkFlags inputMask; // Inputs the barrier should sync to (see VkMemoryInputFlags)
- VK_IMAGE_LAYOUT oldLayout; // Current layout of the image
- VK_IMAGE_LAYOUT newLayout; // New layout to transition the image to
+ VkImageLayout oldLayout; // Current layout of the image
+ VkImageLayout newLayout; // New layout to transition the image to
- VK_IMAGE image; // Image to sync
+ VkImage image; // Image to sync
- VK_IMAGE_SUBRESOURCE_RANGE subresourceRange; // Subresource range to sync
-} VK_IMAGE_MEMORY_BARRIER;
+ VkImageSubresourceRange subresourceRange; // Subresource range to sync
+} VkImageMemoryBarrier;
-typedef struct _VK_IMAGE_CREATE_INFO
+typedef struct VkImageCreateInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO
const void* pNext; // Pointer to next structure.
- VK_IMAGE_TYPE imageType;
- VK_FORMAT format;
- VK_EXTENT3D extent;
+ VkImageType imageType;
+ VkFormat format;
+ VkExtent3D extent;
uint32_t mipLevels;
uint32_t arraySize;
uint32_t samples;
- VK_IMAGE_TILING tiling;
- VK_FLAGS usage; // VK_IMAGE_USAGE_FLAGS
- VK_FLAGS flags; // VK_IMAGE_CREATE_FLAGS
-} VK_IMAGE_CREATE_INFO;
+ VkImageTiling tiling;
+ VkFlags usage; // VkImageUsageFlags
+ VkFlags flags; // VkImageCreateFlags
+} VkImageCreateInfo;
-typedef struct _VK_PEER_IMAGE_OPEN_INFO
+typedef struct VkPeerImageOpenInfo_
{
- VK_IMAGE originalImage;
-} VK_PEER_IMAGE_OPEN_INFO;
+ VkImage originalImage;
+} VkPeerImageOpenInfo;
-typedef struct _VK_SUBRESOURCE_LAYOUT
+typedef struct VkSubresourceLayout_
{
- VK_GPU_SIZE offset; // Specified in bytes
- VK_GPU_SIZE size; // Specified in bytes
- VK_GPU_SIZE rowPitch; // Specified in bytes
- VK_GPU_SIZE depthPitch; // Specified in bytes
-} VK_SUBRESOURCE_LAYOUT;
+ VkGpuSize offset; // Specified in bytes
+ VkGpuSize size; // Specified in bytes
+ VkGpuSize rowPitch; // Specified in bytes
+ VkGpuSize depthPitch; // Specified in bytes
+} VkSubresourceLayout;
-typedef struct _VK_IMAGE_VIEW_CREATE_INFO
+typedef struct VkImageViewCreateInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
const void* pNext; // Pointer to next structure
- VK_IMAGE image;
- VK_IMAGE_VIEW_TYPE viewType;
- VK_FORMAT format;
- VK_CHANNEL_MAPPING channels;
- VK_IMAGE_SUBRESOURCE_RANGE subresourceRange;
+ VkImage image;
+ VkImageViewType viewType;
+ VkFormat format;
+ VkChannelMapping channels;
+ VkImageSubresourceRange subresourceRange;
float minLod;
-} VK_IMAGE_VIEW_CREATE_INFO;
+} VkImageViewCreateInfo;
-typedef struct _VK_COLOR_ATTACHMENT_VIEW_CREATE_INFO
+typedef struct VkColorAttachmentViewCreateInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_COLOR_ATTACHMENT_VIEW_CREATE_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_COLOR_ATTACHMENT_VIEW_CREATE_INFO
const void* pNext; // Pointer to next structure
- VK_IMAGE image;
- VK_FORMAT format;
+ VkImage image;
+ VkFormat format;
uint32_t mipLevel;
uint32_t baseArraySlice;
uint32_t arraySize;
- VK_IMAGE msaaResolveImage;
- VK_IMAGE_SUBRESOURCE_RANGE msaaResolveSubResource;
-} VK_COLOR_ATTACHMENT_VIEW_CREATE_INFO;
+ VkImage msaaResolveImage;
+ VkImageSubresourceRange msaaResolveSubResource;
+} VkColorAttachmentViewCreateInfo;
-typedef struct _VK_DEPTH_STENCIL_VIEW_CREATE_INFO
+typedef struct VkDepthStencilViewCreateInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_DEPTH_STENCIL_VIEW_CREATE_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_DEPTH_STENCIL_VIEW_CREATE_INFO
const void* pNext; // Pointer to next structure
- VK_IMAGE image;
+ VkImage image;
uint32_t mipLevel;
uint32_t baseArraySlice;
uint32_t arraySize;
- VK_IMAGE msaaResolveImage;
- VK_IMAGE_SUBRESOURCE_RANGE msaaResolveSubResource;
- VK_FLAGS flags; // VK_DEPTH_STENCIL_VIEW_CREATE_FLAGS
-} VK_DEPTH_STENCIL_VIEW_CREATE_INFO;
+ VkImage msaaResolveImage;
+ VkImageSubresourceRange msaaResolveSubResource;
+ VkFlags flags; // VkDepthStencilViewCreateFlags
+} VkDepthStencilViewCreateInfo;
-typedef struct _VK_COLOR_ATTACHMENT_BIND_INFO
+typedef struct VkColorAttachmentBindInfo_
{
- VK_COLOR_ATTACHMENT_VIEW view;
- VK_IMAGE_LAYOUT layout;
-} VK_COLOR_ATTACHMENT_BIND_INFO;
+ VkColorAttachmentView view;
+ VkImageLayout layout;
+} VkColorAttachmentBindInfo;
-typedef struct _VK_DEPTH_STENCIL_BIND_INFO
+typedef struct VkDepthStencilBindInfo_
{
- VK_DEPTH_STENCIL_VIEW view;
- VK_IMAGE_LAYOUT layout;
-} VK_DEPTH_STENCIL_BIND_INFO;
+ VkDepthStencilView view;
+ VkImageLayout layout;
+} VkDepthStencilBindInfo;
-typedef struct _VK_BUFFER_COPY
+typedef struct VkBufferCopy_
{
- VK_GPU_SIZE srcOffset; // Specified in bytes
- VK_GPU_SIZE destOffset; // Specified in bytes
- VK_GPU_SIZE copySize; // Specified in bytes
-} VK_BUFFER_COPY;
+ VkGpuSize srcOffset; // Specified in bytes
+ VkGpuSize destOffset; // Specified in bytes
+ VkGpuSize copySize; // Specified in bytes
+} VkBufferCopy;
-typedef struct _VK_IMAGE_MEMORY_BIND_INFO
+typedef struct VkImageMemoryBindInfo_
{
- VK_IMAGE_SUBRESOURCE subresource;
- VK_OFFSET3D offset;
- VK_EXTENT3D extent;
-} VK_IMAGE_MEMORY_BIND_INFO;
+ VkImageSubresource subresource;
+ VkOffset3D offset;
+ VkExtent3D extent;
+} VkImageMemoryBindInfo;
-typedef struct _VK_IMAGE_COPY
+typedef struct VkImageCopy_
{
- VK_IMAGE_SUBRESOURCE srcSubresource;
- VK_OFFSET3D srcOffset;
- VK_IMAGE_SUBRESOURCE destSubresource;
- VK_OFFSET3D destOffset;
- VK_EXTENT3D extent;
-} VK_IMAGE_COPY;
+ VkImageSubresource srcSubresource;
+ VkOffset3D srcOffset;
+ VkImageSubresource destSubresource;
+ VkOffset3D destOffset;
+ VkExtent3D extent;
+} VkImageCopy;
-typedef struct _VK_IMAGE_BLIT
+typedef struct VkImageBlit_
{
- VK_IMAGE_SUBRESOURCE srcSubresource;
- VK_OFFSET3D srcOffset;
- VK_EXTENT3D srcExtent;
- VK_IMAGE_SUBRESOURCE destSubresource;
- VK_OFFSET3D destOffset;
- VK_EXTENT3D destExtent;
-} VK_IMAGE_BLIT;
+ VkImageSubresource srcSubresource;
+ VkOffset3D srcOffset;
+ VkExtent3D srcExtent;
+ VkImageSubresource destSubresource;
+ VkOffset3D destOffset;
+ VkExtent3D destExtent;
+} VkImageBlit;
-typedef struct _VK_BUFFER_IMAGE_COPY
+typedef struct VkBufferImageCopy_
{
- VK_GPU_SIZE bufferOffset; // Specified in bytes
- VK_IMAGE_SUBRESOURCE imageSubresource;
- VK_OFFSET3D imageOffset;
- VK_EXTENT3D imageExtent;
-} VK_BUFFER_IMAGE_COPY;
+ VkGpuSize bufferOffset; // Specified in bytes
+ VkImageSubresource imageSubresource;
+ VkOffset3D imageOffset;
+ VkExtent3D imageExtent;
+} VkBufferImageCopy;
-typedef struct _VK_IMAGE_RESOLVE
+typedef struct VkImageResolve_
{
- VK_IMAGE_SUBRESOURCE srcSubresource;
- VK_OFFSET2D srcOffset;
- VK_IMAGE_SUBRESOURCE destSubresource;
- VK_OFFSET2D destOffset;
- VK_EXTENT2D extent;
-} VK_IMAGE_RESOLVE;
+ VkImageSubresource srcSubresource;
+ VkOffset2D srcOffset;
+ VkImageSubresource destSubresource;
+ VkOffset2D destOffset;
+ VkExtent2D extent;
+} VkImageResolve;
-typedef struct _VK_SHADER_CREATE_INFO
+typedef struct VkShaderCreateInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_SHADER_CREATE_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_SHADER_CREATE_INFO
const void* pNext; // Pointer to next structure
size_t codeSize; // Specified in bytes
const void* pCode;
- VK_FLAGS flags; // Reserved
-} VK_SHADER_CREATE_INFO;
+ VkFlags flags; // Reserved
+} VkShaderCreateInfo;
-typedef struct _VK_DESCRIPTOR_SET_LAYOUT_BINDING
+typedef struct VkDescriptorSetLayoutBinding_
{
- VK_DESCRIPTOR_TYPE descriptorType;
+ VkDescriptorType descriptorType;
uint32_t count;
- VK_FLAGS stageFlags; // VK_SHADER_STAGE_FLAGS
- const VK_SAMPLER* pImmutableSamplers;
-} VK_DESCRIPTOR_SET_LAYOUT_BINDING;
+ VkFlags stageFlags; // VkShaderStageFlags
+ const VkSampler* pImmutableSamplers;
+} VkDescriptorSetLayoutBinding;
-typedef struct _VK_DESCRIPTOR_SET_LAYOUT_CREATE_INFO
+typedef struct VkDescriptorSetLayoutCreateInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO
const void* pNext; // Pointer to next structure
uint32_t count; // Number of bindings in the descriptor set layout
- const VK_DESCRIPTOR_SET_LAYOUT_BINDING* pBinding; // Array of descriptor set layout bindings
-} VK_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
+ const VkDescriptorSetLayoutBinding* pBinding; // Array of descriptor set layout bindings
+} VkDescriptorSetLayoutCreateInfo;
-typedef struct _VK_DESCRIPTOR_TYPE_COUNT
+typedef struct VkDescriptorTypeCount_
{
- VK_DESCRIPTOR_TYPE type;
+ VkDescriptorType type;
uint32_t count;
-} VK_DESCRIPTOR_TYPE_COUNT;
+} VkDescriptorTypeCount;
-typedef struct _VK_DESCRIPTOR_POOL_CREATE_INFO
+typedef struct VkDescriptorPoolCreateInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO
const void* pNext; // Pointer to next structure
uint32_t count;
- const VK_DESCRIPTOR_TYPE_COUNT* pTypeCount;
-} VK_DESCRIPTOR_POOL_CREATE_INFO;
+ const VkDescriptorTypeCount* pTypeCount;
+} VkDescriptorPoolCreateInfo;
-typedef struct _VK_LINK_CONST_BUFFER
+typedef struct VkLinkConstBuffer_
{
uint32_t bufferId;
size_t bufferSize;
const void* pBufferData;
-} VK_LINK_CONST_BUFFER;
+} VkLinkConstBuffer;
-typedef struct _VK_SPECIALIZATION_MAP_ENTRY
+typedef struct VkSpecializationMapEntry_
{
uint32_t constantId; // The SpecConstant ID specified in the BIL
uint32_t offset; // Offset of the value in the data block
-} VK_SPECIALIZATION_MAP_ENTRY;
+} VkSpecializationMapEntry;
-typedef struct _VK_SPECIALIZATION_INFO
+typedef struct VkSpecializationInfo_
{
uint32_t mapEntryCount;
- const VK_SPECIALIZATION_MAP_ENTRY* pMap; // mapEntryCount entries
+ const VkSpecializationMapEntry* pMap; // mapEntryCount entries
const void* pData;
-} VK_SPECIALIZATION_INFO;
+} VkSpecializationInfo;
-typedef struct _VK_PIPELINE_SHADER
+typedef struct VkPipelineShader_
{
- VK_PIPELINE_SHADER_STAGE stage;
- VK_SHADER shader;
+ VkPipelineShaderStage stage;
+ VkShader shader;
uint32_t linkConstBufferCount;
- const VK_LINK_CONST_BUFFER* pLinkConstBufferInfo;
- const VK_SPECIALIZATION_INFO* pSpecializationInfo;
-} VK_PIPELINE_SHADER;
+ const VkLinkConstBuffer* pLinkConstBufferInfo;
+ const VkSpecializationInfo* pSpecializationInfo;
+} VkPipelineShader;
-typedef struct _VK_COMPUTE_PIPELINE_CREATE_INFO
+typedef struct VkComputePipelineCreateInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO
const void* pNext; // Pointer to next structure
- VK_PIPELINE_SHADER cs;
- VK_FLAGS flags; // VK_PIPELINE_CREATE_FLAGS
- VK_DESCRIPTOR_SET_LAYOUT_CHAIN setLayoutChain;
+ VkPipelineShader cs;
+ VkFlags flags; // VkPipelineCreateFlags
+ VkDescriptorSetLayoutChain setLayoutChain;
uint32_t localSizeX;
uint32_t localSizeY;
uint32_t localSizeZ;
-} VK_COMPUTE_PIPELINE_CREATE_INFO;
+} VkComputePipelineCreateInfo;
-typedef struct _VK_VERTEX_INPUT_BINDING_DESCRIPTION
+typedef struct VkVertexInputBindingDescription_
{
uint32_t binding; // Vertex buffer binding id
uint32_t strideInBytes; // Distance between vertices in bytes (0 = no advancement)
- VK_VERTEX_INPUT_STEP_RATE stepRate; // Rate at which binding is incremented
-} VK_VERTEX_INPUT_BINDING_DESCRIPTION;
+ VkVertexInputStepRate stepRate; // Rate at which binding is incremented
+} VkVertexInputBindingDescription;
-typedef struct _VK_VERTEX_INPUT_ATTRIBUTE_DESCRIPTION
+typedef struct VkVertexInputAttributeDescription_
{
uint32_t location; // location of the shader vertex attrib
uint32_t binding; // Vertex buffer binding id
- VK_FORMAT format; // format of source data
+ VkFormat format; // format of source data
uint32_t offsetInBytes; // Offset of first element in bytes from base of vertex
-} VK_VERTEX_INPUT_ATTRIBUTE_DESCRIPTION;
+} VkVertexInputAttributeDescription;
-typedef struct _VK_PIPELINE_VERTEX_INPUT_CREATE_INFO
+typedef struct VkPipelineVertexInputCreateInfo_
{
- VK_STRUCTURE_TYPE sType; // Should be VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_CREATE_INFO
+ VkStructureType sType; // Should be VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_CREATE_INFO
const void* pNext; // Pointer to next structure
uint32_t bindingCount; // number of bindings
- const VK_VERTEX_INPUT_BINDING_DESCRIPTION* pVertexBindingDescriptions;
+ const VkVertexInputBindingDescription* pVertexBindingDescriptions;
uint32_t attributeCount; // number of attributes
- const VK_VERTEX_INPUT_ATTRIBUTE_DESCRIPTION* pVertexAttributeDescriptions;
-} VK_PIPELINE_VERTEX_INPUT_CREATE_INFO;
+ const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
+} VkPipelineVertexInputCreateInfo;
-typedef struct _VK_PIPELINE_IA_STATE_CREATE_INFO
+typedef struct VkPipelineIaStateCreateInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_PIPELINE_IA_STATE_CREATE_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_PIPELINE_IA_STATE_CREATE_INFO
const void* pNext; // Pointer to next structure
- VK_PRIMITIVE_TOPOLOGY topology;
+ VkPrimitiveTopology topology;
bool32_t disableVertexReuse; // optional
bool32_t primitiveRestartEnable;
uint32_t primitiveRestartIndex; // optional (GL45)
-} VK_PIPELINE_IA_STATE_CREATE_INFO;
+} VkPipelineIaStateCreateInfo;
-typedef struct _VK_PIPELINE_TESS_STATE_CREATE_INFO
+typedef struct VkPipelineTessStateCreateInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_PIPELINE_TESS_STATE_CREATE_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_PIPELINE_TESS_STATE_CREATE_INFO
const void* pNext; // Pointer to next structure
uint32_t patchControlPoints;
-} VK_PIPELINE_TESS_STATE_CREATE_INFO;
+} VkPipelineTessStateCreateInfo;
-typedef struct _VK_PIPELINE_VP_STATE_CREATE_INFO
+typedef struct VkPipelineVpStateCreateInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_PIPELINE_VP_STATE_CREATE_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_PIPELINE_VP_STATE_CREATE_INFO
const void* pNext; // Pointer to next structure
uint32_t numViewports;
- VK_COORDINATE_ORIGIN clipOrigin; // optional (GL45)
- VK_DEPTH_MODE depthMode; // optional (GL45)
-} VK_PIPELINE_VP_STATE_CREATE_INFO;
+ VkCoordinateOrigin clipOrigin; // optional (GL45)
+ VkDepthMode depthMode; // optional (GL45)
+} VkPipelineVpStateCreateInfo;
-typedef struct _VK_PIPELINE_RS_STATE_CREATE_INFO
+typedef struct VkPipelineRsStateCreateInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_PIPELINE_RS_STATE_CREATE_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_PIPELINE_RS_STATE_CREATE_INFO
const void* pNext; // Pointer to next structure
bool32_t depthClipEnable;
bool32_t rasterizerDiscardEnable;
bool32_t programPointSize; // optional (GL45)
- VK_COORDINATE_ORIGIN pointOrigin; // optional (GL45)
- VK_PROVOKING_VERTEX_CONVENTION provokingVertex; // optional (GL45)
- VK_FILL_MODE fillMode; // optional (GL45)
- VK_CULL_MODE cullMode;
- VK_FACE_ORIENTATION frontFace;
-} VK_PIPELINE_RS_STATE_CREATE_INFO;
+ VkCoordinateOrigin pointOrigin; // optional (GL45)
+ VkProvokingVertexConvention provokingVertex; // optional (GL45)
+ VkFillMode fillMode; // optional (GL45)
+ VkCullMode cullMode;
+ VkFaceOrientation frontFace;
+} VkPipelineRsStateCreateInfo;
-typedef struct _VK_PIPELINE_MS_STATE_CREATE_INFO
+typedef struct VkPipelineMsStateCreateInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_PIPELINE_MS_STATE_CREATE_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_PIPELINE_MS_STATE_CREATE_INFO
const void* pNext; // Pointer to next structure
uint32_t samples;
bool32_t multisampleEnable; // optional (GL45)
bool32_t sampleShadingEnable; // optional (GL45)
float minSampleShading; // optional (GL45)
- VK_SAMPLE_MASK sampleMask;
-} VK_PIPELINE_MS_STATE_CREATE_INFO;
+ VkSampleMask sampleMask;
+} VkPipelineMsStateCreateInfo;
-typedef struct _VK_PIPELINE_CB_ATTACHMENT_STATE
+typedef struct VkPipelineCbAttachmentState_
{
bool32_t blendEnable;
- VK_FORMAT format;
- VK_BLEND srcBlendColor;
- VK_BLEND destBlendColor;
- VK_BLEND_FUNC blendFuncColor;
- VK_BLEND srcBlendAlpha;
- VK_BLEND destBlendAlpha;
- VK_BLEND_FUNC blendFuncAlpha;
+ VkFormat format;
+ VkBlend srcBlendColor;
+ VkBlend destBlendColor;
+ VkBlendFunc blendFuncColor;
+ VkBlend srcBlendAlpha;
+ VkBlend destBlendAlpha;
+ VkBlendFunc blendFuncAlpha;
uint8_t channelWriteMask;
-} VK_PIPELINE_CB_ATTACHMENT_STATE;
+} VkPipelineCbAttachmentState;
-typedef struct _VK_PIPELINE_CB_STATE_CREATE_INFO
+typedef struct VkPipelineCbStateCreateInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_PIPELINE_CB_STATE_CREATE_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_PIPELINE_CB_STATE_CREATE_INFO
const void* pNext; // Pointer to next structure
bool32_t alphaToCoverageEnable;
bool32_t logicOpEnable;
- VK_LOGIC_OP logicOp;
+ VkLogicOp logicOp;
uint32_t attachmentCount; // # of pAttachments
- const VK_PIPELINE_CB_ATTACHMENT_STATE* pAttachments;
-} VK_PIPELINE_CB_STATE_CREATE_INFO;
+ const VkPipelineCbAttachmentState* pAttachments;
+} VkPipelineCbStateCreateInfo;
-typedef struct _VK_STENCIL_OP_STATE
+typedef struct VkStencilOpState_
{
- VK_STENCIL_OP stencilFailOp;
- VK_STENCIL_OP stencilPassOp;
- VK_STENCIL_OP stencilDepthFailOp;
- VK_COMPARE_FUNC stencilFunc;
-} VK_STENCIL_OP_STATE;
+ VkStencilOp stencilFailOp;
+ VkStencilOp stencilPassOp;
+ VkStencilOp stencilDepthFailOp;
+ VkCompareFunc stencilFunc;
+} VkStencilOpState;
-typedef struct _VK_PIPELINE_DS_STATE_CREATE_INFO
+typedef struct VkPipelineDsStateCreateInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_PIPELINE_DS_STATE_CREATE_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_PIPELINE_DS_STATE_CREATE_INFO
const void* pNext; // Pointer to next structure
- VK_FORMAT format;
+ VkFormat format;
bool32_t depthTestEnable;
bool32_t depthWriteEnable;
- VK_COMPARE_FUNC depthFunc;
+ VkCompareFunc depthFunc;
bool32_t depthBoundsEnable; // optional (depth_bounds_test)
bool32_t stencilTestEnable;
- VK_STENCIL_OP_STATE front;
- VK_STENCIL_OP_STATE back;
-} VK_PIPELINE_DS_STATE_CREATE_INFO;
+ VkStencilOpState front;
+ VkStencilOpState back;
+} VkPipelineDsStateCreateInfo;
-typedef struct _VK_PIPELINE_SHADER_STAGE_CREATE_INFO
+typedef struct VkPipelineShaderStageCreateInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
const void* pNext; // Pointer to next structure
- VK_PIPELINE_SHADER shader;
-} VK_PIPELINE_SHADER_STAGE_CREATE_INFO;
+ VkPipelineShader shader;
+} VkPipelineShaderStageCreateInfo;
-typedef struct _VK_GRAPHICS_PIPELINE_CREATE_INFO
+typedef struct VkGraphicsPipelineCreateInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO
const void* pNext; // Pointer to next structure
- VK_FLAGS flags; // VK_PIPELINE_CREATE_FLAGS
- VK_DESCRIPTOR_SET_LAYOUT_CHAIN pSetLayoutChain;
-} VK_GRAPHICS_PIPELINE_CREATE_INFO;
+ VkFlags flags; // VkPipelineCreateFlags
+ VkDescriptorSetLayoutChain pSetLayoutChain;
+} VkGraphicsPipelineCreateInfo;
-typedef struct _VK_SAMPLER_CREATE_INFO
+typedef struct VkSamplerCreateInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO
const void* pNext; // Pointer to next structure
- VK_TEX_FILTER magFilter; // Filter mode for magnification
- VK_TEX_FILTER minFilter; // Filter mode for minifiation
- VK_TEX_MIPMAP_MODE mipMode; // Mipmap selection mode
- VK_TEX_ADDRESS addressU;
- VK_TEX_ADDRESS addressV;
- VK_TEX_ADDRESS addressW;
+ VkTexFilter magFilter; // Filter mode for magnification
+ VkTexFilter minFilter; // Filter mode for minifiation
+ VkTexMipmapMode mipMode; // Mipmap selection mode
+ VkTexAddress addressU;
+ VkTexAddress addressV;
+ VkTexAddress addressW;
float mipLodBias;
uint32_t maxAnisotropy;
- VK_COMPARE_FUNC compareFunc;
+ VkCompareFunc compareFunc;
float minLod;
float maxLod;
- VK_BORDER_COLOR_TYPE borderColorType;
-} VK_SAMPLER_CREATE_INFO;
+ VkBorderColorType borderColorType;
+} VkSamplerCreateInfo;
-typedef struct _VK_DYNAMIC_VP_STATE_CREATE_INFO
+typedef struct VkDynamicVpStateCreateInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_DYNAMIC_VP_STATE_CREATE_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_DYNAMIC_VP_STATE_CREATE_INFO
const void* pNext; // Pointer to next structure
uint32_t viewportAndScissorCount; // number of entries in pViewports and pScissors
- const VK_VIEWPORT* pViewports;
- const VK_RECT* pScissors;
-} VK_DYNAMIC_VP_STATE_CREATE_INFO;
+ const VkViewport* pViewports;
+ const VkRect* pScissors;
+} VkDynamicVpStateCreateInfo;
-typedef struct _VK_DYNAMIC_RS_STATE_CREATE_INFO
+typedef struct VkDynamicRsStateCreateInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_DYNAMIC_RS_STATE_CREATE_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_DYNAMIC_RS_STATE_CREATE_INFO
const void* pNext; // Pointer to next structure
float depthBias;
float depthBiasClamp;
@@ -2084,18 +2084,18 @@ typedef struct _VK_DYNAMIC_RS_STATE_CREATE_INFO
float pointSize; // optional (GL45) - Size of points
float pointFadeThreshold; // optional (GL45) - Size of point fade threshold
float lineWidth; // optional (GL45) - Width of lines
-} VK_DYNAMIC_RS_STATE_CREATE_INFO;
+} VkDynamicRsStateCreateInfo;
-typedef struct _VK_DYNAMIC_CB_STATE_CREATE_INFO
+typedef struct VkDynamicCbStateCreateInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_DYNAMIC_CB_STATE_CREATE_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_DYNAMIC_CB_STATE_CREATE_INFO
const void* pNext; // Pointer to next structure
float blendConst[4];
-} VK_DYNAMIC_CB_STATE_CREATE_INFO;
+} VkDynamicCbStateCreateInfo;
-typedef struct _VK_DYNAMIC_DS_STATE_CREATE_INFO
+typedef struct VkDynamicDsStateCreateInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_DYNAMIC_DS_STATE_CREATE_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_DYNAMIC_DS_STATE_CREATE_INFO
const void* pNext; // Pointer to next structure
float minDepth; // optional (depth_bounds_test)
float maxDepth; // optional (depth_bounds_test)
@@ -2103,106 +2103,106 @@ typedef struct _VK_DYNAMIC_DS_STATE_CREATE_INFO
uint32_t stencilWriteMask;
uint32_t stencilFrontRef;
uint32_t stencilBackRef;
-} VK_DYNAMIC_DS_STATE_CREATE_INFO;
+} VkDynamicDsStateCreateInfo;
-typedef struct _VK_CMD_BUFFER_CREATE_INFO
+typedef struct VkCmdBufferCreateInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_CMD_BUFFER_CREATE_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_CMD_BUFFER_CREATE_INFO
const void* pNext; // Pointer to next structure
uint32_t queueNodeIndex;
- VK_FLAGS flags;
-} VK_CMD_BUFFER_CREATE_INFO;
+ VkFlags flags;
+} VkCmdBufferCreateInfo;
-typedef struct _VK_CMD_BUFFER_BEGIN_INFO
+typedef struct VkCmdBufferBeginInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO
const void* pNext; // Pointer to next structure
- VK_FLAGS flags; // VK_CMD_BUFFER_BUILD_FLAGS
-} VK_CMD_BUFFER_BEGIN_INFO;
+ VkFlags flags; // VkCmdBufferBuildFlags
+} VkCmdBufferBeginInfo;
-typedef struct _VK_RENDER_PASS_BEGIN
+typedef struct VkRenderPassBegin_
{
- VK_RENDER_PASS renderPass;
- VK_FRAMEBUFFER framebuffer;
-} VK_RENDER_PASS_BEGIN;
+ VkRenderPass renderPass;
+ VkFramebuffer framebuffer;
+} VkRenderPassBegin;
-typedef struct _VK_CMD_BUFFER_GRAPHICS_BEGIN_INFO
+typedef struct VkCmdBufferGraphicsBeginInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_CMD_BUFFER_GRAPHICS_BEGIN_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_CMD_BUFFER_GRAPHICS_BEGIN_INFO
const void* pNext; // Pointer to next structure
- VK_RENDER_PASS_BEGIN renderPassContinue; // Only needed when a render pass is split across two command buffers
-} VK_CMD_BUFFER_GRAPHICS_BEGIN_INFO;
+ VkRenderPassBegin renderPassContinue; // Only needed when a render pass is split across two command buffers
+} VkCmdBufferGraphicsBeginInfo;
// Union allowing specification of floating point or raw color data. Actual value selected is based on image being cleared.
-typedef union _VK_CLEAR_COLOR_VALUE
+typedef union VkClearColorValue_
{
float floatColor[4];
uint32_t rawColor[4];
-} VK_CLEAR_COLOR_VALUE;
+} VkClearColorValue;
-typedef struct _VK_CLEAR_COLOR
+typedef struct VkClearColor_
{
- VK_CLEAR_COLOR_VALUE color;
+ VkClearColorValue color;
bool32_t useRawValue;
-} VK_CLEAR_COLOR;
+} VkClearColor;
-typedef struct _VK_RENDER_PASS_CREATE_INFO
+typedef struct VkRenderPassCreateInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO
const void* pNext; // Pointer to next structure
- VK_RECT renderArea;
+ VkRect renderArea;
uint32_t colorAttachmentCount;
- VK_EXTENT2D extent;
+ VkExtent2D extent;
uint32_t sampleCount;
uint32_t layers;
- const VK_FORMAT* pColorFormats;
- const VK_IMAGE_LAYOUT* pColorLayouts;
- const VK_ATTACHMENT_LOAD_OP* pColorLoadOps;
- const VK_ATTACHMENT_STORE_OP* pColorStoreOps;
- const VK_CLEAR_COLOR* pColorLoadClearValues;
- VK_FORMAT depthStencilFormat;
- VK_IMAGE_LAYOUT depthStencilLayout;
- VK_ATTACHMENT_LOAD_OP depthLoadOp;
+ const VkFormat* pColorFormats;
+ const VkImageLayout* pColorLayouts;
+ const VkAttachmentLoadOp* pColorLoadOps;
+ const VkAttachmentStoreOp* pColorStoreOps;
+ const VkClearColor* pColorLoadClearValues;
+ VkFormat depthStencilFormat;
+ VkImageLayout depthStencilLayout;
+ VkAttachmentLoadOp depthLoadOp;
float depthLoadClearValue;
- VK_ATTACHMENT_STORE_OP depthStoreOp;
- VK_ATTACHMENT_LOAD_OP stencilLoadOp;
+ VkAttachmentStoreOp depthStoreOp;
+ VkAttachmentLoadOp stencilLoadOp;
uint32_t stencilLoadClearValue;
- VK_ATTACHMENT_STORE_OP stencilStoreOp;
-} VK_RENDER_PASS_CREATE_INFO;
+ VkAttachmentStoreOp stencilStoreOp;
+} VkRenderPassCreateInfo;
-typedef struct _VK_EVENT_CREATE_INFO
+typedef struct VkEventCreateInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_EVENT_CREATE_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_EVENT_CREATE_INFO
const void* pNext; // Pointer to next structure
- VK_FLAGS flags; // Reserved
-} VK_EVENT_CREATE_INFO;
+ VkFlags flags; // Reserved
+} VkEventCreateInfo;
-typedef struct _VK_FENCE_CREATE_INFO
+typedef struct VkFenceCreateInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_FENCE_CREATE_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_FENCE_CREATE_INFO
const void* pNext; // Pointer to next structure
- VK_FENCE_CREATE_FLAGS flags; // VK_FENCE_CREATE_FLAGS
-} VK_FENCE_CREATE_INFO;
+ VkFenceCreateFlags flags; // VkFenceCreateFlags
+} VkFenceCreateInfo;
-typedef struct _VK_SEMAPHORE_CREATE_INFO
+typedef struct VkSemaphoreCreateInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO
const void* pNext; // Pointer to next structure
uint32_t initialCount;
- VK_FLAGS flags; // VK_SEMAPHORE_CREATE_FLAGS
-} VK_SEMAPHORE_CREATE_INFO;
+ VkFlags flags; // VkSemaphoreCreateFlags
+} VkSemaphoreCreateInfo;
-typedef struct _VK_SEMAPHORE_OPEN_INFO
+typedef struct VkSemaphoreOpenInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_SEMAPHORE_OPEN_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_SEMAPHORE_OPEN_INFO
const void* pNext; // Pointer to next structure
- VK_SEMAPHORE sharedSemaphore;
-} VK_SEMAPHORE_OPEN_INFO;
+ VkSemaphore sharedSemaphore;
+} VkSemaphoreOpenInfo;
-typedef struct _VK_PIPELINE_STATISTICS_DATA
+typedef struct VkPipelineStatisticsData_
{
uint64_t fsInvocations; // Fragment shader invocations
uint64_t cPrimitives; // Clipper primitives
@@ -2215,219 +2215,219 @@ typedef struct _VK_PIPELINE_STATISTICS_DATA
uint64_t tcsInvocations; // Tessellation control shader invocations
uint64_t tesInvocations; // Tessellation evaluation shader invocations
uint64_t csInvocations; // Compute shader invocations
-} VK_PIPELINE_STATISTICS_DATA;
+} VkPipelineStatisticsData;
-typedef struct _VK_QUERY_POOL_CREATE_INFO
+typedef struct VkQueryPoolCreateInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO
const void* pNext; // Pointer to next structure
- VK_QUERY_TYPE queryType;
+ VkQueryType queryType;
uint32_t slots;
-} VK_QUERY_POOL_CREATE_INFO;
+} VkQueryPoolCreateInfo;
-typedef struct _VK_FRAMEBUFFER_CREATE_INFO
+typedef struct VkFramebufferCreateInfo_
{
- VK_STRUCTURE_TYPE sType; // Must be VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO
const void* pNext; // Pointer to next structure
uint32_t colorAttachmentCount;
- const VK_COLOR_ATTACHMENT_BIND_INFO* pColorAttachments;
- const VK_DEPTH_STENCIL_BIND_INFO* pDepthStencilAttachment;
+ const VkColorAttachmentBindInfo* pColorAttachments;
+ const VkDepthStencilBindInfo* pDepthStencilAttachment;
uint32_t sampleCount;
uint32_t width;
uint32_t height;
uint32_t layers;
-} VK_FRAMEBUFFER_CREATE_INFO;
+} VkFramebufferCreateInfo;
-typedef struct _VK_DRAW_INDIRECT_CMD
+typedef struct VkDrawIndirectCmd_
{
uint32_t vertexCount;
uint32_t instanceCount;
uint32_t firstVertex;
uint32_t firstInstance;
-} VK_DRAW_INDIRECT_CMD;
+} VkDrawIndirectCmd;
-typedef struct _VK_DRAW_INDEXED_INDIRECT_CMD
+typedef struct VkDrawIndexedIndirectCmd_
{
uint32_t indexCount;
uint32_t instanceCount;
uint32_t firstIndex;
int32_t vertexOffset;
uint32_t firstInstance;
-} VK_DRAW_INDEXED_INDIRECT_CMD;
+} VkDrawIndexedIndirectCmd;
-typedef struct _VK_DISPATCH_INDIRECT_CMD
+typedef struct VkDispatchIndirectCmd_
{
uint32_t x;
uint32_t y;
uint32_t z;
-} VK_DISPATCH_INDIRECT_CMD;
+} VkDispatchIndirectCmd;
// ------------------------------------------------------------------------------------------------
// API functions
-typedef VK_RESULT (VKAPI *vkCreateInstanceType)(const VkInstanceCreateInfo* pCreateInfo, VK_INSTANCE* pInstance);
-typedef VK_RESULT (VKAPI *vkDestroyInstanceType)(VK_INSTANCE instance);
-typedef VK_RESULT (VKAPI *vkEnumerateGpusType)(VK_INSTANCE instance, uint32_t maxGpus, uint32_t* pGpuCount, VK_PHYSICAL_GPU* pGpus);
-typedef VK_RESULT (VKAPI *vkGetGpuInfoType)(VK_PHYSICAL_GPU gpu, VK_PHYSICAL_GPU_INFO_TYPE infoType, size_t* pDataSize, void* pData);
-typedef void * (VKAPI *vkGetProcAddrType)(VK_PHYSICAL_GPU gpu, const char * pName);
-typedef VK_RESULT (VKAPI *vkCreateDeviceType)(VK_PHYSICAL_GPU gpu, const VkDeviceCreateInfo* pCreateInfo, VK_DEVICE* pDevice);
-typedef VK_RESULT (VKAPI *vkDestroyDeviceType)(VK_DEVICE device);
-typedef VK_RESULT (VKAPI *vkGetExtensionSupportType)(VK_PHYSICAL_GPU gpu, const char* pExtName);
-typedef VK_RESULT (VKAPI *vkEnumerateLayersType)(VK_PHYSICAL_GPU gpu, size_t maxLayerCount, size_t maxStringSize, size_t* pOutLayerCount, char* const* pOutLayers, void* pReserved);
-typedef VK_RESULT (VKAPI *vkGetDeviceQueueType)(VK_DEVICE device, uint32_t queueNodeIndex, uint32_t queueIndex, VK_QUEUE* pQueue);
-typedef VK_RESULT (VKAPI *vkQueueSubmitType)(VK_QUEUE queue, uint32_t cmdBufferCount, const VK_CMD_BUFFER* pCmdBuffers, VK_FENCE fence);
-typedef VK_RESULT (VKAPI *vkQueueAddMemReferenceType)(VK_QUEUE queue, VK_GPU_MEMORY mem);
-typedef VK_RESULT (VKAPI *vkQueueRemoveMemReferenceType)(VK_QUEUE queue, VK_GPU_MEMORY mem);
-typedef VK_RESULT (VKAPI *vkQueueWaitIdleType)(VK_QUEUE queue);
-typedef VK_RESULT (VKAPI *vkDeviceWaitIdleType)(VK_DEVICE device);
-typedef VK_RESULT (VKAPI *vkAllocMemoryType)(VK_DEVICE device, const VkMemoryAllocInfo* pAllocInfo, VK_GPU_MEMORY* pMem);
-typedef VK_RESULT (VKAPI *vkFreeMemoryType)(VK_GPU_MEMORY mem);
-typedef VK_RESULT (VKAPI *vkSetMemoryPriorityType)(VK_GPU_MEMORY mem, VK_MEMORY_PRIORITY priority);
-typedef VK_RESULT (VKAPI *vkMapMemoryType)(VK_GPU_MEMORY mem, VK_FLAGS flags, void** ppData);
-typedef VK_RESULT (VKAPI *vkUnmapMemoryType)(VK_GPU_MEMORY mem);
-typedef VK_RESULT (VKAPI *vkPinSystemMemoryType)(VK_DEVICE device, const void* pSysMem, size_t memSize, VK_GPU_MEMORY* pMem);
-typedef VK_RESULT (VKAPI *vkGetMultiGpuCompatibilityType)(VK_PHYSICAL_GPU gpu0, VK_PHYSICAL_GPU gpu1, VK_GPU_COMPATIBILITY_INFO* pInfo);
-typedef VK_RESULT (VKAPI *vkOpenSharedMemoryType)(VK_DEVICE device, const VK_MEMORY_OPEN_INFO* pOpenInfo, VK_GPU_MEMORY* pMem);
-typedef VK_RESULT (VKAPI *vkOpenSharedSemaphoreType)(VK_DEVICE device, const VK_SEMAPHORE_OPEN_INFO* pOpenInfo, VK_SEMAPHORE* pSemaphore);
-typedef VK_RESULT (VKAPI *vkOpenPeerMemoryType)(VK_DEVICE device, const VK_PEER_MEMORY_OPEN_INFO* pOpenInfo, VK_GPU_MEMORY* pMem);
-typedef VK_RESULT (VKAPI *vkOpenPeerImageType)(VK_DEVICE device, const VK_PEER_IMAGE_OPEN_INFO* pOpenInfo, VK_IMAGE* pImage, VK_GPU_MEMORY* pMem);
-typedef VK_RESULT (VKAPI *vkDestroyObjectType)(VK_OBJECT object);
-typedef VK_RESULT (VKAPI *vkGetObjectInfoType)(VK_BASE_OBJECT object, VK_OBJECT_INFO_TYPE infoType, size_t* pDataSize, void* pData);
-typedef VK_RESULT (VKAPI *vkBindObjectMemoryType)(VK_OBJECT object, uint32_t allocationIdx, VK_GPU_MEMORY mem, VK_GPU_SIZE offset);
-typedef VK_RESULT (VKAPI *vkBindObjectMemoryRangeType)(VK_OBJECT object, uint32_t allocationIdx, VK_GPU_SIZE rangeOffset,VK_GPU_SIZE rangeSize, VK_GPU_MEMORY mem, VK_GPU_SIZE memOffset);
-typedef VK_RESULT (VKAPI *vkBindImageMemoryRangeType)(VK_IMAGE image, uint32_t allocationIdx, const VK_IMAGE_MEMORY_BIND_INFO* bindInfo, VK_GPU_MEMORY mem, VK_GPU_SIZE memOffset);
-typedef VK_RESULT (VKAPI *vkCreateFenceType)(VK_DEVICE device, const VK_FENCE_CREATE_INFO* pCreateInfo, VK_FENCE* pFence);
-typedef VK_RESULT (VKAPI *vkResetFencesType)(VK_DEVICE device, uint32_t fenceCount, VK_FENCE* pFences);
-typedef VK_RESULT (VKAPI *vkGetFenceStatusType)(VK_FENCE fence);
-typedef VK_RESULT (VKAPI *vkWaitForFencesType)(VK_DEVICE device, uint32_t fenceCount, const VK_FENCE* pFences, bool32_t waitAll, uint64_t timeout);
-typedef VK_RESULT (VKAPI *vkCreateSemaphoreType)(VK_DEVICE device, const VK_SEMAPHORE_CREATE_INFO* pCreateInfo, VK_SEMAPHORE* pSemaphore);
-typedef VK_RESULT (VKAPI *vkQueueSignalSemaphoreType)(VK_QUEUE queue, VK_SEMAPHORE semaphore);
-typedef VK_RESULT (VKAPI *vkQueueWaitSemaphoreType)(VK_QUEUE queue, VK_SEMAPHORE semaphore);
-typedef VK_RESULT (VKAPI *vkCreateEventType)(VK_DEVICE device, const VK_EVENT_CREATE_INFO* pCreateInfo, VK_EVENT* pEvent);
-typedef VK_RESULT (VKAPI *vkGetEventStatusType)(VK_EVENT event);
-typedef VK_RESULT (VKAPI *vkSetEventType)(VK_EVENT event);
-typedef VK_RESULT (VKAPI *vkResetEventType)(VK_EVENT event);
-typedef VK_RESULT (VKAPI *vkCreateQueryPoolType)(VK_DEVICE device, const VK_QUERY_POOL_CREATE_INFO* pCreateInfo, VK_QUERY_POOL* pQueryPool);
-typedef VK_RESULT (VKAPI *vkGetQueryPoolResultsType)(VK_QUERY_POOL queryPool, uint32_t startQuery, uint32_t queryCount, size_t* pDataSize, void* pData);
-typedef VK_RESULT (VKAPI *vkGetFormatInfoType)(VK_DEVICE device, VK_FORMAT format, VK_FORMAT_INFO_TYPE infoType, size_t* pDataSize, void* pData);
-typedef VK_RESULT (VKAPI *vkCreateBufferType)(VK_DEVICE device, const VkBufferCreateInfo* pCreateInfo, VK_BUFFER* pBuffer);
-typedef VK_RESULT (VKAPI *vkCreateBufferViewType)(VK_DEVICE device, const VkBufferViewCreateInfo* pCreateInfo, VK_BUFFER_VIEW* pView);
-typedef VK_RESULT (VKAPI *vkCreateImageType)(VK_DEVICE device, const VK_IMAGE_CREATE_INFO* pCreateInfo, VK_IMAGE* pImage);
-typedef VK_RESULT (VKAPI *vkGetImageSubresourceInfoType)(VK_IMAGE image, const VK_IMAGE_SUBRESOURCE* pSubresource, VK_SUBRESOURCE_INFO_TYPE infoType, size_t* pDataSize, void* pData);
-typedef VK_RESULT (VKAPI *vkCreateImageViewType)(VK_DEVICE device, const VK_IMAGE_VIEW_CREATE_INFO* pCreateInfo, VK_IMAGE_VIEW* pView);
-typedef VK_RESULT (VKAPI *vkCreateColorAttachmentViewType)(VK_DEVICE device, const VK_COLOR_ATTACHMENT_VIEW_CREATE_INFO* pCreateInfo, VK_COLOR_ATTACHMENT_VIEW* pView);
-typedef VK_RESULT (VKAPI *vkCreateDepthStencilViewType)(VK_DEVICE device, const VK_DEPTH_STENCIL_VIEW_CREATE_INFO* pCreateInfo, VK_DEPTH_STENCIL_VIEW* pView);
-typedef VK_RESULT (VKAPI *vkCreateShaderType)(VK_DEVICE device, const VK_SHADER_CREATE_INFO* pCreateInfo, VK_SHADER* pShader);
-typedef VK_RESULT (VKAPI *vkCreateGraphicsPipelineType)(VK_DEVICE device, const VK_GRAPHICS_PIPELINE_CREATE_INFO* pCreateInfo, VK_PIPELINE* pPipeline);
-typedef VK_RESULT (VKAPI *vkCreateGraphicsPipelineDerivativeType)(VK_DEVICE device, const VK_GRAPHICS_PIPELINE_CREATE_INFO* pCreateInfo, VK_PIPELINE basePipeline, VK_PIPELINE* pPipeline);
-typedef VK_RESULT (VKAPI *vkCreateComputePipelineType)(VK_DEVICE device, const VK_COMPUTE_PIPELINE_CREATE_INFO* pCreateInfo, VK_PIPELINE* pPipeline);
-typedef VK_RESULT (VKAPI *vkStorePipelineType)(VK_PIPELINE pipeline, size_t* pDataSize, void* pData);
-typedef VK_RESULT (VKAPI *vkLoadPipelineType)(VK_DEVICE device, size_t dataSize, const void* pData, VK_PIPELINE* pPipeline);
-typedef VK_RESULT (VKAPI *vkLoadPipelineDerivativeType)(VK_DEVICE device, size_t dataSize, const void* pData, VK_PIPELINE basePipeline, VK_PIPELINE* pPipeline);
-typedef VK_RESULT (VKAPI *vkCreateSamplerType)(VK_DEVICE device, const VK_SAMPLER_CREATE_INFO* pCreateInfo, VK_SAMPLER* pSampler);
-typedef VK_RESULT (VKAPI *vkCreateDescriptorSetLayoutType)(VK_DEVICE device, const VK_DESCRIPTOR_SET_LAYOUT_CREATE_INFO* pCreateInfo, VK_DESCRIPTOR_SET_LAYOUT* pSetLayout);
-typedef VK_RESULT (VKAPI *vkCreateDescriptorSetLayoutChainType)(VK_DEVICE device, uint32_t setLayoutArrayCount, const VK_DESCRIPTOR_SET_LAYOUT* pSetLayoutArray, VK_DESCRIPTOR_SET_LAYOUT_CHAIN* pLayoutChain);
-typedef VK_RESULT (VKAPI *vkBeginDescriptorPoolUpdateType)(VK_DEVICE device, VK_DESCRIPTOR_UPDATE_MODE updateMode);
-typedef VK_RESULT (VKAPI *vkEndDescriptorPoolUpdateType)(VK_DEVICE device, VK_CMD_BUFFER cmd);
-typedef VK_RESULT (VKAPI *vkCreateDescriptorPoolType)(VK_DEVICE device, VK_DESCRIPTOR_POOL_USAGE poolUsage, uint32_t maxSets, const VK_DESCRIPTOR_POOL_CREATE_INFO* pCreateInfo, VK_DESCRIPTOR_POOL* pDescriptorPool);
-typedef VK_RESULT (VKAPI *vkResetDescriptorPoolType)(VK_DESCRIPTOR_POOL descriptorPool);
-typedef VK_RESULT (VKAPI *vkAllocDescriptorSetsType)(VK_DESCRIPTOR_POOL descriptorPool, VK_DESCRIPTOR_SET_USAGE setUsage, uint32_t count, const VK_DESCRIPTOR_SET_LAYOUT* pSetLayouts, VK_DESCRIPTOR_SET* pDescriptorSets, uint32_t* pCount);
-typedef void (VKAPI *vkClearDescriptorSetsType)(VK_DESCRIPTOR_POOL descriptorPool, uint32_t count, const VK_DESCRIPTOR_SET* pDescriptorSets);
-typedef void (VKAPI *vkUpdateDescriptorsType)(VK_DESCRIPTOR_SET descriptorSet, uint32_t updateCount, const void** pUpdateArray);
-typedef VK_RESULT (VKAPI *vkCreateDynamicViewportStateType)(VK_DEVICE device, const VK_DYNAMIC_VP_STATE_CREATE_INFO* pCreateInfo, VK_DYNAMIC_VP_STATE_OBJECT* pState);
-typedef VK_RESULT (VKAPI *vkCreateDynamicRasterStateType)(VK_DEVICE device, const VK_DYNAMIC_RS_STATE_CREATE_INFO* pCreateInfo, VK_DYNAMIC_RS_STATE_OBJECT* pState);
-typedef VK_RESULT (VKAPI *vkCreateDynamicColorBlendStateType)(VK_DEVICE device, const VK_DYNAMIC_CB_STATE_CREATE_INFO* pCreateInfo, VK_DYNAMIC_CB_STATE_OBJECT* pState);
-typedef VK_RESULT (VKAPI *vkCreateDynamicDepthStencilStateType)(VK_DEVICE device, const VK_DYNAMIC_DS_STATE_CREATE_INFO* pCreateInfo, VK_DYNAMIC_DS_STATE_OBJECT* pState);
-typedef VK_RESULT (VKAPI *vkCreateCommandBufferType)(VK_DEVICE device, const VK_CMD_BUFFER_CREATE_INFO* pCreateInfo, VK_CMD_BUFFER* pCmdBuffer);
-typedef VK_RESULT (VKAPI *vkBeginCommandBufferType)(VK_CMD_BUFFER cmdBuffer, const VK_CMD_BUFFER_BEGIN_INFO* pBeginInfo);
-typedef VK_RESULT (VKAPI *vkEndCommandBufferType)(VK_CMD_BUFFER cmdBuffer);
-typedef VK_RESULT (VKAPI *vkResetCommandBufferType)(VK_CMD_BUFFER cmdBuffer);
-typedef void (VKAPI *vkCmdBindPipelineType)(VK_CMD_BUFFER cmdBuffer, VK_PIPELINE_BIND_POINT pipelineBindPoint, VK_PIPELINE pipeline);
-typedef void (VKAPI *vkCmdBindDynamicStateObjectType)(VK_CMD_BUFFER cmdBuffer, VK_STATE_BIND_POINT stateBindPoint, VK_DYNAMIC_STATE_OBJECT state);
-typedef void (VKAPI *vkCmdBindDescriptorSetsType)(VK_CMD_BUFFER cmdBuffer, VK_PIPELINE_BIND_POINT pipelineBindPoint, VK_DESCRIPTOR_SET_LAYOUT_CHAIN layoutChain, uint32_t layoutChainSlot, uint32_t count, const VK_DESCRIPTOR_SET* pDescriptorSets, const uint32_t* pUserData);
-typedef void (VKAPI *vkCmdBindIndexBufferType)(VK_CMD_BUFFER cmdBuffer, VK_BUFFER buffer, VK_GPU_SIZE offset, VK_INDEX_TYPE indexType);
-typedef void (VKAPI *vkCmdBindVertexBufferType)(VK_CMD_BUFFER cmdBuffer, VK_BUFFER buffer, VK_GPU_SIZE offset, uint32_t binding);
-typedef void (VKAPI *vkCmdDrawType)(VK_CMD_BUFFER cmdBuffer, uint32_t firstVertex, uint32_t vertexCount, uint32_t firstInstance, uint32_t instanceCount);
-typedef void (VKAPI *vkCmdDrawIndexedType)(VK_CMD_BUFFER cmdBuffer, uint32_t firstIndex, uint32_t indexCount, int32_t vertexOffset, uint32_t firstInstance, uint32_t instanceCount);
-typedef void (VKAPI *vkCmdDrawIndirectType)(VK_CMD_BUFFER cmdBuffer, VK_BUFFER buffer, VK_GPU_SIZE offset, uint32_t count, uint32_t stride);
-typedef void (VKAPI *vkCmdDrawIndexedIndirectType)(VK_CMD_BUFFER cmdBuffer, VK_BUFFER buffer, VK_GPU_SIZE offset, uint32_t count, uint32_t stride);
-typedef void (VKAPI *vkCmdDispatchType)(VK_CMD_BUFFER cmdBuffer, uint32_t x, uint32_t y, uint32_t z);
-typedef void (VKAPI *vkCmdDispatchIndirectType)(VK_CMD_BUFFER cmdBuffer, VK_BUFFER buffer, VK_GPU_SIZE offset);
-typedef void (VKAPI *vkCmdCopyBufferType)(VK_CMD_BUFFER cmdBuffer, VK_BUFFER srcBuffer, VK_BUFFER destBuffer, uint32_t regionCount, const VK_BUFFER_COPY* pRegions);
-typedef void (VKAPI *vkCmdCopyImageType)(VK_CMD_BUFFER cmdBuffer, VK_IMAGE srcImage, VK_IMAGE_LAYOUT srcImageLayout, VK_IMAGE destImage, VK_IMAGE_LAYOUT destImageLayout, uint32_t regionCount, const VK_IMAGE_COPY* pRegions);
-typedef void (VKAPI *vkCmdBlitImageType)(VK_CMD_BUFFER cmdBuffer, VK_IMAGE srcImage, VK_IMAGE_LAYOUT srcImageLayout, VK_IMAGE destImage, VK_IMAGE_LAYOUT destImageLayout, uint32_t regionCount, const VK_IMAGE_BLIT* pRegions);
-typedef void (VKAPI *vkCmdCopyBufferToImageType)(VK_CMD_BUFFER cmdBuffer, VK_BUFFER srcBuffer, VK_IMAGE destImage, VK_IMAGE_LAYOUT destImageLayout, uint32_t regionCount, const VK_BUFFER_IMAGE_COPY* pRegions);
-typedef void (VKAPI *vkCmdCopyImageToBufferType)(VK_CMD_BUFFER cmdBuffer, VK_IMAGE srcImage, VK_IMAGE_LAYOUT srcImageLayout, VK_BUFFER destBuffer, uint32_t regionCount, const VK_BUFFER_IMAGE_COPY* pRegions);
-typedef void (VKAPI *vkCmdCloneImageDataType)(VK_CMD_BUFFER cmdBuffer, VK_IMAGE srcImage, VK_IMAGE_LAYOUT srcImageLayout, VK_IMAGE destImage, VK_IMAGE_LAYOUT destImageLayout);
-typedef void (VKAPI *vkCmdUpdateBufferType)(VK_CMD_BUFFER cmdBuffer, VK_BUFFER destBuffer, VK_GPU_SIZE destOffset, VK_GPU_SIZE dataSize, const uint32_t* pData);
-typedef void (VKAPI *vkCmdFillBufferType)(VK_CMD_BUFFER cmdBuffer, VK_BUFFER destBuffer, VK_GPU_SIZE destOffset, VK_GPU_SIZE fillSize, uint32_t data);
-typedef void (VKAPI *vkCmdClearColorImageType)(VK_CMD_BUFFER cmdBuffer, VK_IMAGE image, VK_IMAGE_LAYOUT imageLayout, VK_CLEAR_COLOR color, uint32_t rangeCount, const VK_IMAGE_SUBRESOURCE_RANGE* pRanges);
-typedef void (VKAPI *vkCmdClearDepthStencilType)(VK_CMD_BUFFER cmdBuffer, VK_IMAGE image, VK_IMAGE_LAYOUT imageLayout, float depth, uint32_t stencil, uint32_t rangeCount, const VK_IMAGE_SUBRESOURCE_RANGE* pRanges);
-typedef void (VKAPI *vkCmdResolveImageType)(VK_CMD_BUFFER cmdBuffer, VK_IMAGE srcImage, VK_IMAGE_LAYOUT srcImageLayout, VK_IMAGE destImage, VK_IMAGE_LAYOUT destImageLayout, uint32_t rectCount, const VK_IMAGE_RESOLVE* pRects);
-typedef void (VKAPI *vkCmdSetEventType)(VK_CMD_BUFFER cmdBuffer, VK_EVENT event, VK_PIPE_EVENT pipeEvent);
-typedef void (VKAPI *vkCmdResetEventType)(VK_CMD_BUFFER cmdBuffer, VK_EVENT event, VK_PIPE_EVENT pipeEvent);
-typedef void (VKAPI *vkCmdWaitEventsType)(VK_CMD_BUFFER cmdBuffer, const VK_EVENT_WAIT_INFO* pWaitInfo);
-typedef void (VKAPI *vkCmdPipelineBarrierType)(VK_CMD_BUFFER cmdBuffer, const VK_PIPELINE_BARRIER* pBarrier);
-typedef void (VKAPI *vkCmdBeginQueryType)(VK_CMD_BUFFER cmdBuffer, VK_QUERY_POOL queryPool, uint32_t slot, VK_FLAGS flags);
-typedef void (VKAPI *vkCmdEndQueryType)(VK_CMD_BUFFER cmdBuffer, VK_QUERY_POOL queryPool, uint32_t slot);
-typedef void (VKAPI *vkCmdResetQueryPoolType)(VK_CMD_BUFFER cmdBuffer, VK_QUERY_POOL queryPool, uint32_t startQuery, uint32_t queryCount);
-typedef void (VKAPI *vkCmdWriteTimestampType)(VK_CMD_BUFFER cmdBuffer, VK_TIMESTAMP_TYPE timestampType, VK_BUFFER destBuffer, VK_GPU_SIZE destOffset);
-typedef void (VKAPI *vkCmdInitAtomicCountersType)(VK_CMD_BUFFER cmdBuffer, VK_PIPELINE_BIND_POINT pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, const uint32_t* pData);
-typedef void (VKAPI *vkCmdLoadAtomicCountersType)(VK_CMD_BUFFER cmdBuffer, VK_PIPELINE_BIND_POINT pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, VK_BUFFER srcBuffer, VK_GPU_SIZE srcOffset);
-typedef void (VKAPI *vkCmdSaveAtomicCountersType)(VK_CMD_BUFFER cmdBuffer, VK_PIPELINE_BIND_POINT pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, VK_BUFFER destBuffer, VK_GPU_SIZE destOffset);
-typedef VK_RESULT (VKAPI *vkCreateFramebufferType)(VK_DEVICE device, const VK_FRAMEBUFFER_CREATE_INFO* pCreateInfo, VK_FRAMEBUFFER* pFramebuffer);
-typedef VK_RESULT (VKAPI *vkCreateRenderPassType)(VK_DEVICE device, const VK_RENDER_PASS_CREATE_INFO* pCreateInfo, VK_RENDER_PASS* pRenderPass);
-typedef void (VKAPI *vkCmdBeginRenderPassType)(VK_CMD_BUFFER cmdBuffer, const VK_RENDER_PASS_BEGIN* pRenderPassBegin);
-typedef void (VKAPI *vkCmdEndRenderPassType)(VK_CMD_BUFFER cmdBuffer, VK_RENDER_PASS renderPass);
+typedef VkResult (VKAPI *PFN_vkCreateInstance)(const VkInstanceCreateInfo* pCreateInfo, VkInstance* pInstance);
+typedef VkResult (VKAPI *PFN_vkDestroyInstance)(VkInstance instance);
+typedef VkResult (VKAPI *PFN_vkEnumerateGpus)(VkInstance instance, uint32_t maxGpus, uint32_t* pGpuCount, VkPhysicalGpu* pGpus);
+typedef VkResult (VKAPI *PFN_vkGetGpuInfo)(VkPhysicalGpu gpu, VkPhysicalGpuInfoType infoType, size_t* pDataSize, void* pData);
+typedef void * (VKAPI *PFN_vkGetProcAddr)(VkPhysicalGpu gpu, const char * pName);
+typedef VkResult (VKAPI *PFN_vkCreateDevice)(VkPhysicalGpu gpu, const VkDeviceCreateInfo* pCreateInfo, VkDevice* pDevice);
+typedef VkResult (VKAPI *PFN_vkDestroyDevice)(VkDevice device);
+typedef VkResult (VKAPI *PFN_vkGetExtensionSupport)(VkPhysicalGpu gpu, const char* pExtName);
+typedef VkResult (VKAPI *PFN_vkEnumerateLayers)(VkPhysicalGpu gpu, size_t maxLayerCount, size_t maxStringSize, size_t* pOutLayerCount, char* const* pOutLayers, void* pReserved);
+typedef VkResult (VKAPI *PFN_vkGetDeviceQueue)(VkDevice device, uint32_t queueNodeIndex, uint32_t queueIndex, VkQueue* pQueue);
+typedef VkResult (VKAPI *PFN_vkQueueSubmit)(VkQueue queue, uint32_t cmdBufferCount, const VkCmdBuffer* pCmdBuffers, VkFence fence);
+typedef VkResult (VKAPI *PFN_vkQueueAddMemReference)(VkQueue queue, VkGpuMemory mem);
+typedef VkResult (VKAPI *PFN_vkQueueRemoveMemReference)(VkQueue queue, VkGpuMemory mem);
+typedef VkResult (VKAPI *PFN_vkQueueWaitIdle)(VkQueue queue);
+typedef VkResult (VKAPI *PFN_vkDeviceWaitIdle)(VkDevice device);
+typedef VkResult (VKAPI *PFN_vkAllocMemory)(VkDevice device, const VkMemoryAllocInfo* pAllocInfo, VkGpuMemory* pMem);
+typedef VkResult (VKAPI *PFN_vkFreeMemory)(VkGpuMemory mem);
+typedef VkResult (VKAPI *PFN_vkSetMemoryPriority)(VkGpuMemory mem, VkMemoryPriority priority);
+typedef VkResult (VKAPI *PFN_vkMapMemory)(VkGpuMemory mem, VkFlags flags, void** ppData);
+typedef VkResult (VKAPI *PFN_vkUnmapMemory)(VkGpuMemory mem);
+typedef VkResult (VKAPI *PFN_vkPinSystemMemory)(VkDevice device, const void* pSysMem, size_t memSize, VkGpuMemory* pMem);
+typedef VkResult (VKAPI *PFN_vkGetMultiGpuCompatibility)(VkPhysicalGpu gpu0, VkPhysicalGpu gpu1, VkGpuCompatibilityInfo* pInfo);
+typedef VkResult (VKAPI *PFN_vkOpenSharedMemory)(VkDevice device, const VkMemoryOpenInfo* pOpenInfo, VkGpuMemory* pMem);
+typedef VkResult (VKAPI *PFN_vkOpenSharedSemaphore)(VkDevice device, const VkSemaphoreOpenInfo* pOpenInfo, VkSemaphore* pSemaphore);
+typedef VkResult (VKAPI *PFN_vkOpenPeerMemory)(VkDevice device, const VkPeerMemoryOpenInfo* pOpenInfo, VkGpuMemory* pMem);
+typedef VkResult (VKAPI *PFN_vkOpenPeerImage)(VkDevice device, const VkPeerImageOpenInfo* pOpenInfo, VkImage* pImage, VkGpuMemory* pMem);
+typedef VkResult (VKAPI *PFN_vkDestroyObject)(VkObject object);
+typedef VkResult (VKAPI *PFN_vkGetObjectInfo)(VkBaseObject object, VkObjectInfoType infoType, size_t* pDataSize, void* pData);
+typedef VkResult (VKAPI *PFN_vkBindObjectMemory)(VkObject object, uint32_t allocationIdx, VkGpuMemory mem, VkGpuSize offset);
+typedef VkResult (VKAPI *PFN_vkBindObjectMemoryRange)(VkObject object, uint32_t allocationIdx, VkGpuSize rangeOffset, VkGpuSize rangeSize, VkGpuMemory mem, VkGpuSize memOffset);
+typedef VkResult (VKAPI *PFN_vkBindImageMemoryRange)(VkImage image, uint32_t allocationIdx, const VkImageMemoryBindInfo* bindInfo, VkGpuMemory mem, VkGpuSize memOffset);
+typedef VkResult (VKAPI *PFN_vkCreateFence)(VkDevice device, const VkFenceCreateInfo* pCreateInfo, VkFence* pFence);
+typedef VkResult (VKAPI *PFN_vkResetFences)(VkDevice device, uint32_t fenceCount, VkFence* pFences);
+typedef VkResult (VKAPI *PFN_vkGetFenceStatus)(VkFence fence);
+typedef VkResult (VKAPI *PFN_vkWaitForFences)(VkDevice device, uint32_t fenceCount, const VkFence* pFences, bool32_t waitAll, uint64_t timeout);
+typedef VkResult (VKAPI *PFN_vkCreateSemaphore)(VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo, VkSemaphore* pSemaphore);
+typedef VkResult (VKAPI *PFN_vkQueueSignalSemaphore)(VkQueue queue, VkSemaphore semaphore);
+typedef VkResult (VKAPI *PFN_vkQueueWaitSemaphore)(VkQueue queue, VkSemaphore semaphore);
+typedef VkResult (VKAPI *PFN_vkCreateEvent)(VkDevice device, const VkEventCreateInfo* pCreateInfo, VkEvent* pEvent);
+typedef VkResult (VKAPI *PFN_vkGetEventStatus)(VkEvent event);
+typedef VkResult (VKAPI *PFN_vkSetEvent)(VkEvent event);
+typedef VkResult (VKAPI *PFN_vkResetEvent)(VkEvent event);
+typedef VkResult (VKAPI *PFN_vkCreateQueryPool)(VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo, VkQueryPool* pQueryPool);
+typedef VkResult (VKAPI *PFN_vkGetQueryPoolResults)(VkQueryPool queryPool, uint32_t startQuery, uint32_t queryCount, size_t* pDataSize, void* pData);
+typedef VkResult (VKAPI *PFN_vkGetFormatInfo)(VkDevice device, VkFormat format, VkFormatInfoType infoType, size_t* pDataSize, void* pData);
+typedef VkResult (VKAPI *PFN_vkCreateBuffer)(VkDevice device, const VkBufferCreateInfo* pCreateInfo, VkBuffer* pBuffer);
+typedef VkResult (VKAPI *PFN_vkCreateBufferView)(VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, VkBufferView* pView);
+typedef VkResult (VKAPI *PFN_vkCreateImage)(VkDevice device, const VkImageCreateInfo* pCreateInfo, VkImage* pImage);
+typedef VkResult (VKAPI *PFN_vkGetImageSubresourceInfo)(VkImage image, const VkImageSubresource* pSubresource, VkSubresourceInfoType infoType, size_t* pDataSize, void* pData);
+typedef VkResult (VKAPI *PFN_vkCreateImageView)(VkDevice device, const VkImageViewCreateInfo* pCreateInfo, VkImageView* pView);
+typedef VkResult (VKAPI *PFN_vkCreateColorAttachmentView)(VkDevice device, const VkColorAttachmentViewCreateInfo* pCreateInfo, VkColorAttachmentView* pView);
+typedef VkResult (VKAPI *PFN_vkCreateDepthStencilView)(VkDevice device, const VkDepthStencilViewCreateInfo* pCreateInfo, VkDepthStencilView* pView);
+typedef VkResult (VKAPI *PFN_vkCreateShader)(VkDevice device, const VkShaderCreateInfo* pCreateInfo, VkShader* pShader);
+typedef VkResult (VKAPI *PFN_vkCreateGraphicsPipeline)(VkDevice device, const VkGraphicsPipelineCreateInfo* pCreateInfo, VkPipeline* pPipeline);
+typedef VkResult (VKAPI *PFN_vkCreateGraphicsPipelineDerivative)(VkDevice device, const VkGraphicsPipelineCreateInfo* pCreateInfo, VkPipeline basePipeline, VkPipeline* pPipeline);
+typedef VkResult (VKAPI *PFN_vkCreateComputePipeline)(VkDevice device, const VkComputePipelineCreateInfo* pCreateInfo, VkPipeline* pPipeline);
+typedef VkResult (VKAPI *PFN_vkStorePipeline)(VkPipeline pipeline, size_t* pDataSize, void* pData);
+typedef VkResult (VKAPI *PFN_vkLoadPipeline)(VkDevice device, size_t dataSize, const void* pData, VkPipeline* pPipeline);
+typedef VkResult (VKAPI *PFN_vkLoadPipelineDerivative)(VkDevice device, size_t dataSize, const void* pData, VkPipeline basePipeline, VkPipeline* pPipeline);
+typedef VkResult (VKAPI *PFN_vkCreateSampler)(VkDevice device, const VkSamplerCreateInfo* pCreateInfo, VkSampler* pSampler);
+typedef VkResult (VKAPI *PFN_vkCreateDescriptorSetLayout)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayout* pSetLayout);
+typedef VkResult (VKAPI *PFN_vkCreateDescriptorSetLayoutChain)(VkDevice device, uint32_t setLayoutArrayCount, const VkDescriptorSetLayout* pSetLayoutArray, VkDescriptorSetLayoutChain* pLayoutChain);
+typedef VkResult (VKAPI *PFN_vkBeginDescriptorPoolUpdate)(VkDevice device, VkDescriptorUpdateMode updateMode);
+typedef VkResult (VKAPI *PFN_vkEndDescriptorPoolUpdate)(VkDevice device, VkCmdBuffer cmd);
+typedef VkResult (VKAPI *PFN_vkCreateDescriptorPool)(VkDevice device, VkDescriptorPoolUsage poolUsage, uint32_t maxSets, const VkDescriptorPoolCreateInfo* pCreateInfo, VkDescriptorPool* pDescriptorPool);
+typedef VkResult (VKAPI *PFN_vkResetDescriptorPool)(VkDescriptorPool descriptorPool);
+typedef VkResult (VKAPI *PFN_vkAllocDescriptorSets)(VkDescriptorPool descriptorPool, VkDescriptorSetUsage setUsage, uint32_t count, const VkDescriptorSetLayout* pSetLayouts, VkDescriptorSet* pDescriptorSets, uint32_t* pCount);
+typedef void (VKAPI *PFN_vkClearDescriptorSets)(VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet* pDescriptorSets);
+typedef void (VKAPI *PFN_vkUpdateDescriptors)(VkDescriptorSet descriptorSet, uint32_t updateCount, const void** pUpdateArray);
+typedef VkResult (VKAPI *PFN_vkCreateDynamicViewportState)(VkDevice device, const VkDynamicVpStateCreateInfo* pCreateInfo, VkDynamicVpStateObject* pState);
+typedef VkResult (VKAPI *PFN_vkCreateDynamicRasterState)(VkDevice device, const VkDynamicRsStateCreateInfo* pCreateInfo, VkDynamicRsStateObject* pState);
+typedef VkResult (VKAPI *PFN_vkCreateDynamicColorBlendState)(VkDevice device, const VkDynamicCbStateCreateInfo* pCreateInfo, VkDynamicCbStateObject* pState);
+typedef VkResult (VKAPI *PFN_vkCreateDynamicDepthStencilState)(VkDevice device, const VkDynamicDsStateCreateInfo* pCreateInfo, VkDynamicDsStateObject* pState);
+typedef VkResult (VKAPI *PFN_vkCreateCommandBuffer)(VkDevice device, const VkCmdBufferCreateInfo* pCreateInfo, VkCmdBuffer* pCmdBuffer);
+typedef VkResult (VKAPI *PFN_vkBeginCommandBuffer)(VkCmdBuffer cmdBuffer, const VkCmdBufferBeginInfo* pBeginInfo);
+typedef VkResult (VKAPI *PFN_vkEndCommandBuffer)(VkCmdBuffer cmdBuffer);
+typedef VkResult (VKAPI *PFN_vkResetCommandBuffer)(VkCmdBuffer cmdBuffer);
+typedef void (VKAPI *PFN_vkCmdBindPipeline)(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline);
+typedef void (VKAPI *PFN_vkCmdBindDynamicStateObject)(VkCmdBuffer cmdBuffer, VkStateBindPoint stateBindPoint, VkDynamicStateObject state);
+typedef void (VKAPI *PFN_vkCmdBindDescriptorSets)(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, VkDescriptorSetLayoutChain layoutChain, uint32_t layoutChainSlot, uint32_t count, const VkDescriptorSet* pDescriptorSets, const uint32_t* pUserData);
+typedef void (VKAPI *PFN_vkCmdBindIndexBuffer)(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset, VkIndexType indexType);
+typedef void (VKAPI *PFN_vkCmdBindVertexBuffer)(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset, uint32_t binding);
+typedef void (VKAPI *PFN_vkCmdDraw)(VkCmdBuffer cmdBuffer, uint32_t firstVertex, uint32_t vertexCount, uint32_t firstInstance, uint32_t instanceCount);
+typedef void (VKAPI *PFN_vkCmdDrawIndexed)(VkCmdBuffer cmdBuffer, uint32_t firstIndex, uint32_t indexCount, int32_t vertexOffset, uint32_t firstInstance, uint32_t instanceCount);
+typedef void (VKAPI *PFN_vkCmdDrawIndirect)(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset, uint32_t count, uint32_t stride);
+typedef void (VKAPI *PFN_vkCmdDrawIndexedIndirect)(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset, uint32_t count, uint32_t stride);
+typedef void (VKAPI *PFN_vkCmdDispatch)(VkCmdBuffer cmdBuffer, uint32_t x, uint32_t y, uint32_t z);
+typedef void (VKAPI *PFN_vkCmdDispatchIndirect)(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset);
+typedef void (VKAPI *PFN_vkCmdCopyBuffer)(VkCmdBuffer cmdBuffer, VkBuffer srcBuffer, VkBuffer destBuffer, uint32_t regionCount, const VkBufferCopy* pRegions);
+typedef void (VKAPI *PFN_vkCmdCopyImage)(VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage destImage, VkImageLayout destImageLayout, uint32_t regionCount, const VkImageCopy* pRegions);
+typedef void (VKAPI *PFN_vkCmdBlitImage)(VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage destImage, VkImageLayout destImageLayout, uint32_t regionCount, const VkImageBlit* pRegions);
+typedef void (VKAPI *PFN_vkCmdCopyBufferToImage)(VkCmdBuffer cmdBuffer, VkBuffer srcBuffer, VkImage destImage, VkImageLayout destImageLayout, uint32_t regionCount, const VkBufferImageCopy* pRegions);
+typedef void (VKAPI *PFN_vkCmdCopyImageToBuffer)(VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer destBuffer, uint32_t regionCount, const VkBufferImageCopy* pRegions);
+typedef void (VKAPI *PFN_vkCmdCloneImageData)(VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage destImage, VkImageLayout destImageLayout);
+typedef void (VKAPI *PFN_vkCmdUpdateBuffer)(VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkGpuSize destOffset, VkGpuSize dataSize, const uint32_t* pData);
+typedef void (VKAPI *PFN_vkCmdFillBuffer)(VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkGpuSize destOffset, VkGpuSize fillSize, uint32_t data);
+typedef void (VKAPI *PFN_vkCmdClearColorImage)(VkCmdBuffer cmdBuffer, VkImage image, VkImageLayout imageLayout, VkClearColor color, uint32_t rangeCount, const VkImageSubresourceRange* pRanges);
+typedef void (VKAPI *PFN_vkCmdClearDepthStencil)(VkCmdBuffer cmdBuffer, VkImage image, VkImageLayout imageLayout, float depth, uint32_t stencil, uint32_t rangeCount, const VkImageSubresourceRange* pRanges);
+typedef void (VKAPI *PFN_vkCmdResolveImage)(VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage destImage, VkImageLayout destImageLayout, uint32_t rectCount, const VkImageResolve* pRects);
+typedef void (VKAPI *PFN_vkCmdSetEvent)(VkCmdBuffer cmdBuffer, VkEvent event, VkPipeEvent pipeEvent);
+typedef void (VKAPI *PFN_vkCmdResetEvent)(VkCmdBuffer cmdBuffer, VkEvent event, VkPipeEvent pipeEvent);
+typedef void (VKAPI *PFN_vkCmdWaitEvents)(VkCmdBuffer cmdBuffer, const VkEventWaitInfo* pWaitInfo);
+typedef void (VKAPI *PFN_vkCmdPipelineBarrier)(VkCmdBuffer cmdBuffer, const VkPipelineBarrier* pBarrier);
+typedef void (VKAPI *PFN_vkCmdBeginQuery)(VkCmdBuffer cmdBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags);
+typedef void (VKAPI *PFN_vkCmdEndQuery)(VkCmdBuffer cmdBuffer, VkQueryPool queryPool, uint32_t slot);
+typedef void (VKAPI *PFN_vkCmdResetQueryPool)(VkCmdBuffer cmdBuffer, VkQueryPool queryPool, uint32_t startQuery, uint32_t queryCount);
+typedef void (VKAPI *PFN_vkCmdWriteTimestamp)(VkCmdBuffer cmdBuffer, VkTimestampType timestampType, VkBuffer destBuffer, VkGpuSize destOffset);
+typedef void (VKAPI *PFN_vkCmdInitAtomicCounters)(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, const uint32_t* pData);
+typedef void (VKAPI *PFN_vkCmdLoadAtomicCounters)(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, VkBuffer srcBuffer, VkGpuSize srcOffset);
+typedef void (VKAPI *PFN_vkCmdSaveAtomicCounters)(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, VkBuffer destBuffer, VkGpuSize destOffset);
+typedef VkResult (VKAPI *PFN_vkCreateFramebuffer)(VkDevice device, const VkFramebufferCreateInfo* pCreateInfo, VkFramebuffer* pFramebuffer);
+typedef VkResult (VKAPI *PFN_vkCreateRenderPass)(VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, VkRenderPass* pRenderPass);
+typedef void (VKAPI *PFN_vkCmdBeginRenderPass)(VkCmdBuffer cmdBuffer, const VkRenderPassBegin* pRenderPassBegin);
+typedef void (VKAPI *PFN_vkCmdEndRenderPass)(VkCmdBuffer cmdBuffer, VkRenderPass renderPass);
#ifdef VK_PROTOTYPES
// GPU initialization
-VK_RESULT VKAPI vkCreateInstance(
+VkResult VKAPI vkCreateInstance(
const VkInstanceCreateInfo* pCreateInfo,
- VK_INSTANCE* pInstance);
+ VkInstance* pInstance);
-VK_RESULT VKAPI vkDestroyInstance(
- VK_INSTANCE instance);
+VkResult VKAPI vkDestroyInstance(
+ VkInstance instance);
-VK_RESULT VKAPI vkEnumerateGpus(
- VK_INSTANCE instance,
+VkResult VKAPI vkEnumerateGpus(
+ VkInstance instance,
uint32_t maxGpus,
uint32_t* pGpuCount,
- VK_PHYSICAL_GPU* pGpus);
+ VkPhysicalGpu* pGpus);
-VK_RESULT VKAPI vkGetGpuInfo(
- VK_PHYSICAL_GPU gpu,
- VK_PHYSICAL_GPU_INFO_TYPE infoType,
+VkResult VKAPI vkGetGpuInfo(
+ VkPhysicalGpu gpu,
+ VkPhysicalGpuInfoType infoType,
size_t* pDataSize,
void* pData);
void * VKAPI vkGetProcAddr(
- VK_PHYSICAL_GPU gpu,
+ VkPhysicalGpu gpu,
const char* pName);
// Device functions
-VK_RESULT VKAPI vkCreateDevice(
- VK_PHYSICAL_GPU gpu,
+VkResult VKAPI vkCreateDevice(
+ VkPhysicalGpu gpu,
const VkDeviceCreateInfo* pCreateInfo,
- VK_DEVICE* pDevice);
+ VkDevice* pDevice);
-VK_RESULT VKAPI vkDestroyDevice(
- VK_DEVICE device);
+VkResult VKAPI vkDestroyDevice(
+ VkDevice device);
// Extension discovery functions
-VK_RESULT VKAPI vkGetExtensionSupport(
- VK_PHYSICAL_GPU gpu,
+VkResult VKAPI vkGetExtensionSupport(
+ VkPhysicalGpu gpu,
const char* pExtName);
// Layer discovery functions
-VK_RESULT VKAPI vkEnumerateLayers(
- VK_PHYSICAL_GPU gpu,
+VkResult VKAPI vkEnumerateLayers(
+ VkPhysicalGpu gpu,
size_t maxLayerCount,
size_t maxStringSize,
size_t* pOutLayerCount,
@@ -2436,182 +2436,182 @@ VK_RESULT VKAPI vkEnumerateLayers(
// Queue functions
-VK_RESULT VKAPI vkGetDeviceQueue(
- VK_DEVICE device,
+VkResult VKAPI vkGetDeviceQueue(
+ VkDevice device,
uint32_t queueNodeIndex,
uint32_t queueIndex,
- VK_QUEUE* pQueue);
+ VkQueue* pQueue);
-VK_RESULT VKAPI vkQueueSubmit(
- VK_QUEUE queue,
+VkResult VKAPI vkQueueSubmit(
+ VkQueue queue,
uint32_t cmdBufferCount,
- const VK_CMD_BUFFER* pCmdBuffers,
- VK_FENCE fence);
+ const VkCmdBuffer* pCmdBuffers,
+ VkFence fence);
-VK_RESULT VKAPI vkQueueAddMemReference(
- VK_QUEUE queue,
- VK_GPU_MEMORY mem);
+VkResult VKAPI vkQueueAddMemReference(
+ VkQueue queue,
+ VkGpuMemory mem);
-VK_RESULT VKAPI vkQueueRemoveMemReference(
- VK_QUEUE queue,
- VK_GPU_MEMORY mem);
+VkResult VKAPI vkQueueRemoveMemReference(
+ VkQueue queue,
+ VkGpuMemory mem);
-VK_RESULT VKAPI vkQueueWaitIdle(
- VK_QUEUE queue);
+VkResult VKAPI vkQueueWaitIdle(
+ VkQueue queue);
-VK_RESULT VKAPI vkDeviceWaitIdle(
- VK_DEVICE device);
+VkResult VKAPI vkDeviceWaitIdle(
+ VkDevice device);
// Memory functions
-VK_RESULT VKAPI vkAllocMemory(
- VK_DEVICE device,
+VkResult VKAPI vkAllocMemory(
+ VkDevice device,
const VkMemoryAllocInfo* pAllocInfo,
- VK_GPU_MEMORY* pMem);
+ VkGpuMemory* pMem);
-VK_RESULT VKAPI vkFreeMemory(
- VK_GPU_MEMORY mem);
+VkResult VKAPI vkFreeMemory(
+ VkGpuMemory mem);
-VK_RESULT VKAPI vkSetMemoryPriority(
- VK_GPU_MEMORY mem,
- VK_MEMORY_PRIORITY priority);
+VkResult VKAPI vkSetMemoryPriority(
+ VkGpuMemory mem,
+ VkMemoryPriority priority);
-VK_RESULT VKAPI vkMapMemory(
- VK_GPU_MEMORY mem,
- VK_FLAGS flags, // Reserved
+VkResult VKAPI vkMapMemory(
+ VkGpuMemory mem,
+ VkFlags flags, // Reserved
void** ppData);
-VK_RESULT VKAPI vkUnmapMemory(
- VK_GPU_MEMORY mem);
+VkResult VKAPI vkUnmapMemory(
+ VkGpuMemory mem);
-VK_RESULT VKAPI vkPinSystemMemory(
- VK_DEVICE device,
+VkResult VKAPI vkPinSystemMemory(
+ VkDevice device,
const void* pSysMem,
size_t memSize,
- VK_GPU_MEMORY* pMem);
+ VkGpuMemory* pMem);
// Multi-device functions
-VK_RESULT VKAPI vkGetMultiGpuCompatibility(
- VK_PHYSICAL_GPU gpu0,
- VK_PHYSICAL_GPU gpu1,
- VK_GPU_COMPATIBILITY_INFO* pInfo);
+VkResult VKAPI vkGetMultiGpuCompatibility(
+ VkPhysicalGpu gpu0,
+ VkPhysicalGpu gpu1,
+ VkGpuCompatibilityInfo* pInfo);
-VK_RESULT VKAPI vkOpenSharedMemory(
- VK_DEVICE device,
- const VK_MEMORY_OPEN_INFO* pOpenInfo,
- VK_GPU_MEMORY* pMem);
+VkResult VKAPI vkOpenSharedMemory(
+ VkDevice device,
+ const VkMemoryOpenInfo* pOpenInfo,
+ VkGpuMemory* pMem);
-VK_RESULT VKAPI vkOpenSharedSemaphore(
- VK_DEVICE device,
- const VK_SEMAPHORE_OPEN_INFO* pOpenInfo,
- VK_SEMAPHORE* pSemaphore);
+VkResult VKAPI vkOpenSharedSemaphore(
+ VkDevice device,
+ const VkSemaphoreOpenInfo* pOpenInfo,
+ VkSemaphore* pSemaphore);
-VK_RESULT VKAPI vkOpenPeerMemory(
- VK_DEVICE device,
- const VK_PEER_MEMORY_OPEN_INFO* pOpenInfo,
- VK_GPU_MEMORY* pMem);
+VkResult VKAPI vkOpenPeerMemory(
+ VkDevice device,
+ const VkPeerMemoryOpenInfo* pOpenInfo,
+ VkGpuMemory* pMem);
-VK_RESULT VKAPI vkOpenPeerImage(
- VK_DEVICE device,
- const VK_PEER_IMAGE_OPEN_INFO* pOpenInfo,
- VK_IMAGE* pImage,
- VK_GPU_MEMORY* pMem);
+VkResult VKAPI vkOpenPeerImage(
+ VkDevice device,
+ const VkPeerImageOpenInfo* pOpenInfo,
+ VkImage* pImage,
+ VkGpuMemory* pMem);
// Generic API object functions
-VK_RESULT VKAPI vkDestroyObject(
- VK_OBJECT object);
+VkResult VKAPI vkDestroyObject(
+ VkObject object);
-VK_RESULT VKAPI vkGetObjectInfo(
- VK_BASE_OBJECT object,
- VK_OBJECT_INFO_TYPE infoType,
+VkResult VKAPI vkGetObjectInfo(
+ VkBaseObject object,
+ VkObjectInfoType infoType,
size_t* pDataSize,
void* pData);
-VK_RESULT VKAPI vkBindObjectMemory(
- VK_OBJECT object,
+VkResult VKAPI vkBindObjectMemory(
+ VkObject object,
uint32_t allocationIdx,
- VK_GPU_MEMORY mem,
- VK_GPU_SIZE memOffset);
+ VkGpuMemory mem,
+ VkGpuSize memOffset);
-VK_RESULT VKAPI vkBindObjectMemoryRange(
- VK_OBJECT object,
+VkResult VKAPI vkBindObjectMemoryRange(
+ VkObject object,
uint32_t allocationIdx,
- VK_GPU_SIZE rangeOffset,
- VK_GPU_SIZE rangeSize,
- VK_GPU_MEMORY mem,
- VK_GPU_SIZE memOffset);
+ VkGpuSize rangeOffset,
+ VkGpuSize rangeSize,
+ VkGpuMemory mem,
+ VkGpuSize memOffset);
-VK_RESULT VKAPI vkBindImageMemoryRange(
- VK_IMAGE image,
+VkResult VKAPI vkBindImageMemoryRange(
+ VkImage image,
uint32_t allocationIdx,
- const VK_IMAGE_MEMORY_BIND_INFO* bindInfo,
- VK_GPU_MEMORY mem,
- VK_GPU_SIZE memOffset);
+ const VkImageMemoryBindInfo* bindInfo,
+ VkGpuMemory mem,
+ VkGpuSize memOffset);
// Fence functions
-VK_RESULT VKAPI vkCreateFence(
- VK_DEVICE device,
- const VK_FENCE_CREATE_INFO* pCreateInfo,
- VK_FENCE* pFence);
+VkResult VKAPI vkCreateFence(
+ VkDevice device,
+ const VkFenceCreateInfo* pCreateInfo,
+ VkFence* pFence);
-VK_RESULT VKAPI vkResetFences(
- VK_DEVICE device,
+VkResult VKAPI vkResetFences(
+ VkDevice device,
uint32_t fenceCount,
- VK_FENCE* pFences);
+ VkFence* pFences);
-VK_RESULT VKAPI vkGetFenceStatus(
- VK_FENCE fence);
+VkResult VKAPI vkGetFenceStatus(
+ VkFence fence);
-VK_RESULT VKAPI vkWaitForFences(
- VK_DEVICE device,
+VkResult VKAPI vkWaitForFences(
+ VkDevice device,
uint32_t fenceCount,
- const VK_FENCE* pFences,
+ const VkFence* pFences,
bool32_t waitAll,
uint64_t timeout); // timeout in nanoseconds
// Queue semaphore functions
-VK_RESULT VKAPI vkCreateSemaphore(
- VK_DEVICE device,
- const VK_SEMAPHORE_CREATE_INFO* pCreateInfo,
- VK_SEMAPHORE* pSemaphore);
+VkResult VKAPI vkCreateSemaphore(
+ VkDevice device,
+ const VkSemaphoreCreateInfo* pCreateInfo,
+ VkSemaphore* pSemaphore);
-VK_RESULT VKAPI vkQueueSignalSemaphore(
- VK_QUEUE queue,
- VK_SEMAPHORE semaphore);
+VkResult VKAPI vkQueueSignalSemaphore(
+ VkQueue queue,
+ VkSemaphore semaphore);
-VK_RESULT VKAPI vkQueueWaitSemaphore(
- VK_QUEUE queue,
- VK_SEMAPHORE semaphore);
+VkResult VKAPI vkQueueWaitSemaphore(
+ VkQueue queue,
+ VkSemaphore semaphore);
// Event functions
-VK_RESULT VKAPI vkCreateEvent(
- VK_DEVICE device,
- const VK_EVENT_CREATE_INFO* pCreateInfo,
- VK_EVENT* pEvent);
+VkResult VKAPI vkCreateEvent(
+ VkDevice device,
+ const VkEventCreateInfo* pCreateInfo,
+ VkEvent* pEvent);
-VK_RESULT VKAPI vkGetEventStatus(
- VK_EVENT event);
+VkResult VKAPI vkGetEventStatus(
+ VkEvent event);
-VK_RESULT VKAPI vkSetEvent(
- VK_EVENT event);
+VkResult VKAPI vkSetEvent(
+ VkEvent event);
-VK_RESULT VKAPI vkResetEvent(
- VK_EVENT event);
+VkResult VKAPI vkResetEvent(
+ VkEvent event);
// Query functions
-VK_RESULT VKAPI vkCreateQueryPool(
- VK_DEVICE device,
- const VK_QUERY_POOL_CREATE_INFO* pCreateInfo,
- VK_QUERY_POOL* pQueryPool);
+VkResult VKAPI vkCreateQueryPool(
+ VkDevice device,
+ const VkQueryPoolCreateInfo* pCreateInfo,
+ VkQueryPool* pQueryPool);
-VK_RESULT VKAPI vkGetQueryPoolResults(
- VK_QUERY_POOL queryPool,
+VkResult VKAPI vkGetQueryPoolResults(
+ VkQueryPool queryPool,
uint32_t startQuery,
uint32_t queryCount,
size_t* pDataSize,
@@ -2619,238 +2619,238 @@ VK_RESULT VKAPI vkGetQueryPoolResults(
// Format capabilities
-VK_RESULT VKAPI vkGetFormatInfo(
- VK_DEVICE device,
- VK_FORMAT format,
- VK_FORMAT_INFO_TYPE infoType,
+VkResult VKAPI vkGetFormatInfo(
+ VkDevice device,
+ VkFormat format,
+ VkFormatInfoType infoType,
size_t* pDataSize,
void* pData);
// Buffer functions
-VK_RESULT VKAPI vkCreateBuffer(
- VK_DEVICE device,
+VkResult VKAPI vkCreateBuffer(
+ VkDevice device,
const VkBufferCreateInfo* pCreateInfo,
- VK_BUFFER* pBuffer);
+ VkBuffer* pBuffer);
// Buffer view functions
-VK_RESULT VKAPI vkCreateBufferView(
- VK_DEVICE device,
+VkResult VKAPI vkCreateBufferView(
+ VkDevice device,
const VkBufferViewCreateInfo* pCreateInfo,
- VK_BUFFER_VIEW* pView);
+ VkBufferView* pView);
// Image functions
-VK_RESULT VKAPI vkCreateImage(
- VK_DEVICE device,
- const VK_IMAGE_CREATE_INFO* pCreateInfo,
- VK_IMAGE* pImage);
+VkResult VKAPI vkCreateImage(
+ VkDevice device,
+ const VkImageCreateInfo* pCreateInfo,
+ VkImage* pImage);
-VK_RESULT VKAPI vkGetImageSubresourceInfo(
- VK_IMAGE image,
- const VK_IMAGE_SUBRESOURCE* pSubresource,
- VK_SUBRESOURCE_INFO_TYPE infoType,
+VkResult VKAPI vkGetImageSubresourceInfo(
+ VkImage image,
+ const VkImageSubresource* pSubresource,
+ VkSubresourceInfoType infoType,
size_t* pDataSize,
void* pData);
// Image view functions
-VK_RESULT VKAPI vkCreateImageView(
- VK_DEVICE device,
- const VK_IMAGE_VIEW_CREATE_INFO* pCreateInfo,
- VK_IMAGE_VIEW* pView);
+VkResult VKAPI vkCreateImageView(
+ VkDevice device,
+ const VkImageViewCreateInfo* pCreateInfo,
+ VkImageView* pView);
-VK_RESULT VKAPI vkCreateColorAttachmentView(
- VK_DEVICE device,
- const VK_COLOR_ATTACHMENT_VIEW_CREATE_INFO* pCreateInfo,
- VK_COLOR_ATTACHMENT_VIEW* pView);
+VkResult VKAPI vkCreateColorAttachmentView(
+ VkDevice device,
+ const VkColorAttachmentViewCreateInfo* pCreateInfo,
+ VkColorAttachmentView* pView);
-VK_RESULT VKAPI vkCreateDepthStencilView(
- VK_DEVICE device,
- const VK_DEPTH_STENCIL_VIEW_CREATE_INFO* pCreateInfo,
- VK_DEPTH_STENCIL_VIEW* pView);
+VkResult VKAPI vkCreateDepthStencilView(
+ VkDevice device,
+ const VkDepthStencilViewCreateInfo* pCreateInfo,
+ VkDepthStencilView* pView);
// Shader functions
-VK_RESULT VKAPI vkCreateShader(
- VK_DEVICE device,
- const VK_SHADER_CREATE_INFO* pCreateInfo,
- VK_SHADER* pShader);
+VkResult VKAPI vkCreateShader(
+ VkDevice device,
+ const VkShaderCreateInfo* pCreateInfo,
+ VkShader* pShader);
// Pipeline functions
-VK_RESULT VKAPI vkCreateGraphicsPipeline(
- VK_DEVICE device,
- const VK_GRAPHICS_PIPELINE_CREATE_INFO* pCreateInfo,
- VK_PIPELINE* pPipeline);
+VkResult VKAPI vkCreateGraphicsPipeline(
+ VkDevice device,
+ const VkGraphicsPipelineCreateInfo* pCreateInfo,
+ VkPipeline* pPipeline);
-VK_RESULT VKAPI vkCreateGraphicsPipelineDerivative(
- VK_DEVICE device,
- const VK_GRAPHICS_PIPELINE_CREATE_INFO* pCreateInfo,
- VK_PIPELINE basePipeline,
- VK_PIPELINE* pPipeline);
+VkResult VKAPI vkCreateGraphicsPipelineDerivative(
+ VkDevice device,
+ const VkGraphicsPipelineCreateInfo* pCreateInfo,
+ VkPipeline basePipeline,
+ VkPipeline* pPipeline);
-VK_RESULT VKAPI vkCreateComputePipeline(
- VK_DEVICE device,
- const VK_COMPUTE_PIPELINE_CREATE_INFO* pCreateInfo,
- VK_PIPELINE* pPipeline);
+VkResult VKAPI vkCreateComputePipeline(
+ VkDevice device,
+ const VkComputePipelineCreateInfo* pCreateInfo,
+ VkPipeline* pPipeline);
-VK_RESULT VKAPI vkStorePipeline(
- VK_PIPELINE pipeline,
+VkResult VKAPI vkStorePipeline(
+ VkPipeline pipeline,
size_t* pDataSize,
void* pData);
-VK_RESULT VKAPI vkLoadPipeline(
- VK_DEVICE device,
+VkResult VKAPI vkLoadPipeline(
+ VkDevice device,
size_t dataSize,
const void* pData,
- VK_PIPELINE* pPipeline);
+ VkPipeline* pPipeline);
-VK_RESULT VKAPI vkLoadPipelineDerivative(
- VK_DEVICE device,
+VkResult VKAPI vkLoadPipelineDerivative(
+ VkDevice device,
size_t dataSize,
const void* pData,
- VK_PIPELINE basePipeline,
- VK_PIPELINE* pPipeline);
+ VkPipeline basePipeline,
+ VkPipeline* pPipeline);
// Sampler functions
-VK_RESULT VKAPI vkCreateSampler(
- VK_DEVICE device,
- const VK_SAMPLER_CREATE_INFO* pCreateInfo,
- VK_SAMPLER* pSampler);
+VkResult VKAPI vkCreateSampler(
+ VkDevice device,
+ const VkSamplerCreateInfo* pCreateInfo,
+ VkSampler* pSampler);
// Descriptor set functions
-VK_RESULT VKAPI vkCreateDescriptorSetLayout(
- VK_DEVICE device,
- const VK_DESCRIPTOR_SET_LAYOUT_CREATE_INFO* pCreateInfo,
- VK_DESCRIPTOR_SET_LAYOUT* pSetLayout);
+VkResult VKAPI vkCreateDescriptorSetLayout(
+ VkDevice device,
+ const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
+ VkDescriptorSetLayout* pSetLayout);
-VK_RESULT VKAPI vkCreateDescriptorSetLayoutChain(
- VK_DEVICE device,
+VkResult VKAPI vkCreateDescriptorSetLayoutChain(
+ VkDevice device,
uint32_t setLayoutArrayCount,
- const VK_DESCRIPTOR_SET_LAYOUT* pSetLayoutArray,
- VK_DESCRIPTOR_SET_LAYOUT_CHAIN* pLayoutChain);
+ const VkDescriptorSetLayout* pSetLayoutArray,
+ VkDescriptorSetLayoutChain* pLayoutChain);
-VK_RESULT VKAPI vkBeginDescriptorPoolUpdate(
- VK_DEVICE device,
- VK_DESCRIPTOR_UPDATE_MODE updateMode);
+VkResult VKAPI vkBeginDescriptorPoolUpdate(
+ VkDevice device,
+ VkDescriptorUpdateMode updateMode);
-VK_RESULT VKAPI vkEndDescriptorPoolUpdate(
- VK_DEVICE device,
- VK_CMD_BUFFER cmd);
+VkResult VKAPI vkEndDescriptorPoolUpdate(
+ VkDevice device,
+ VkCmdBuffer cmd);
-VK_RESULT VKAPI vkCreateDescriptorPool(
- VK_DEVICE device,
- VK_DESCRIPTOR_POOL_USAGE poolUsage,
+VkResult VKAPI vkCreateDescriptorPool(
+ VkDevice device,
+ VkDescriptorPoolUsage poolUsage,
uint32_t maxSets,
- const VK_DESCRIPTOR_POOL_CREATE_INFO* pCreateInfo,
- VK_DESCRIPTOR_POOL* pDescriptorPool);
+ const VkDescriptorPoolCreateInfo* pCreateInfo,
+ VkDescriptorPool* pDescriptorPool);
-VK_RESULT VKAPI vkResetDescriptorPool(
- VK_DESCRIPTOR_POOL descriptorPool);
+VkResult VKAPI vkResetDescriptorPool(
+ VkDescriptorPool descriptorPool);
-VK_RESULT VKAPI vkAllocDescriptorSets(
- VK_DESCRIPTOR_POOL descriptorPool,
- VK_DESCRIPTOR_SET_USAGE setUsage,
+VkResult VKAPI vkAllocDescriptorSets(
+ VkDescriptorPool descriptorPool,
+ VkDescriptorSetUsage setUsage,
uint32_t count,
- const VK_DESCRIPTOR_SET_LAYOUT* pSetLayouts,
- VK_DESCRIPTOR_SET* pDescriptorSets,
+ const VkDescriptorSetLayout* pSetLayouts,
+ VkDescriptorSet* pDescriptorSets,
uint32_t* pCount);
void VKAPI vkClearDescriptorSets(
- VK_DESCRIPTOR_POOL descriptorPool,
+ VkDescriptorPool descriptorPool,
uint32_t count,
- const VK_DESCRIPTOR_SET* pDescriptorSets);
+ const VkDescriptorSet* pDescriptorSets);
void VKAPI vkUpdateDescriptors(
- VK_DESCRIPTOR_SET descriptorSet,
+ VkDescriptorSet descriptorSet,
uint32_t updateCount,
const void** pUpdateArray);
// State object functions
-VK_RESULT VKAPI vkCreateDynamicViewportState(
- VK_DEVICE device,
- const VK_DYNAMIC_VP_STATE_CREATE_INFO* pCreateInfo,
- VK_DYNAMIC_VP_STATE_OBJECT* pState);
+VkResult VKAPI vkCreateDynamicViewportState(
+ VkDevice device,
+ const VkDynamicVpStateCreateInfo* pCreateInfo,
+ VkDynamicVpStateObject* pState);
-VK_RESULT VKAPI vkCreateDynamicRasterState(
- VK_DEVICE device,
- const VK_DYNAMIC_RS_STATE_CREATE_INFO* pCreateInfo,
- VK_DYNAMIC_RS_STATE_OBJECT* pState);
+VkResult VKAPI vkCreateDynamicRasterState(
+ VkDevice device,
+ const VkDynamicRsStateCreateInfo* pCreateInfo,
+ VkDynamicRsStateObject* pState);
-VK_RESULT VKAPI vkCreateDynamicColorBlendState(
- VK_DEVICE device,
- const VK_DYNAMIC_CB_STATE_CREATE_INFO* pCreateInfo,
- VK_DYNAMIC_CB_STATE_OBJECT* pState);
+VkResult VKAPI vkCreateDynamicColorBlendState(
+ VkDevice device,
+ const VkDynamicCbStateCreateInfo* pCreateInfo,
+ VkDynamicCbStateObject* pState);
-VK_RESULT VKAPI vkCreateDynamicDepthStencilState(
- VK_DEVICE device,
- const VK_DYNAMIC_DS_STATE_CREATE_INFO* pCreateInfo,
- VK_DYNAMIC_DS_STATE_OBJECT* pState);
+VkResult VKAPI vkCreateDynamicDepthStencilState(
+ VkDevice device,
+ const VkDynamicDsStateCreateInfo* pCreateInfo,
+ VkDynamicDsStateObject* pState);
// Command buffer functions
-VK_RESULT VKAPI vkCreateCommandBuffer(
- VK_DEVICE device,
- const VK_CMD_BUFFER_CREATE_INFO* pCreateInfo,
- VK_CMD_BUFFER* pCmdBuffer);
+VkResult VKAPI vkCreateCommandBuffer(
+ VkDevice device,
+ const VkCmdBufferCreateInfo* pCreateInfo,
+ VkCmdBuffer* pCmdBuffer);
-VK_RESULT VKAPI vkBeginCommandBuffer(
- VK_CMD_BUFFER cmdBuffer,
- const VK_CMD_BUFFER_BEGIN_INFO* pBeginInfo);
+VkResult VKAPI vkBeginCommandBuffer(
+ VkCmdBuffer cmdBuffer,
+ const VkCmdBufferBeginInfo* pBeginInfo);
-VK_RESULT VKAPI vkEndCommandBuffer(
- VK_CMD_BUFFER cmdBuffer);
+VkResult VKAPI vkEndCommandBuffer(
+ VkCmdBuffer cmdBuffer);
-VK_RESULT VKAPI vkResetCommandBuffer(
- VK_CMD_BUFFER cmdBuffer);
+VkResult VKAPI vkResetCommandBuffer(
+ VkCmdBuffer cmdBuffer);
// Command buffer building functions
void VKAPI vkCmdBindPipeline(
- VK_CMD_BUFFER cmdBuffer,
- VK_PIPELINE_BIND_POINT pipelineBindPoint,
- VK_PIPELINE pipeline);
+ VkCmdBuffer cmdBuffer,
+ VkPipelineBindPoint pipelineBindPoint,
+ VkPipeline pipeline);
void VKAPI vkCmdBindDynamicStateObject(
- VK_CMD_BUFFER cmdBuffer,
- VK_STATE_BIND_POINT stateBindPoint,
- VK_DYNAMIC_STATE_OBJECT dynamicState);
+ VkCmdBuffer cmdBuffer,
+ VkStateBindPoint stateBindPoint,
+ VkDynamicStateObject dynamicState);
void VKAPI vkCmdBindDescriptorSets(
- VK_CMD_BUFFER cmdBuffer,
- VK_PIPELINE_BIND_POINT pipelineBindPoint,
- VK_DESCRIPTOR_SET_LAYOUT_CHAIN layoutChain,
+ VkCmdBuffer cmdBuffer,
+ VkPipelineBindPoint pipelineBindPoint,
+ VkDescriptorSetLayoutChain layoutChain,
uint32_t layoutChainSlot,
uint32_t count,
- const VK_DESCRIPTOR_SET* pDescriptorSets,
+ const VkDescriptorSet* pDescriptorSets,
const uint32_t * pUserData);
void VKAPI vkCmdBindIndexBuffer(
- VK_CMD_BUFFER cmdBuffer,
- VK_BUFFER buffer,
- VK_GPU_SIZE offset,
- VK_INDEX_TYPE indexType);
+ VkCmdBuffer cmdBuffer,
+ VkBuffer buffer,
+ VkGpuSize offset,
+ VkIndexType indexType);
void VKAPI vkCmdBindVertexBuffer(
- VK_CMD_BUFFER cmdBuffer,
- VK_BUFFER buffer,
- VK_GPU_SIZE offset,
+ VkCmdBuffer cmdBuffer,
+ VkBuffer buffer,
+ VkGpuSize offset,
uint32_t binding);
void VKAPI vkCmdDraw(
- VK_CMD_BUFFER cmdBuffer,
+ VkCmdBuffer cmdBuffer,
uint32_t firstVertex,
uint32_t vertexCount,
uint32_t firstInstance,
uint32_t instanceCount);
void VKAPI vkCmdDrawIndexed(
- VK_CMD_BUFFER cmdBuffer,
+ VkCmdBuffer cmdBuffer,
uint32_t firstIndex,
uint32_t indexCount,
int32_t vertexOffset,
@@ -2858,198 +2858,198 @@ void VKAPI vkCmdDrawIndexed(
uint32_t instanceCount);
void VKAPI vkCmdDrawIndirect(
- VK_CMD_BUFFER cmdBuffer,
- VK_BUFFER buffer,
- VK_GPU_SIZE offset,
+ VkCmdBuffer cmdBuffer,
+ VkBuffer buffer,
+ VkGpuSize offset,
uint32_t count,
uint32_t stride);
void VKAPI vkCmdDrawIndexedIndirect(
- VK_CMD_BUFFER cmdBuffer,
- VK_BUFFER buffer,
- VK_GPU_SIZE offset,
+ VkCmdBuffer cmdBuffer,
+ VkBuffer buffer,
+ VkGpuSize offset,
uint32_t count,
uint32_t stride);
void VKAPI vkCmdDispatch(
- VK_CMD_BUFFER cmdBuffer,
+ VkCmdBuffer cmdBuffer,
uint32_t x,
uint32_t y,
uint32_t z);
void VKAPI vkCmdDispatchIndirect(
- VK_CMD_BUFFER cmdBuffer,
- VK_BUFFER buffer,
- VK_GPU_SIZE offset);
+ VkCmdBuffer cmdBuffer,
+ VkBuffer buffer,
+ VkGpuSize offset);
void VKAPI vkCmdCopyBuffer(
- VK_CMD_BUFFER cmdBuffer,
- VK_BUFFER srcBuffer,
- VK_BUFFER destBuffer,
+ VkCmdBuffer cmdBuffer,
+ VkBuffer srcBuffer,
+ VkBuffer destBuffer,
uint32_t regionCount,
- const VK_BUFFER_COPY* pRegions);
+ const VkBufferCopy* pRegions);
void VKAPI vkCmdCopyImage(
- VK_CMD_BUFFER cmdBuffer,
- VK_IMAGE srcImage,
- VK_IMAGE_LAYOUT srcImageLayout,
- VK_IMAGE destImage,
- VK_IMAGE_LAYOUT destImageLayout,
+ VkCmdBuffer cmdBuffer,
+ VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkImage destImage,
+ VkImageLayout destImageLayout,
uint32_t regionCount,
- const VK_IMAGE_COPY* pRegions);
+ const VkImageCopy* pRegions);
void VKAPI vkCmdBlitImage(
- VK_CMD_BUFFER cmdBuffer,
- VK_IMAGE srcImage,
- VK_IMAGE_LAYOUT srcImageLayout,
- VK_IMAGE destImage,
- VK_IMAGE_LAYOUT destImageLayout,
+ VkCmdBuffer cmdBuffer,
+ VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkImage destImage,
+ VkImageLayout destImageLayout,
uint32_t regionCount,
- const VK_IMAGE_BLIT* pRegions);
+ const VkImageBlit* pRegions);
void VKAPI vkCmdCopyBufferToImage(
- VK_CMD_BUFFER cmdBuffer,
- VK_BUFFER srcBuffer,
- VK_IMAGE destImage,
- VK_IMAGE_LAYOUT destImageLayout,
+ VkCmdBuffer cmdBuffer,
+ VkBuffer srcBuffer,
+ VkImage destImage,
+ VkImageLayout destImageLayout,
uint32_t regionCount,
- const VK_BUFFER_IMAGE_COPY* pRegions);
+ const VkBufferImageCopy* pRegions);
void VKAPI vkCmdCopyImageToBuffer(
- VK_CMD_BUFFER cmdBuffer,
- VK_IMAGE srcImage,
- VK_IMAGE_LAYOUT srcImageLayout,
- VK_BUFFER destBuffer,
+ VkCmdBuffer cmdBuffer,
+ VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkBuffer destBuffer,
uint32_t regionCount,
- const VK_BUFFER_IMAGE_COPY* pRegions);
+ const VkBufferImageCopy* pRegions);
void VKAPI vkCmdCloneImageData(
- VK_CMD_BUFFER cmdBuffer,
- VK_IMAGE srcImage,
- VK_IMAGE_LAYOUT srcImageLayout,
- VK_IMAGE destImage,
- VK_IMAGE_LAYOUT destImageLayout);
+ VkCmdBuffer cmdBuffer,
+ VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkImage destImage,
+ VkImageLayout destImageLayout);
void VKAPI vkCmdUpdateBuffer(
- VK_CMD_BUFFER cmdBuffer,
- VK_BUFFER destBuffer,
- VK_GPU_SIZE destOffset,
- VK_GPU_SIZE dataSize,
+ VkCmdBuffer cmdBuffer,
+ VkBuffer destBuffer,
+ VkGpuSize destOffset,
+ VkGpuSize dataSize,
const uint32_t* pData);
void VKAPI vkCmdFillBuffer(
- VK_CMD_BUFFER cmdBuffer,
- VK_BUFFER destBuffer,
- VK_GPU_SIZE destOffset,
- VK_GPU_SIZE fillSize,
+ VkCmdBuffer cmdBuffer,
+ VkBuffer destBuffer,
+ VkGpuSize destOffset,
+ VkGpuSize fillSize,
uint32_t data);
void VKAPI vkCmdClearColorImage(
- VK_CMD_BUFFER cmdBuffer,
- VK_IMAGE image,
- VK_IMAGE_LAYOUT imageLayout,
- VK_CLEAR_COLOR color,
+ VkCmdBuffer cmdBuffer,
+ VkImage image,
+ VkImageLayout imageLayout,
+ VkClearColor color,
uint32_t rangeCount,
- const VK_IMAGE_SUBRESOURCE_RANGE* pRanges);
+ const VkImageSubresourceRange* pRanges);
void VKAPI vkCmdClearDepthStencil(
- VK_CMD_BUFFER cmdBuffer,
- VK_IMAGE image,
- VK_IMAGE_LAYOUT imageLayout,
+ VkCmdBuffer cmdBuffer,
+ VkImage image,
+ VkImageLayout imageLayout,
float depth,
uint32_t stencil,
uint32_t rangeCount,
- const VK_IMAGE_SUBRESOURCE_RANGE* pRanges);
+ const VkImageSubresourceRange* pRanges);
void VKAPI vkCmdResolveImage(
- VK_CMD_BUFFER cmdBuffer,
- VK_IMAGE srcImage,
- VK_IMAGE_LAYOUT srcImageLayout,
- VK_IMAGE destImage,
- VK_IMAGE_LAYOUT destImageLayout,
+ VkCmdBuffer cmdBuffer,
+ VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkImage destImage,
+ VkImageLayout destImageLayout,
uint32_t rectCount,
- const VK_IMAGE_RESOLVE* pRects);
+ const VkImageResolve* pRects);
void VKAPI vkCmdSetEvent(
- VK_CMD_BUFFER cmdBuffer,
- VK_EVENT event,
- VK_PIPE_EVENT pipeEvent);
+ VkCmdBuffer cmdBuffer,
+ VkEvent event,
+ VkPipeEvent pipeEvent);
void VKAPI vkCmdResetEvent(
- VK_CMD_BUFFER cmdBuffer,
- VK_EVENT event,
- VK_PIPE_EVENT pipeEvent);
+ VkCmdBuffer cmdBuffer,
+ VkEvent event,
+ VkPipeEvent pipeEvent);
void VKAPI vkCmdWaitEvents(
- VK_CMD_BUFFER cmdBuffer,
- const VK_EVENT_WAIT_INFO* pWaitInfo);
+ VkCmdBuffer cmdBuffer,
+ const VkEventWaitInfo* pWaitInfo);
void VKAPI vkCmdPipelineBarrier(
- VK_CMD_BUFFER cmdBuffer,
- const VK_PIPELINE_BARRIER* pBarrier);
+ VkCmdBuffer cmdBuffer,
+ const VkPipelineBarrier* pBarrier);
void VKAPI vkCmdBeginQuery(
- VK_CMD_BUFFER cmdBuffer,
- VK_QUERY_POOL queryPool,
+ VkCmdBuffer cmdBuffer,
+ VkQueryPool queryPool,
uint32_t slot,
- VK_FLAGS flags);
+ VkFlags flags);
void VKAPI vkCmdEndQuery(
- VK_CMD_BUFFER cmdBuffer,
- VK_QUERY_POOL queryPool,
+ VkCmdBuffer cmdBuffer,
+ VkQueryPool queryPool,
uint32_t slot);
void VKAPI vkCmdResetQueryPool(
- VK_CMD_BUFFER cmdBuffer,
- VK_QUERY_POOL queryPool,
+ VkCmdBuffer cmdBuffer,
+ VkQueryPool queryPool,
uint32_t startQuery,
uint32_t queryCount);
void VKAPI vkCmdWriteTimestamp(
- VK_CMD_BUFFER cmdBuffer,
- VK_TIMESTAMP_TYPE timestampType,
- VK_BUFFER destBuffer,
- VK_GPU_SIZE destOffset);
+ VkCmdBuffer cmdBuffer,
+ VkTimestampType timestampType,
+ VkBuffer destBuffer,
+ VkGpuSize destOffset);
void VKAPI vkCmdInitAtomicCounters(
- VK_CMD_BUFFER cmdBuffer,
- VK_PIPELINE_BIND_POINT pipelineBindPoint,
+ VkCmdBuffer cmdBuffer,
+ VkPipelineBindPoint pipelineBindPoint,
uint32_t startCounter,
uint32_t counterCount,
const uint32_t* pData);
void VKAPI vkCmdLoadAtomicCounters(
- VK_CMD_BUFFER cmdBuffer,
- VK_PIPELINE_BIND_POINT pipelineBindPoint,
+ VkCmdBuffer cmdBuffer,
+ VkPipelineBindPoint pipelineBindPoint,
uint32_t startCounter,
uint32_t counterCount,
- VK_BUFFER srcBuffer,
- VK_GPU_SIZE srcOffset);
+ VkBuffer srcBuffer,
+ VkGpuSize srcOffset);
void VKAPI vkCmdSaveAtomicCounters(
- VK_CMD_BUFFER cmdBuffer,
- VK_PIPELINE_BIND_POINT pipelineBindPoint,
+ VkCmdBuffer cmdBuffer,
+ VkPipelineBindPoint pipelineBindPoint,
uint32_t startCounter,
uint32_t counterCount,
- VK_BUFFER destBuffer,
- VK_GPU_SIZE destOffset);
+ VkBuffer destBuffer,
+ VkGpuSize destOffset);
-VK_RESULT VKAPI vkCreateFramebuffer(
- VK_DEVICE device,
- const VK_FRAMEBUFFER_CREATE_INFO* pCreateInfo,
- VK_FRAMEBUFFER* pFramebuffer);
+VkResult VKAPI vkCreateFramebuffer(
+ VkDevice device,
+ const VkFramebufferCreateInfo* pCreateInfo,
+ VkFramebuffer* pFramebuffer);
-VK_RESULT VKAPI vkCreateRenderPass(
- VK_DEVICE device,
- const VK_RENDER_PASS_CREATE_INFO* pCreateInfo,
- VK_RENDER_PASS* pRenderPass);
+VkResult VKAPI vkCreateRenderPass(
+ VkDevice device,
+ const VkRenderPassCreateInfo* pCreateInfo,
+ VkRenderPass* pRenderPass);
void VKAPI vkCmdBeginRenderPass(
- VK_CMD_BUFFER cmdBuffer,
- const VK_RENDER_PASS_BEGIN* pRenderPassBegin);
+ VkCmdBuffer cmdBuffer,
+ const VkRenderPassBegin* pRenderPassBegin);
void VKAPI vkCmdEndRenderPass(
- VK_CMD_BUFFER cmdBuffer,
- VK_RENDER_PASS renderPass);
+ VkCmdBuffer cmdBuffer,
+ VkRenderPass renderPass);
#endif // VK_PROTOTYPES
diff --git a/layers/basic.cpp b/layers/basic.cpp
index 402adb68..9501c13e 100644
--- a/layers/basic.cpp
+++ b/layers/basic.cpp
@@ -49,21 +49,21 @@ static VK_LAYER_DISPATCH_TABLE * initLayerTable(const VK_BASE_LAYER_OBJECT *gpuw
return it->second;
}
- layer_initialize_dispatch_table(pTable, gpuw->pGPA, (VK_PHYSICAL_GPU) gpuw->nextObject);
+ layer_initialize_dispatch_table(pTable, gpuw->pGPA, (VkPhysicalGpu) gpuw->nextObject);
return pTable;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkLayerExtension1(VK_DEVICE device)
+VK_LAYER_EXPORT VkResult VKAPI vkLayerExtension1(VkDevice device)
{
printf("In vkLayerExtension1() call w/ device: %p\n", (void*)device);
printf("vkLayerExtension1 returning SUCCESS\n");
return VK_SUCCESS;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkGetExtensionSupport(VK_PHYSICAL_GPU gpu, const char* pExtName)
+VK_LAYER_EXPORT VkResult VKAPI vkGetExtensionSupport(VkPhysicalGpu gpu, const char* pExtName)
{
- VK_RESULT result;
+ VkResult result;
VK_BASE_LAYER_OBJECT* gpuw = (VK_BASE_LAYER_OBJECT *) gpu;
/* This entrypoint is NOT going to init it's own dispatch table since loader calls here early */
@@ -77,7 +77,7 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkGetExtensionSupport(VK_PHYSICAL_GPU gpu, const
{
printf("At start of wrapped vkGetExtensionSupport() call w/ gpu: %p\n", (void*)gpu);
VK_LAYER_DISPATCH_TABLE* pTable = tableMap[gpuw];
- result = pTable->GetExtensionSupport((VK_PHYSICAL_GPU)gpuw->nextObject, pExtName);
+ result = pTable->GetExtensionSupport((VkPhysicalGpu)gpuw->nextObject, pExtName);
printf("Completed wrapped vkGetExtensionSupport() call w/ gpu: %p\n", (void*)gpu);
} else
{
@@ -86,29 +86,29 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkGetExtensionSupport(VK_PHYSICAL_GPU gpu, const
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDevice(VK_PHYSICAL_GPU gpu, const VkDeviceCreateInfo* pCreateInfo, VK_DEVICE* pDevice)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDevice(VkPhysicalGpu gpu, const VkDeviceCreateInfo* pCreateInfo, VkDevice* pDevice)
{
VK_BASE_LAYER_OBJECT* gpuw = (VK_BASE_LAYER_OBJECT *) gpu;
VK_LAYER_DISPATCH_TABLE* pTable = tableMap[gpuw];
printf("At start of wrapped vkCreateDevice() call w/ gpu: %p\n", (void*)gpu);
- VK_RESULT result = pTable->CreateDevice((VK_PHYSICAL_GPU)gpuw->nextObject, pCreateInfo, pDevice);
+ VkResult result = pTable->CreateDevice((VkPhysicalGpu)gpuw->nextObject, pCreateInfo, pDevice);
// create a mapping for the device object into the dispatch table
tableMap.emplace(*pDevice, pTable);
printf("Completed wrapped vkCreateDevice() call w/ pDevice, Device %p: %p\n", (void*)pDevice, (void *) *pDevice);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkGetFormatInfo(VK_DEVICE device, VK_FORMAT format, VK_FORMAT_INFO_TYPE infoType, size_t* pDataSize, void* pData)
+VK_LAYER_EXPORT VkResult VKAPI vkGetFormatInfo(VkDevice device, VkFormat format, VkFormatInfoType infoType, size_t* pDataSize, void* pData)
{
VK_LAYER_DISPATCH_TABLE* pTable = tableMap[device];
printf("At start of wrapped vkGetFormatInfo() call w/ device: %p\n", (void*)device);
- VK_RESULT result = pTable->GetFormatInfo(device, format, infoType, pDataSize, pData);
+ VkResult result = pTable->GetFormatInfo(device, format, infoType, pDataSize, pData);
printf("Completed wrapped vkGetFormatInfo() call w/ device: %p\n", (void*)device);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkEnumerateLayers(VK_PHYSICAL_GPU gpu, size_t maxLayerCount, size_t maxStringSize, size_t* pOutLayerCount, char* const* pOutLayers, void* pReserved)
+VK_LAYER_EXPORT VkResult VKAPI vkEnumerateLayers(VkPhysicalGpu gpu, size_t maxLayerCount, size_t maxStringSize, size_t* pOutLayerCount, char* const* pOutLayers, void* pReserved)
{
if (gpu != NULL)
{
@@ -116,7 +116,7 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkEnumerateLayers(VK_PHYSICAL_GPU gpu, size_t ma
VK_LAYER_DISPATCH_TABLE* pTable = initLayerTable(gpuw);
printf("At start of wrapped vkEnumerateLayers() call w/ gpu: %p\n", gpu);
- VK_RESULT result = pTable->EnumerateLayers((VK_PHYSICAL_GPU)gpuw->nextObject, maxLayerCount, maxStringSize, pOutLayerCount, pOutLayers, pReserved);
+ VkResult result = pTable->EnumerateLayers((VkPhysicalGpu)gpuw->nextObject, maxLayerCount, maxStringSize, pOutLayerCount, pOutLayers, pReserved);
return result;
} else
{
@@ -125,11 +125,11 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkEnumerateLayers(VK_PHYSICAL_GPU gpu, size_t ma
// Example of a layer that is only compatible with Intel's GPUs
VK_BASE_LAYER_OBJECT* gpuw = (VK_BASE_LAYER_OBJECT*) pReserved;
- vkGetGpuInfoType fpGetGpuInfo;
- VK_PHYSICAL_GPU_PROPERTIES gpuProps;
- size_t dataSize = sizeof(VK_PHYSICAL_GPU_PROPERTIES);
- fpGetGpuInfo = (vkGetGpuInfoType) gpuw->pGPA((VK_PHYSICAL_GPU) gpuw->nextObject, "vkGetGpuInfo");
- fpGetGpuInfo((VK_PHYSICAL_GPU) gpuw->nextObject, VK_INFO_TYPE_PHYSICAL_GPU_PROPERTIES, &dataSize, &gpuProps);
+ PFN_vkGetGpuInfo fpGetGpuInfo;
+ VkPhysicalGpuProperties gpuProps;
+ size_t dataSize = sizeof(VkPhysicalGpuProperties);
+ fpGetGpuInfo = (PFN_vkGetGpuInfo) gpuw->pGPA((VkPhysicalGpu) gpuw->nextObject, "vkGetGpuInfo");
+ fpGetGpuInfo((VkPhysicalGpu) gpuw->nextObject, VK_INFO_TYPE_PHYSICAL_GPU_PROPERTIES, &dataSize, &gpuProps);
if (gpuProps.vendorId == 0x8086)
{
*pOutLayerCount = 1;
@@ -142,7 +142,7 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkEnumerateLayers(VK_PHYSICAL_GPU gpu, size_t ma
}
}
-VK_LAYER_EXPORT void * VKAPI vkGetProcAddr(VK_PHYSICAL_GPU gpu, const char* pName)
+VK_LAYER_EXPORT void * VKAPI vkGetProcAddr(VkPhysicalGpu gpu, const char* pName)
{
if (gpu == NULL)
return NULL;
@@ -165,6 +165,6 @@ VK_LAYER_EXPORT void * VKAPI vkGetProcAddr(VK_PHYSICAL_GPU gpu, const char* pNam
VK_BASE_LAYER_OBJECT* gpuw = (VK_BASE_LAYER_OBJECT *) gpu;
if (gpuw->pGPA == NULL)
return NULL;
- return gpuw->pGPA((VK_PHYSICAL_GPU) gpuw->nextObject, pName);
+ return gpuw->pGPA((VkPhysicalGpu) gpuw->nextObject, pName);
}
}
diff --git a/layers/draw_state.cpp b/layers/draw_state.cpp
index 5e4a95fe..6ff0fcee 100644
--- a/layers/draw_state.cpp
+++ b/layers/draw_state.cpp
@@ -41,18 +41,18 @@
#include "loader_platform.h"
#include "layers_msg.h"
-unordered_map<VK_SAMPLER, SAMPLER_NODE*> sampleMap;
-unordered_map<VK_IMAGE_VIEW, IMAGE_NODE*> imageMap;
-unordered_map<VK_BUFFER_VIEW, BUFFER_NODE*> bufferMap;
-unordered_map<VK_DYNAMIC_STATE_OBJECT, DYNAMIC_STATE_NODE*> dynamicStateMap;
-unordered_map<VK_PIPELINE, PIPELINE_NODE*> pipelineMap;
-unordered_map<VK_DESCRIPTOR_POOL, POOL_NODE*> poolMap;
-unordered_map<VK_DESCRIPTOR_SET, SET_NODE*> setMap;
-unordered_map<VK_DESCRIPTOR_SET_LAYOUT, LAYOUT_NODE*> layoutMap;
+unordered_map<VkSampler, SAMPLER_NODE*> sampleMap;
+unordered_map<VkImageView, IMAGE_NODE*> imageMap;
+unordered_map<VkBufferView, BUFFER_NODE*> bufferMap;
+unordered_map<VkDynamicStateObject, DYNAMIC_STATE_NODE*> dynamicStateMap;
+unordered_map<VkPipeline, PIPELINE_NODE*> pipelineMap;
+unordered_map<VkDescriptorPool, POOL_NODE*> poolMap;
+unordered_map<VkDescriptorSet, SET_NODE*> setMap;
+unordered_map<VkDescriptorSetLayout, LAYOUT_NODE*> layoutMap;
// Map for layout chains
-unordered_map<VK_CMD_BUFFER, GLOBAL_CB_NODE*> cmdBufferMap;
-unordered_map<VK_RENDER_PASS, VK_RENDER_PASS_CREATE_INFO*> renderPassMap;
-unordered_map<VK_FRAMEBUFFER, VK_FRAMEBUFFER_CREATE_INFO*> frameBufferMap;
+unordered_map<VkCmdBuffer, GLOBAL_CB_NODE*> cmdBufferMap;
+unordered_map<VkRenderPass, VkRenderPassCreateInfo*> renderPassMap;
+unordered_map<VkFramebuffer, VkFramebufferCreateInfo*> frameBufferMap;
static VK_LAYER_DISPATCH_TABLE nextTable;
static VK_BASE_LAYER_OBJECT *pCurObj;
@@ -176,7 +176,7 @@ static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0};
// Then need to synchronize the accesses based on cmd buffer so that if I'm reading state on one cmd buffer, updates
// to that same cmd buffer by separate thread are not changing state from underneath us
// Track the last cmd buffer touched by this thread
-static VK_CMD_BUFFER g_lastCmdBuffer[MAX_TID] = {NULL};
+static VkCmdBuffer g_lastCmdBuffer[MAX_TID] = {NULL};
// Track the last group of CBs touched for displaying to dot file
static GLOBAL_CB_NODE* g_pLastTouchedCB[NUM_COMMAND_BUFFERS_TO_DISPLAY] = {NULL};
static uint32_t g_lastTouchedCBIndex = 0;
@@ -184,14 +184,14 @@ static uint32_t g_lastTouchedCBIndex = 0;
static GLOBAL_CB_NODE* g_lastGlobalCB = NULL;
static PIPELINE_NODE* g_lastBoundPipeline = NULL;
static DYNAMIC_STATE_NODE* g_lastBoundDynamicState[VK_NUM_STATE_BIND_POINT] = {NULL};
-static VK_DESCRIPTOR_SET g_lastBoundDescriptorSet = NULL;
+static VkDescriptorSet g_lastBoundDescriptorSet = NULL;
#define MAX_BINDING 0xFFFFFFFF // Default vtxBinding value in CB Node to identify if no vtxBinding set
//static DYNAMIC_STATE_NODE* g_pDynamicStateHead[VK_NUM_STATE_BIND_POINT] = {0};
-static void insertDynamicState(const VK_DYNAMIC_STATE_OBJECT state, const GENERIC_HEADER* pCreateInfo, VK_STATE_BIND_POINT bindPoint)
+static void insertDynamicState(const VkDynamicStateObject state, const GENERIC_HEADER* pCreateInfo, VkStateBindPoint bindPoint)
{
- VK_DYNAMIC_VP_STATE_CREATE_INFO* pVPCI = NULL;
+ VkDynamicVpStateCreateInfo* pVPCI = NULL;
size_t scSize = 0;
size_t vpSize = 0;
loader_platform_thread_lock_mutex(&globalLock);
@@ -199,23 +199,23 @@ static void insertDynamicState(const VK_DYNAMIC_STATE_OBJECT state, const GENERI
pStateNode->stateObj = state;
switch (pCreateInfo->sType) {
case VK_STRUCTURE_TYPE_DYNAMIC_VP_STATE_CREATE_INFO:
- memcpy(&pStateNode->create_info, pCreateInfo, sizeof(VK_DYNAMIC_VP_STATE_CREATE_INFO));
- pVPCI = (VK_DYNAMIC_VP_STATE_CREATE_INFO*)pCreateInfo;
- pStateNode->create_info.vpci.pScissors = new VK_RECT[pStateNode->create_info.vpci.viewportAndScissorCount];
- pStateNode->create_info.vpci.pViewports = new VK_VIEWPORT[pStateNode->create_info.vpci.viewportAndScissorCount];
- scSize = pVPCI->viewportAndScissorCount * sizeof(VK_RECT);
- vpSize = pVPCI->viewportAndScissorCount * sizeof(VK_VIEWPORT);
+ memcpy(&pStateNode->create_info, pCreateInfo, sizeof(VkDynamicVpStateCreateInfo));
+ pVPCI = (VkDynamicVpStateCreateInfo*)pCreateInfo;
+ pStateNode->create_info.vpci.pScissors = new VkRect[pStateNode->create_info.vpci.viewportAndScissorCount];
+ pStateNode->create_info.vpci.pViewports = new VkViewport[pStateNode->create_info.vpci.viewportAndScissorCount];
+ scSize = pVPCI->viewportAndScissorCount * sizeof(VkRect);
+ vpSize = pVPCI->viewportAndScissorCount * sizeof(VkViewport);
memcpy((void*)pStateNode->create_info.vpci.pScissors, pVPCI->pScissors, scSize);
memcpy((void*)pStateNode->create_info.vpci.pViewports, pVPCI->pViewports, vpSize);
break;
case VK_STRUCTURE_TYPE_DYNAMIC_RS_STATE_CREATE_INFO:
- memcpy(&pStateNode->create_info, pCreateInfo, sizeof(VK_DYNAMIC_RS_STATE_CREATE_INFO));
+ memcpy(&pStateNode->create_info, pCreateInfo, sizeof(VkDynamicRsStateCreateInfo));
break;
case VK_STRUCTURE_TYPE_DYNAMIC_CB_STATE_CREATE_INFO:
- memcpy(&pStateNode->create_info, pCreateInfo, sizeof(VK_DYNAMIC_CB_STATE_CREATE_INFO));
+ memcpy(&pStateNode->create_info, pCreateInfo, sizeof(VkDynamicCbStateCreateInfo));
break;
case VK_STRUCTURE_TYPE_DYNAMIC_DS_STATE_CREATE_INFO:
- memcpy(&pStateNode->create_info, pCreateInfo, sizeof(VK_DYNAMIC_DS_STATE_CREATE_INFO));
+ memcpy(&pStateNode->create_info, pCreateInfo, sizeof(VkDynamicDsStateCreateInfo));
break;
default:
assert(0);
@@ -228,7 +228,7 @@ static void insertDynamicState(const VK_DYNAMIC_STATE_OBJECT state, const GENERI
// Free all allocated nodes for Dynamic State objs
static void deleteDynamicState()
{
- for (unordered_map<VK_DYNAMIC_STATE_OBJECT, DYNAMIC_STATE_NODE*>::iterator ii=dynamicStateMap.begin(); ii!=dynamicStateMap.end(); ++ii) {
+ for (unordered_map<VkDynamicStateObject, DYNAMIC_STATE_NODE*>::iterator ii=dynamicStateMap.begin(); ii!=dynamicStateMap.end(); ++ii) {
if (VK_STRUCTURE_TYPE_DYNAMIC_VP_STATE_CREATE_INFO == (*ii).second->create_info.vpci.sType) {
delete[] (*ii).second->create_info.vpci.pScissors;
delete[] (*ii).second->create_info.vpci.pViewports;
@@ -239,11 +239,11 @@ static void deleteDynamicState()
// Free all sampler nodes
static void deleteSamplers()
{
- for (unordered_map<VK_SAMPLER, SAMPLER_NODE*>::iterator ii=sampleMap.begin(); ii!=sampleMap.end(); ++ii) {
+ for (unordered_map<VkSampler, SAMPLER_NODE*>::iterator ii=sampleMap.begin(); ii!=sampleMap.end(); ++ii) {
delete (*ii).second;
}
}
-static VK_IMAGE_VIEW_CREATE_INFO* getImageViewCreateInfo(VK_IMAGE_VIEW view)
+static VkImageViewCreateInfo* getImageViewCreateInfo(VkImageView view)
{
loader_platform_thread_lock_mutex(&globalLock);
if (imageMap.find(view) == imageMap.end()) {
@@ -258,11 +258,11 @@ static VK_IMAGE_VIEW_CREATE_INFO* getImageViewCreateInfo(VK_IMAGE_VIEW view)
// Free all image nodes
static void deleteImages()
{
- for (unordered_map<VK_IMAGE_VIEW, IMAGE_NODE*>::iterator ii=imageMap.begin(); ii!=imageMap.end(); ++ii) {
+ for (unordered_map<VkImageView, IMAGE_NODE*>::iterator ii=imageMap.begin(); ii!=imageMap.end(); ++ii) {
delete (*ii).second;
}
}
-static VkBufferViewCreateInfo* getBufferViewCreateInfo(VK_BUFFER_VIEW view)
+static VkBufferViewCreateInfo* getBufferViewCreateInfo(VkBufferView view)
{
loader_platform_thread_lock_mutex(&globalLock);
if (bufferMap.find(view) == bufferMap.end()) {
@@ -277,13 +277,13 @@ static VkBufferViewCreateInfo* getBufferViewCreateInfo(VK_BUFFER_VIEW view)
// Free all buffer nodes
static void deleteBuffers()
{
- for (unordered_map<VK_BUFFER_VIEW, BUFFER_NODE*>::iterator ii=bufferMap.begin(); ii!=bufferMap.end(); ++ii) {
+ for (unordered_map<VkBufferView, BUFFER_NODE*>::iterator ii=bufferMap.begin(); ii!=bufferMap.end(); ++ii) {
delete (*ii).second;
}
}
-static GLOBAL_CB_NODE* getCBNode(VK_CMD_BUFFER cb);
+static GLOBAL_CB_NODE* getCBNode(VkCmdBuffer cb);
-static void updateCBTracking(VK_CMD_BUFFER cb)
+static void updateCBTracking(VkCmdBuffer cb)
{
g_lastCmdBuffer[getTIDIndex()] = cb;
GLOBAL_CB_NODE* pCB = getCBNode(cb);
@@ -302,7 +302,7 @@ static void updateCBTracking(VK_CMD_BUFFER cb)
}
// Print the last bound dynamic state
-static void printDynamicState(const VK_CMD_BUFFER cb)
+static void printDynamicState(const VkCmdBuffer cb)
{
GLOBAL_CB_NODE* pCB = getCBNode(cb);
if (pCB) {
@@ -310,13 +310,13 @@ static void printDynamicState(const VK_CMD_BUFFER cb)
char str[4*1024];
for (uint32_t i = 0; i < VK_NUM_STATE_BIND_POINT; i++) {
if (pCB->lastBoundDynamicState[i]) {
- sprintf(str, "Reporting CreateInfo for currently bound %s object %p", string_VK_STATE_BIND_POINT((VK_STATE_BIND_POINT)i), pCB->lastBoundDynamicState[i]->stateObj);
+ sprintf(str, "Reporting CreateInfo for currently bound %s object %p", string_VkStateBindPoint((VkStateBindPoint)i), pCB->lastBoundDynamicState[i]->stateObj);
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, pCB->lastBoundDynamicState[i]->stateObj, 0, DRAWSTATE_NONE, "DS", str);
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, pCB->lastBoundDynamicState[i]->stateObj, 0, DRAWSTATE_NONE, "DS", dynamic_display(pCB->lastBoundDynamicState[i]->pCreateInfo, " ").c_str());
break;
}
else {
- sprintf(str, "No dynamic state of type %s bound", string_VK_STATE_BIND_POINT((VK_STATE_BIND_POINT)i));
+ sprintf(str, "No dynamic state of type %s bound", string_VkStateBindPoint((VkStateBindPoint)i));
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, DRAWSTATE_NONE, "DS", str);
}
}
@@ -329,7 +329,7 @@ static void printDynamicState(const VK_CMD_BUFFER cb)
}
}
// Retrieve pipeline node ptr for given pipeline object
-static PIPELINE_NODE* getPipeline(VK_PIPELINE pipeline)
+static PIPELINE_NODE* getPipeline(VkPipeline pipeline)
{
loader_platform_thread_lock_mutex(&globalLock);
if (pipelineMap.find(pipeline) == pipelineMap.end()) {
@@ -341,7 +341,7 @@ static PIPELINE_NODE* getPipeline(VK_PIPELINE pipeline)
}
// For given sampler, return a ptr to its Create Info struct, or NULL if sampler not found
-static VK_SAMPLER_CREATE_INFO* getSamplerCreateInfo(const VK_SAMPLER sampler)
+static VkSamplerCreateInfo* getSamplerCreateInfo(const VkSampler sampler)
{
loader_platform_thread_lock_mutex(&globalLock);
if (sampleMap.find(sampler) == sampleMap.end()) {
@@ -354,49 +354,49 @@ static VK_SAMPLER_CREATE_INFO* getSamplerCreateInfo(const VK_SAMPLER sampler)
// Init the pipeline mapping info based on pipeline create info LL tree
// Threading note : Calls to this function should wrapped in mutex
-static void initPipeline(PIPELINE_NODE* pPipeline, const VK_GRAPHICS_PIPELINE_CREATE_INFO* pCreateInfo)
+static void initPipeline(PIPELINE_NODE* pPipeline, const VkGraphicsPipelineCreateInfo* pCreateInfo)
{
// First init create info, we'll shadow the structs as we go down the tree
// TODO : Validate that no create info is incorrectly replicated
- memcpy(&pPipeline->graphicsPipelineCI, pCreateInfo, sizeof(VK_GRAPHICS_PIPELINE_CREATE_INFO));
+ memcpy(&pPipeline->graphicsPipelineCI, pCreateInfo, sizeof(VkGraphicsPipelineCreateInfo));
GENERIC_HEADER* pTrav = (GENERIC_HEADER*)pCreateInfo->pNext;
GENERIC_HEADER* pPrev = (GENERIC_HEADER*)&pPipeline->graphicsPipelineCI; // Hold prev ptr to tie chain of structs together
size_t bufferSize = 0;
- VK_PIPELINE_VERTEX_INPUT_CREATE_INFO* pVICI = NULL;
- VK_PIPELINE_CB_STATE_CREATE_INFO* pCBCI = NULL;
- VK_PIPELINE_SHADER_STAGE_CREATE_INFO* pTmpPSSCI = NULL;
+ VkPipelineVertexInputCreateInfo* pVICI = NULL;
+ VkPipelineCbStateCreateInfo* pCBCI = NULL;
+ VkPipelineShaderStageCreateInfo* pTmpPSSCI = NULL;
while (pTrav) {
switch (pTrav->sType) {
case VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO:
- pTmpPSSCI = (VK_PIPELINE_SHADER_STAGE_CREATE_INFO*)pTrav;
+ pTmpPSSCI = (VkPipelineShaderStageCreateInfo*)pTrav;
switch (pTmpPSSCI->shader.stage) {
case VK_SHADER_STAGE_VERTEX:
pPrev->pNext = &pPipeline->vsCI;
pPrev = (GENERIC_HEADER*)&pPipeline->vsCI;
- memcpy(&pPipeline->vsCI, pTmpPSSCI, sizeof(VK_PIPELINE_SHADER_STAGE_CREATE_INFO));
+ memcpy(&pPipeline->vsCI, pTmpPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
break;
case VK_SHADER_STAGE_TESS_CONTROL:
pPrev->pNext = &pPipeline->tcsCI;
pPrev = (GENERIC_HEADER*)&pPipeline->tcsCI;
- memcpy(&pPipeline->tcsCI, pTmpPSSCI, sizeof(VK_PIPELINE_SHADER_STAGE_CREATE_INFO));
+ memcpy(&pPipeline->tcsCI, pTmpPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
break;
case VK_SHADER_STAGE_TESS_EVALUATION:
pPrev->pNext = &pPipeline->tesCI;
pPrev = (GENERIC_HEADER*)&pPipeline->tesCI;
- memcpy(&pPipeline->tesCI, pTmpPSSCI, sizeof(VK_PIPELINE_SHADER_STAGE_CREATE_INFO));
+ memcpy(&pPipeline->tesCI, pTmpPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
break;
case VK_SHADER_STAGE_GEOMETRY:
pPrev->pNext = &pPipeline->gsCI;
pPrev = (GENERIC_HEADER*)&pPipeline->gsCI;
- memcpy(&pPipeline->gsCI, pTmpPSSCI, sizeof(VK_PIPELINE_SHADER_STAGE_CREATE_INFO));
+ memcpy(&pPipeline->gsCI, pTmpPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
break;
case VK_SHADER_STAGE_FRAGMENT:
pPrev->pNext = &pPipeline->fsCI;
pPrev = (GENERIC_HEADER*)&pPipeline->fsCI;
- memcpy(&pPipeline->fsCI, pTmpPSSCI, sizeof(VK_PIPELINE_SHADER_STAGE_CREATE_INFO));
+ memcpy(&pPipeline->fsCI, pTmpPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
break;
case VK_SHADER_STAGE_COMPUTE:
- // TODO : Flag error, CS is specified through VK_COMPUTE_PIPELINE_CREATE_INFO
+ // TODO : Flag error, CS is specified through VkComputePipelineCreateInfo
break;
default:
// TODO : Flag error
@@ -406,64 +406,64 @@ static void initPipeline(PIPELINE_NODE* pPipeline, const VK_GRAPHICS_PIPELINE_CR
case VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_CREATE_INFO:
pPrev->pNext = &pPipeline->vertexInputCI;
pPrev = (GENERIC_HEADER*)&pPipeline->vertexInputCI;
- memcpy((void*)&pPipeline->vertexInputCI, pTrav, sizeof(VK_PIPELINE_VERTEX_INPUT_CREATE_INFO));
+ memcpy((void*)&pPipeline->vertexInputCI, pTrav, sizeof(VkPipelineVertexInputCreateInfo));
// Copy embedded ptrs
- pVICI = (VK_PIPELINE_VERTEX_INPUT_CREATE_INFO*)pTrav;
+ pVICI = (VkPipelineVertexInputCreateInfo*)pTrav;
pPipeline->vtxBindingCount = pVICI->bindingCount;
if (pPipeline->vtxBindingCount) {
- pPipeline->pVertexBindingDescriptions = new VK_VERTEX_INPUT_BINDING_DESCRIPTION[pPipeline->vtxBindingCount];
- bufferSize = pPipeline->vtxBindingCount * sizeof(VK_VERTEX_INPUT_BINDING_DESCRIPTION);
- memcpy((void*)pPipeline->pVertexBindingDescriptions, ((VK_PIPELINE_VERTEX_INPUT_CREATE_INFO*)pTrav)->pVertexAttributeDescriptions, bufferSize);
+ pPipeline->pVertexBindingDescriptions = new VkVertexInputBindingDescription[pPipeline->vtxBindingCount];
+ bufferSize = pPipeline->vtxBindingCount * sizeof(VkVertexInputBindingDescription);
+ memcpy((void*)pPipeline->pVertexBindingDescriptions, ((VkPipelineVertexInputCreateInfo*)pTrav)->pVertexAttributeDescriptions, bufferSize);
}
pPipeline->vtxAttributeCount = pVICI->attributeCount;
if (pPipeline->vtxAttributeCount) {
- pPipeline->pVertexAttributeDescriptions = new VK_VERTEX_INPUT_ATTRIBUTE_DESCRIPTION[pPipeline->vtxAttributeCount];
- bufferSize = pPipeline->vtxAttributeCount * sizeof(VK_VERTEX_INPUT_ATTRIBUTE_DESCRIPTION);
- memcpy((void*)pPipeline->pVertexAttributeDescriptions, ((VK_PIPELINE_VERTEX_INPUT_CREATE_INFO*)pTrav)->pVertexAttributeDescriptions, bufferSize);
+ pPipeline->pVertexAttributeDescriptions = new VkVertexInputAttributeDescription[pPipeline->vtxAttributeCount];
+ bufferSize = pPipeline->vtxAttributeCount * sizeof(VkVertexInputAttributeDescription);
+ memcpy((void*)pPipeline->pVertexAttributeDescriptions, ((VkPipelineVertexInputCreateInfo*)pTrav)->pVertexAttributeDescriptions, bufferSize);
}
break;
case VK_STRUCTURE_TYPE_PIPELINE_IA_STATE_CREATE_INFO:
pPrev->pNext = &pPipeline->iaStateCI;
pPrev = (GENERIC_HEADER*)&pPipeline->iaStateCI;
- memcpy((void*)&pPipeline->iaStateCI, pTrav, sizeof(VK_PIPELINE_IA_STATE_CREATE_INFO));
+ memcpy((void*)&pPipeline->iaStateCI, pTrav, sizeof(VkPipelineIaStateCreateInfo));
break;
case VK_STRUCTURE_TYPE_PIPELINE_TESS_STATE_CREATE_INFO:
pPrev->pNext = &pPipeline->tessStateCI;
pPrev = (GENERIC_HEADER*)&pPipeline->tessStateCI;
- memcpy((void*)&pPipeline->tessStateCI, pTrav, sizeof(VK_PIPELINE_TESS_STATE_CREATE_INFO));
+ memcpy((void*)&pPipeline->tessStateCI, pTrav, sizeof(VkPipelineTessStateCreateInfo));
break;
case VK_STRUCTURE_TYPE_PIPELINE_VP_STATE_CREATE_INFO:
pPrev->pNext = &pPipeline->vpStateCI;
pPrev = (GENERIC_HEADER*)&pPipeline->vpStateCI;
- memcpy((void*)&pPipeline->vpStateCI, pTrav, sizeof(VK_PIPELINE_VP_STATE_CREATE_INFO));
+ memcpy((void*)&pPipeline->vpStateCI, pTrav, sizeof(VkPipelineVpStateCreateInfo));
break;
case VK_STRUCTURE_TYPE_PIPELINE_RS_STATE_CREATE_INFO:
pPrev->pNext = &pPipeline->rsStateCI;
pPrev = (GENERIC_HEADER*)&pPipeline->rsStateCI;
- memcpy((void*)&pPipeline->rsStateCI, pTrav, sizeof(VK_PIPELINE_RS_STATE_CREATE_INFO));
+ memcpy((void*)&pPipeline->rsStateCI, pTrav, sizeof(VkPipelineRsStateCreateInfo));
break;
case VK_STRUCTURE_TYPE_PIPELINE_MS_STATE_CREATE_INFO:
pPrev->pNext = &pPipeline->msStateCI;
pPrev = (GENERIC_HEADER*)&pPipeline->msStateCI;
- memcpy((void*)&pPipeline->msStateCI, pTrav, sizeof(VK_PIPELINE_MS_STATE_CREATE_INFO));
+ memcpy((void*)&pPipeline->msStateCI, pTrav, sizeof(VkPipelineMsStateCreateInfo));
break;
case VK_STRUCTURE_TYPE_PIPELINE_CB_STATE_CREATE_INFO:
pPrev->pNext = &pPipeline->cbStateCI;
pPrev = (GENERIC_HEADER*)&pPipeline->cbStateCI;
- memcpy((void*)&pPipeline->cbStateCI, pTrav, sizeof(VK_PIPELINE_CB_STATE_CREATE_INFO));
+ memcpy((void*)&pPipeline->cbStateCI, pTrav, sizeof(VkPipelineCbStateCreateInfo));
// Copy embedded ptrs
- pCBCI = (VK_PIPELINE_CB_STATE_CREATE_INFO*)pTrav;
+ pCBCI = (VkPipelineCbStateCreateInfo*)pTrav;
pPipeline->attachmentCount = pCBCI->attachmentCount;
if (pPipeline->attachmentCount) {
- pPipeline->pAttachments = new VK_PIPELINE_CB_ATTACHMENT_STATE[pPipeline->attachmentCount];
- bufferSize = pPipeline->attachmentCount * sizeof(VK_PIPELINE_CB_ATTACHMENT_STATE);
- memcpy((void*)pPipeline->pAttachments, ((VK_PIPELINE_CB_STATE_CREATE_INFO*)pTrav)->pAttachments, bufferSize);
+ pPipeline->pAttachments = new VkPipelineCbAttachmentState[pPipeline->attachmentCount];
+ bufferSize = pPipeline->attachmentCount * sizeof(VkPipelineCbAttachmentState);
+ memcpy((void*)pPipeline->pAttachments, ((VkPipelineCbStateCreateInfo*)pTrav)->pAttachments, bufferSize);
}
break;
case VK_STRUCTURE_TYPE_PIPELINE_DS_STATE_CREATE_INFO:
pPrev->pNext = &pPipeline->dsStateCI;
pPrev = (GENERIC_HEADER*)&pPipeline->dsStateCI;
- memcpy((void*)&pPipeline->dsStateCI, pTrav, sizeof(VK_PIPELINE_DS_STATE_CREATE_INFO));
+ memcpy((void*)&pPipeline->dsStateCI, pTrav, sizeof(VkPipelineDsStateCreateInfo));
break;
default:
assert(0);
@@ -476,7 +476,7 @@ static void initPipeline(PIPELINE_NODE* pPipeline, const VK_GRAPHICS_PIPELINE_CR
// Free the Pipeline nodes
static void deletePipelines()
{
- for (unordered_map<VK_PIPELINE, PIPELINE_NODE*>::iterator ii=pipelineMap.begin(); ii!=pipelineMap.end(); ++ii) {
+ for (unordered_map<VkPipeline, PIPELINE_NODE*>::iterator ii=pipelineMap.begin(); ii!=pipelineMap.end(); ++ii) {
if ((*ii).second->pVertexBindingDescriptions) {
delete[] (*ii).second->pVertexBindingDescriptions;
}
@@ -490,7 +490,7 @@ static void deletePipelines()
}
}
// For given pipeline, return number of MSAA samples, or one if MSAA disabled
-static uint32_t getNumSamples(const VK_PIPELINE pipeline)
+static uint32_t getNumSamples(const VkPipeline pipeline)
{
PIPELINE_NODE* pPipe = pipelineMap[pipeline];
if (VK_STRUCTURE_TYPE_PIPELINE_MS_STATE_CREATE_INFO == pPipe->msStateCI.sType) {
@@ -500,14 +500,14 @@ static uint32_t getNumSamples(const VK_PIPELINE pipeline)
return 1;
}
// Validate state related to the PSO
-static void validatePipelineState(const GLOBAL_CB_NODE* pCB, const VK_PIPELINE_BIND_POINT pipelineBindPoint, const VK_PIPELINE pipeline)
+static void validatePipelineState(const GLOBAL_CB_NODE* pCB, const VkPipelineBindPoint pipelineBindPoint, const VkPipeline pipeline)
{
if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
// Verify that any MSAA request in PSO matches sample# in bound FB
uint32_t psoNumSamples = getNumSamples(pipeline);
if (pCB->activeRenderPass) {
- VK_RENDER_PASS_CREATE_INFO* pRPCI = renderPassMap[pCB->activeRenderPass];
- VK_FRAMEBUFFER_CREATE_INFO* pFBCI = frameBufferMap[pCB->framebuffer];
+ VkRenderPassCreateInfo* pRPCI = renderPassMap[pCB->activeRenderPass];
+ VkFramebufferCreateInfo* pFBCI = frameBufferMap[pCB->framebuffer];
if (psoNumSamples != pFBCI->sampleCount) {
char str[1024];
sprintf(str, "Num samples mismatche! Binding PSO (%p) with %u samples while current RenderPass (%p) uses FB (%p) with %u samples!", (void*)pipeline, psoNumSamples, (void*)pCB->activeRenderPass, (void*)pCB->framebuffer, pFBCI->sampleCount);
@@ -526,7 +526,7 @@ static void validatePipelineState(const GLOBAL_CB_NODE* pCB, const VK_PIPELINE_B
// Block of code at start here specifically for managing/tracking DSs
// Return Pool node ptr for specified pool or else NULL
-static POOL_NODE* getPoolNode(VK_DESCRIPTOR_POOL pool)
+static POOL_NODE* getPoolNode(VkDescriptorPool pool)
{
loader_platform_thread_lock_mutex(&globalLock);
if (poolMap.find(pool) == poolMap.end()) {
@@ -537,7 +537,7 @@ static POOL_NODE* getPoolNode(VK_DESCRIPTOR_POOL pool)
return poolMap[pool];
}
// Return Set node ptr for specified set or else NULL
-static SET_NODE* getSetNode(VK_DESCRIPTOR_SET set)
+static SET_NODE* getSetNode(VkDescriptorSet set)
{
loader_platform_thread_lock_mutex(&globalLock);
if (setMap.find(set) == setMap.end()) {
@@ -549,7 +549,7 @@ static SET_NODE* getSetNode(VK_DESCRIPTOR_SET set)
}
// Return VK_TRUE if DS Exists and is within an vkBeginDescriptorPoolUpdate() call sequence, otherwise VK_FALSE
-static bool32_t dsUpdateActive(VK_DESCRIPTOR_SET ds)
+static bool32_t dsUpdateActive(VkDescriptorSet ds)
{
// Note, both "get" functions use global mutex so this guy does not
SET_NODE* pTrav = getSetNode(ds);
@@ -562,7 +562,7 @@ static bool32_t dsUpdateActive(VK_DESCRIPTOR_SET ds)
return VK_FALSE;
}
-static LAYOUT_NODE* getLayoutNode(const VK_DESCRIPTOR_SET_LAYOUT layout) {
+static LAYOUT_NODE* getLayoutNode(const VkDescriptorSetLayout layout) {
loader_platform_thread_lock_mutex(&globalLock);
if (layoutMap.find(layout) == layoutMap.end()) {
loader_platform_thread_unlock_mutex(&globalLock);
@@ -578,15 +578,15 @@ static uint32_t getUpdateBinding(const GENERIC_HEADER* pUpdateStruct)
switch (pUpdateStruct->sType)
{
case VK_STRUCTURE_TYPE_UPDATE_SAMPLERS:
- return ((VK_UPDATE_SAMPLERS*)pUpdateStruct)->binding;
+ return ((VkUpdateSamplers*)pUpdateStruct)->binding;
case VK_STRUCTURE_TYPE_UPDATE_SAMPLER_TEXTURES:
- return ((VK_UPDATE_SAMPLER_TEXTURES*)pUpdateStruct)->binding;
+ return ((VkUpdateSamplerTextures*)pUpdateStruct)->binding;
case VK_STRUCTURE_TYPE_UPDATE_IMAGES:
- return ((VK_UPDATE_IMAGES*)pUpdateStruct)->binding;
+ return ((VkUpdateImages*)pUpdateStruct)->binding;
case VK_STRUCTURE_TYPE_UPDATE_BUFFERS:
- return ((VK_UPDATE_BUFFERS*)pUpdateStruct)->binding;
+ return ((VkUpdateBuffers*)pUpdateStruct)->binding;
case VK_STRUCTURE_TYPE_UPDATE_AS_COPY:
- return ((VK_UPDATE_AS_COPY*)pUpdateStruct)->binding;
+ return ((VkUpdateAsCopy*)pUpdateStruct)->binding;
default:
// TODO : Flag specific error for this case
assert(0);
@@ -599,16 +599,16 @@ static uint32_t getUpdateArrayIndex(const GENERIC_HEADER* pUpdateStruct)
switch (pUpdateStruct->sType)
{
case VK_STRUCTURE_TYPE_UPDATE_SAMPLERS:
- return (((VK_UPDATE_SAMPLERS*)pUpdateStruct)->arrayIndex);
+ return (((VkUpdateSamplers*)pUpdateStruct)->arrayIndex);
case VK_STRUCTURE_TYPE_UPDATE_SAMPLER_TEXTURES:
- return (((VK_UPDATE_SAMPLER_TEXTURES*)pUpdateStruct)->arrayIndex);
+ return (((VkUpdateSamplerTextures*)pUpdateStruct)->arrayIndex);
case VK_STRUCTURE_TYPE_UPDATE_IMAGES:
- return (((VK_UPDATE_IMAGES*)pUpdateStruct)->arrayIndex);
+ return (((VkUpdateImages*)pUpdateStruct)->arrayIndex);
case VK_STRUCTURE_TYPE_UPDATE_BUFFERS:
- return (((VK_UPDATE_BUFFERS*)pUpdateStruct)->arrayIndex);
+ return (((VkUpdateBuffers*)pUpdateStruct)->arrayIndex);
case VK_STRUCTURE_TYPE_UPDATE_AS_COPY:
// TODO : Need to understand this case better and make sure code is correct
- return (((VK_UPDATE_AS_COPY*)pUpdateStruct)->arrayElement);
+ return (((VkUpdateAsCopy*)pUpdateStruct)->arrayElement);
default:
// TODO : Flag specific error for this case
assert(0);
@@ -621,16 +621,16 @@ static uint32_t getUpdateCount(const GENERIC_HEADER* pUpdateStruct)
switch (pUpdateStruct->sType)
{
case VK_STRUCTURE_TYPE_UPDATE_SAMPLERS:
- return (((VK_UPDATE_SAMPLERS*)pUpdateStruct)->count);
+ return (((VkUpdateSamplers*)pUpdateStruct)->count);
case VK_STRUCTURE_TYPE_UPDATE_SAMPLER_TEXTURES:
- return (((VK_UPDATE_SAMPLER_TEXTURES*)pUpdateStruct)->count);
+ return (((VkUpdateSamplerTextures*)pUpdateStruct)->count);
case VK_STRUCTURE_TYPE_UPDATE_IMAGES:
- return (((VK_UPDATE_IMAGES*)pUpdateStruct)->count);
+ return (((VkUpdateImages*)pUpdateStruct)->count);
case VK_STRUCTURE_TYPE_UPDATE_BUFFERS:
- return (((VK_UPDATE_BUFFERS*)pUpdateStruct)->count);
+ return (((VkUpdateBuffers*)pUpdateStruct)->count);
case VK_STRUCTURE_TYPE_UPDATE_AS_COPY:
// TODO : Need to understand this case better and make sure code is correct
- return (((VK_UPDATE_AS_COPY*)pUpdateStruct)->count);
+ return (((VkUpdateAsCopy*)pUpdateStruct)->count);
default:
// TODO : Flag specific error for this case
assert(0);
@@ -669,7 +669,7 @@ static uint32_t getUpdateEndIndex(const LAYOUT_NODE* pLayout, const GENERIC_HEAD
static bool32_t validateUpdateType(const LAYOUT_NODE* pLayout, const GENERIC_HEADER* pUpdateStruct)
{
// First get actual type of update
- VK_DESCRIPTOR_TYPE actualType;
+ VkDescriptorType actualType;
uint32_t i = 0;
switch (pUpdateStruct->sType)
{
@@ -680,13 +680,13 @@ static bool32_t validateUpdateType(const LAYOUT_NODE* pLayout, const GENERIC_HEA
actualType = VK_DESCRIPTOR_TYPE_SAMPLER_TEXTURE;
break;
case VK_STRUCTURE_TYPE_UPDATE_IMAGES:
- actualType = ((VK_UPDATE_IMAGES*)pUpdateStruct)->descriptorType;
+ actualType = ((VkUpdateImages*)pUpdateStruct)->descriptorType;
break;
case VK_STRUCTURE_TYPE_UPDATE_BUFFERS:
- actualType = ((VK_UPDATE_BUFFERS*)pUpdateStruct)->descriptorType;
+ actualType = ((VkUpdateBuffers*)pUpdateStruct)->descriptorType;
break;
case VK_STRUCTURE_TYPE_UPDATE_AS_COPY:
- actualType = ((VK_UPDATE_AS_COPY*)pUpdateStruct)->descriptorType;
+ actualType = ((VkUpdateAsCopy*)pUpdateStruct)->descriptorType;
break;
default:
// TODO : Flag specific error for this case
@@ -704,64 +704,64 @@ static bool32_t validateUpdateType(const LAYOUT_NODE* pLayout, const GENERIC_HEA
static GENERIC_HEADER* shadowUpdateNode(GENERIC_HEADER* pUpdate)
{
GENERIC_HEADER* pNewNode = NULL;
- VK_UPDATE_SAMPLERS* pUS = NULL;
- VK_UPDATE_SAMPLER_TEXTURES* pUST = NULL;
- VK_UPDATE_BUFFERS* pUB = NULL;
- VK_UPDATE_IMAGES* pUI = NULL;
- VK_UPDATE_AS_COPY* pUAC = NULL;
+ VkUpdateSamplers* pUS = NULL;
+ VkUpdateSamplerTextures* pUST = NULL;
+ VkUpdateBuffers* pUB = NULL;
+ VkUpdateImages* pUI = NULL;
+ VkUpdateAsCopy* pUAC = NULL;
size_t array_size = 0;
size_t base_array_size = 0;
size_t total_array_size = 0;
size_t baseBuffAddr = 0;
- VK_IMAGE_VIEW_ATTACH_INFO** ppLocalImageViews = NULL;
- VK_BUFFER_VIEW_ATTACH_INFO** ppLocalBufferViews = NULL;
+ VkImageViewAttachInfo** ppLocalImageViews = NULL;
+ VkBufferViewAttachInfo** ppLocalBufferViews = NULL;
char str[1024];
switch (pUpdate->sType)
{
case VK_STRUCTURE_TYPE_UPDATE_SAMPLERS:
- pUS = new VK_UPDATE_SAMPLERS;
+ pUS = new VkUpdateSamplers;
pNewNode = (GENERIC_HEADER*)pUS;
- memcpy(pUS, pUpdate, sizeof(VK_UPDATE_SAMPLERS));
- pUS->pSamplers = new VK_SAMPLER[pUS->count];
- array_size = sizeof(VK_SAMPLER) * pUS->count;
- memcpy((void*)pUS->pSamplers, ((VK_UPDATE_SAMPLERS*)pUpdate)->pSamplers, array_size);
+ memcpy(pUS, pUpdate, sizeof(VkUpdateSamplers));
+ pUS->pSamplers = new VkSampler[pUS->count];
+ array_size = sizeof(VkSampler) * pUS->count;
+ memcpy((void*)pUS->pSamplers, ((VkUpdateSamplers*)pUpdate)->pSamplers, array_size);
break;
case VK_STRUCTURE_TYPE_UPDATE_SAMPLER_TEXTURES:
- pUST = new VK_UPDATE_SAMPLER_TEXTURES;
+ pUST = new VkUpdateSamplerTextures;
pNewNode = (GENERIC_HEADER*)pUST;
- memcpy(pUST, pUpdate, sizeof(VK_UPDATE_SAMPLER_TEXTURES));
- pUST->pSamplerImageViews = new VK_SAMPLER_IMAGE_VIEW_INFO[pUST->count];
- array_size = sizeof(VK_SAMPLER_IMAGE_VIEW_INFO) * pUST->count;
- memcpy((void*)pUST->pSamplerImageViews, ((VK_UPDATE_SAMPLER_TEXTURES*)pUpdate)->pSamplerImageViews, array_size);
+ memcpy(pUST, pUpdate, sizeof(VkUpdateSamplerTextures));
+ pUST->pSamplerImageViews = new VkSamplerImageViewInfo[pUST->count];
+ array_size = sizeof(VkSamplerImageViewInfo) * pUST->count;
+ memcpy((void*)pUST->pSamplerImageViews, ((VkUpdateSamplerTextures*)pUpdate)->pSamplerImageViews, array_size);
for (uint32_t i = 0; i < pUST->count; i++) {
- VK_IMAGE_VIEW_ATTACH_INFO** ppIV = (VK_IMAGE_VIEW_ATTACH_INFO**)&pUST->pSamplerImageViews[i].pImageView;
- *ppIV = new VK_IMAGE_VIEW_ATTACH_INFO;
- memcpy((void*)*ppIV, ((VK_UPDATE_SAMPLER_TEXTURES*)pUpdate)->pSamplerImageViews[i].pImageView, sizeof(VK_IMAGE_VIEW_ATTACH_INFO));
+ VkImageViewAttachInfo** ppIV = (VkImageViewAttachInfo**)&pUST->pSamplerImageViews[i].pImageView;
+ *ppIV = new VkImageViewAttachInfo;
+ memcpy((void*)*ppIV, ((VkUpdateSamplerTextures*)pUpdate)->pSamplerImageViews[i].pImageView, sizeof(VkImageViewAttachInfo));
}
break;
case VK_STRUCTURE_TYPE_UPDATE_IMAGES:
- pUI = new VK_UPDATE_IMAGES;
+ pUI = new VkUpdateImages;
pNewNode = (GENERIC_HEADER*)pUI;
- memcpy(pUI, pUpdate, sizeof(VK_UPDATE_IMAGES));
- pUI->pImageViews = new VK_IMAGE_VIEW_ATTACH_INFO[pUI->count];
- array_size = (sizeof(VK_IMAGE_VIEW_ATTACH_INFO) * pUI->count);
- memcpy((void*)pUI->pImageViews, ((VK_UPDATE_IMAGES*)pUpdate)->pImageViews, array_size);
+ memcpy(pUI, pUpdate, sizeof(VkUpdateImages));
+ pUI->pImageViews = new VkImageViewAttachInfo[pUI->count];
+ array_size = (sizeof(VkImageViewAttachInfo) * pUI->count);
+ memcpy((void*)pUI->pImageViews, ((VkUpdateImages*)pUpdate)->pImageViews, array_size);
break;
case VK_STRUCTURE_TYPE_UPDATE_BUFFERS:
- pUB = new VK_UPDATE_BUFFERS;
+ pUB = new VkUpdateBuffers;
pNewNode = (GENERIC_HEADER*)pUB;
- memcpy(pUB, pUpdate, sizeof(VK_UPDATE_BUFFERS));
- pUB->pBufferViews = new VK_BUFFER_VIEW_ATTACH_INFO[pUB->count];
- array_size = (sizeof(VK_BUFFER_VIEW_ATTACH_INFO) * pUB->count);
- memcpy((void*)pUB->pBufferViews, ((VK_UPDATE_BUFFERS*)pUpdate)->pBufferViews, array_size);
+ memcpy(pUB, pUpdate, sizeof(VkUpdateBuffers));
+ pUB->pBufferViews = new VkBufferViewAttachInfo[pUB->count];
+ array_size = (sizeof(VkBufferViewAttachInfo) * pUB->count);
+ memcpy((void*)pUB->pBufferViews, ((VkUpdateBuffers*)pUpdate)->pBufferViews, array_size);
break;
case VK_STRUCTURE_TYPE_UPDATE_AS_COPY:
- pUAC = new VK_UPDATE_AS_COPY;
+ pUAC = new VkUpdateAsCopy;
pUpdate = (GENERIC_HEADER*)pUAC;
- memcpy(pUAC, pUpdate, sizeof(VK_UPDATE_AS_COPY));
+ memcpy(pUAC, pUpdate, sizeof(VkUpdateAsCopy));
break;
default:
- sprintf(str, "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree", string_VK_STRUCTURE_TYPE(pUpdate->sType), pUpdate->sType);
+ sprintf(str, "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree", string_VkStructureType(pUpdate->sType), pUpdate->sType);
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, DRAWSTATE_INVALID_UPDATE_STRUCT, "DS", str);
return NULL;
}
@@ -770,13 +770,13 @@ static GENERIC_HEADER* shadowUpdateNode(GENERIC_HEADER* pUpdate)
return pNewNode;
}
// For given ds, update its mapping based on ppUpdateArray
-static void dsUpdate(VK_DESCRIPTOR_SET ds, uint32_t updateCount, const void** ppUpdateArray)
+static void dsUpdate(VkDescriptorSet ds, uint32_t updateCount, const void** ppUpdateArray)
{
SET_NODE* pSet = getSetNode(ds);
loader_platform_thread_lock_mutex(&globalLock);
g_lastBoundDescriptorSet = pSet->set;
LAYOUT_NODE* pLayout = NULL;
- VK_DESCRIPTOR_SET_LAYOUT_CREATE_INFO* pLayoutCI = NULL;
+ VkDescriptorSetLayoutCreateInfo* pLayoutCI = NULL;
// TODO : If pCIList is NULL, flag error
// Perform all updates
for (uint32_t i = 0; i < updateCount; i++) {
@@ -785,7 +785,7 @@ static void dsUpdate(VK_DESCRIPTOR_SET ds, uint32_t updateCount, const void** pp
// Make sure that binding is within bounds
if (pLayout->createInfo.count < getUpdateBinding(pUpdate)) {
char str[1024];
- sprintf(str, "Descriptor Set %p does not have binding to match update binding %u for update type %s!", ds, getUpdateBinding(pUpdate), string_VK_STRUCTURE_TYPE(pUpdate->sType));
+ sprintf(str, "Descriptor Set %p does not have binding to match update binding %u for update type %s!", ds, getUpdateBinding(pUpdate), string_VkStructureType(pUpdate->sType));
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, ds, 0, DRAWSTATE_INVALID_UPDATE_INDEX, "DS", str);
}
else {
@@ -793,15 +793,15 @@ static void dsUpdate(VK_DESCRIPTOR_SET ds, uint32_t updateCount, const void** pp
if (getBindingEndIndex(pLayout, getUpdateBinding(pUpdate)) < getUpdateEndIndex(pLayout, pUpdate)) {
char str[48*1024]; // TODO : Keep count of layout CI structs and size this string dynamically based on that count
pLayoutCI = &pLayout->createInfo;
- string DSstr = vk_print_vk_descriptor_set_layout_create_info(pLayoutCI, "{DS} ");
- sprintf(str, "Descriptor update type of %s is out of bounds for matching binding %u in Layout w/ CI:\n%s!", string_VK_STRUCTURE_TYPE(pUpdate->sType), getUpdateBinding(pUpdate), DSstr.c_str());
+ string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS} ");
+ sprintf(str, "Descriptor update type of %s is out of bounds for matching binding %u in Layout w/ CI:\n%s!", string_VkStructureType(pUpdate->sType), getUpdateBinding(pUpdate), DSstr.c_str());
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, ds, 0, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS", str);
}
else { // TODO : should we skip update on a type mismatch or force it?
// Layout bindings match w/ update ok, now verify that update is of the right type
if (!validateUpdateType(pLayout, pUpdate)) {
char str[1024];
- sprintf(str, "Descriptor update type of %s does not match overlapping binding type!", string_VK_STRUCTURE_TYPE(pUpdate->sType));
+ sprintf(str, "Descriptor update type of %s does not match overlapping binding type!", string_VkStructureType(pUpdate->sType));
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, ds, 0, DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS", str);
}
else {
@@ -843,20 +843,20 @@ static void freeShadowUpdateTree(SET_NODE* pSet)
pFreeUpdate = pShadowUpdate;
pShadowUpdate = (GENERIC_HEADER*)pShadowUpdate->pNext;
uint32_t index = 0;
- VK_UPDATE_SAMPLERS* pUS = NULL;
- VK_UPDATE_SAMPLER_TEXTURES* pUST = NULL;
- VK_UPDATE_IMAGES* pUI = NULL;
- VK_UPDATE_BUFFERS* pUB = NULL;
+ VkUpdateSamplers* pUS = NULL;
+ VkUpdateSamplerTextures* pUST = NULL;
+ VkUpdateImages* pUI = NULL;
+ VkUpdateBuffers* pUB = NULL;
void** ppToFree = NULL;
switch (pFreeUpdate->sType)
{
case VK_STRUCTURE_TYPE_UPDATE_SAMPLERS:
- pUS = (VK_UPDATE_SAMPLERS*)pFreeUpdate;
+ pUS = (VkUpdateSamplers*)pFreeUpdate;
if (pUS->pSamplers)
delete[] pUS->pSamplers;
break;
case VK_STRUCTURE_TYPE_UPDATE_SAMPLER_TEXTURES:
- pUST = (VK_UPDATE_SAMPLER_TEXTURES*)pFreeUpdate;
+ pUST = (VkUpdateSamplerTextures*)pFreeUpdate;
if (pUST->pSamplerImageViews) {
for (index = 0; index < pUST->count; index++) {
if (pUST->pSamplerImageViews[index].pImageView) {
@@ -867,12 +867,12 @@ static void freeShadowUpdateTree(SET_NODE* pSet)
}
break;
case VK_STRUCTURE_TYPE_UPDATE_IMAGES:
- pUI = (VK_UPDATE_IMAGES*)pFreeUpdate;
+ pUI = (VkUpdateImages*)pFreeUpdate;
if (pUI->pImageViews)
delete[] pUI->pImageViews;
break;
case VK_STRUCTURE_TYPE_UPDATE_BUFFERS:
- pUB = (VK_UPDATE_BUFFERS*)pFreeUpdate;
+ pUB = (VkUpdateBuffers*)pFreeUpdate;
if (pUB->pBufferViews)
delete[] pUB->pBufferViews;
break;
@@ -889,7 +889,7 @@ static void freeShadowUpdateTree(SET_NODE* pSet)
// NOTE : Calls to this function should be wrapped in mutex
static void deletePools()
{
- for (unordered_map<VK_DESCRIPTOR_POOL, POOL_NODE*>::iterator ii=poolMap.begin(); ii!=poolMap.end(); ++ii) {
+ for (unordered_map<VkDescriptorPool, POOL_NODE*>::iterator ii=poolMap.begin(); ii!=poolMap.end(); ++ii) {
SET_NODE* pSet = (*ii).second->pSets;
SET_NODE* pFreeSet = pSet;
while (pSet) {
@@ -913,7 +913,7 @@ static void deletePools()
// NOTE : Calls to this function should be wrapped in mutex
static void deleteLayouts()
{
- for (unordered_map<VK_DESCRIPTOR_SET_LAYOUT, LAYOUT_NODE*>::iterator ii=layoutMap.begin(); ii!=layoutMap.end(); ++ii) {
+ for (unordered_map<VkDescriptorSetLayout, LAYOUT_NODE*>::iterator ii=layoutMap.begin(); ii!=layoutMap.end(); ++ii) {
LAYOUT_NODE* pLayout = (*ii).second;
if (pLayout->createInfo.pBinding) {
for (uint32_t i=0; i<pLayout->createInfo.count; i++) {
@@ -930,7 +930,7 @@ static void deleteLayouts()
}
// Currently clearing a set is removing all previous updates to that set
// TODO : Validate if this is correct clearing behavior
-static void clearDescriptorSet(VK_DESCRIPTOR_SET set)
+static void clearDescriptorSet(VkDescriptorSet set)
{
SET_NODE* pSet = getSetNode(set);
if (!pSet) {
@@ -943,7 +943,7 @@ static void clearDescriptorSet(VK_DESCRIPTOR_SET set)
}
}
-static void clearDescriptorPool(VK_DESCRIPTOR_POOL pool)
+static void clearDescriptorPool(VkDescriptorPool pool)
{
POOL_NODE* pPool = getPoolNode(pool);
if (!pPool) {
@@ -961,7 +961,7 @@ static void clearDescriptorPool(VK_DESCRIPTOR_POOL pool)
}
}
// Code here to manage the Cmd buffer LL
-static GLOBAL_CB_NODE* getCBNode(VK_CMD_BUFFER cb)
+static GLOBAL_CB_NODE* getCBNode(VkCmdBuffer cb)
{
loader_platform_thread_lock_mutex(&globalLock);
if (cmdBufferMap.find(cb) == cmdBufferMap.end()) {
@@ -975,7 +975,7 @@ static GLOBAL_CB_NODE* getCBNode(VK_CMD_BUFFER cb)
// NOTE : Calls to this function should be wrapped in mutex
static void deleteCmdBuffers()
{
- for (unordered_map<VK_CMD_BUFFER, GLOBAL_CB_NODE*>::iterator ii=cmdBufferMap.begin(); ii!=cmdBufferMap.end(); ++ii) {
+ for (unordered_map<VkCmdBuffer, GLOBAL_CB_NODE*>::iterator ii=cmdBufferMap.begin(); ii!=cmdBufferMap.end(); ++ii) {
while (!(*ii).second->pCmds.empty()) {
delete (*ii).second->pCmds.back();
(*ii).second->pCmds.pop_back();
@@ -999,7 +999,7 @@ static void addCmd(GLOBAL_CB_NODE* pCB, const CMD_TYPE cmd)
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, pCB->cmdBuffer, 0, DRAWSTATE_OUT_OF_MEMORY, "DS", str);
}
}
-static void resetCB(const VK_CMD_BUFFER cb)
+static void resetCB(const VkCmdBuffer cb)
{
GLOBAL_CB_NODE* pCB = getCBNode(cb);
if (pCB) {
@@ -1008,7 +1008,7 @@ static void resetCB(const VK_CMD_BUFFER cb)
pCB->pCmds.pop_back();
}
// Reset CB state
- VK_FLAGS saveFlags = pCB->flags;
+ VkFlags saveFlags = pCB->flags;
uint32_t saveQueueNodeIndex = pCB->queueNodeIndex;
memset(pCB, 0, sizeof(GLOBAL_CB_NODE));
pCB->cmdBuffer = cb;
@@ -1019,7 +1019,7 @@ static void resetCB(const VK_CMD_BUFFER cb)
}
// Set the last bound dynamic state of given type
// TODO : Need to track this per cmdBuffer and correlate cmdBuffer for Draw w/ last bound for that cmdBuffer?
-static void setLastBoundDynamicState(const VK_CMD_BUFFER cmdBuffer, const VK_DYNAMIC_STATE_OBJECT state, const VK_STATE_BIND_POINT sType)
+static void setLastBoundDynamicState(const VkCmdBuffer cmdBuffer, const VkDynamicStateObject state, const VkStateBindPoint sType)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -1044,7 +1044,7 @@ static void setLastBoundDynamicState(const VK_CMD_BUFFER cmdBuffer, const VK_DYN
}
}
// Print the last bound Gfx Pipeline
-static void printPipeline(const VK_CMD_BUFFER cb)
+static void printPipeline(const VkCmdBuffer cb)
{
GLOBAL_CB_NODE* pCB = getCBNode(cb);
if (pCB) {
@@ -1053,13 +1053,13 @@ static void printPipeline(const VK_CMD_BUFFER cb)
// nothing to print
}
else {
- string pipeStr = vk_print_vk_graphics_pipeline_create_info(&pPipeTrav->graphicsPipelineCI, "{DS}").c_str();
+ string pipeStr = vk_print_vkgraphicspipelinecreateinfo(&pPipeTrav->graphicsPipelineCI, "{DS}").c_str();
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, DRAWSTATE_NONE, "DS", pipeStr.c_str());
}
}
}
// Common Dot dumping code
-static void dsCoreDumpDot(const VK_DESCRIPTOR_SET ds, FILE* pOutFile)
+static void dsCoreDumpDot(const VkDescriptorSet ds, FILE* pOutFile)
{
SET_NODE* pSet = getSetNode(ds);
if (pSet) {
@@ -1067,7 +1067,7 @@ static void dsCoreDumpDot(const VK_DESCRIPTOR_SET ds, FILE* pOutFile)
char tmp_str[4*1024];
fprintf(pOutFile, "subgraph cluster_DescriptorPool\n{\nlabel=\"Descriptor Pool\"\n");
sprintf(tmp_str, "Pool (%p)", pPool->pool);
- char* pGVstr = vk_gv_print_vk_descriptor_pool_create_info(&pPool->createInfo, tmp_str);
+ char* pGVstr = vk_gv_print_vkdescriptorpoolcreateinfo(&pPool->createInfo, tmp_str);
fprintf(pOutFile, "%s", pGVstr);
free(pGVstr);
fprintf(pOutFile, "subgraph cluster_DescriptorSet\n{\nlabel=\"Descriptor Set (%p)\"\n", pSet->set);
@@ -1076,7 +1076,7 @@ static void dsCoreDumpDot(const VK_DESCRIPTOR_SET ds, FILE* pOutFile)
uint32_t layout_index = 0;
++layout_index;
sprintf(tmp_str, "LAYOUT%u", layout_index);
- pGVstr = vk_gv_print_vk_descriptor_set_layout_create_info(&pLayout->createInfo, tmp_str);
+ pGVstr = vk_gv_print_vkdescriptorsetlayoutcreateinfo(&pLayout->createInfo, tmp_str);
fprintf(pOutFile, "%s", pGVstr);
free(pGVstr);
if (pSet->pUpdateStructs) {
@@ -1089,7 +1089,7 @@ static void dsCoreDumpDot(const VK_DESCRIPTOR_SET ds, FILE* pOutFile)
uint32_t i = 0;
for (i=0; i < pSet->descriptorCount; i++) {
if (pSet->ppDescriptors[i]) {
- fprintf(pOutFile, "<TR><TD PORT=\"slot%u\">slot%u</TD><TD>%s</TD></TR>", i, i, string_VK_STRUCTURE_TYPE(pSet->ppDescriptors[i]->sType));
+ fprintf(pOutFile, "<TR><TD PORT=\"slot%u\">slot%u</TD><TD>%s</TD></TR>", i, i, string_VkStructureType(pSet->ppDescriptors[i]->sType));
}
}
#define NUM_COLORS 7
@@ -1104,13 +1104,13 @@ static void dsCoreDumpDot(const VK_DESCRIPTOR_SET ds, FILE* pOutFile)
uint32_t colorIdx = 0;
fprintf(pOutFile, "</TABLE>>\n];\n");
// Now add the views that are mapped to active descriptors
- VK_UPDATE_SAMPLERS* pUS = NULL;
- VK_UPDATE_SAMPLER_TEXTURES* pUST = NULL;
- VK_UPDATE_IMAGES* pUI = NULL;
- VK_UPDATE_BUFFERS* pUB = NULL;
- VK_UPDATE_AS_COPY* pUAC = NULL;
- VK_SAMPLER_CREATE_INFO* pSCI = NULL;
- VK_IMAGE_VIEW_CREATE_INFO* pIVCI = NULL;
+ VkUpdateSamplers* pUS = NULL;
+ VkUpdateSamplerTextures* pUST = NULL;
+ VkUpdateImages* pUI = NULL;
+ VkUpdateBuffers* pUB = NULL;
+ VkUpdateAsCopy* pUAC = NULL;
+ VkSamplerCreateInfo* pSCI = NULL;
+ VkImageViewCreateInfo* pIVCI = NULL;
VkBufferViewCreateInfo* pBVCI = NULL;
void** ppNextPtr = NULL;
void* pSaveNext = NULL;
@@ -1119,40 +1119,40 @@ static void dsCoreDumpDot(const VK_DESCRIPTOR_SET ds, FILE* pOutFile)
switch (pSet->ppDescriptors[i]->sType)
{
case VK_STRUCTURE_TYPE_UPDATE_SAMPLERS:
- pUS = (VK_UPDATE_SAMPLERS*)pSet->ppDescriptors[i];
+ pUS = (VkUpdateSamplers*)pSet->ppDescriptors[i];
pSCI = getSamplerCreateInfo(pUS->pSamplers[i-pUS->arrayIndex]);
if (pSCI) {
sprintf(tmp_str, "SAMPLER%u", i);
- fprintf(pOutFile, "%s", vk_gv_print_vk_sampler_create_info(pSCI, tmp_str));
+ fprintf(pOutFile, "%s", vk_gv_print_vksamplercreateinfo(pSCI, tmp_str));
fprintf(pOutFile, "\"DESCRIPTORS\":slot%u -> \"%s\" [color=\"#%s\"];\n", i, tmp_str, edgeColors[colorIdx].c_str());
}
break;
case VK_STRUCTURE_TYPE_UPDATE_SAMPLER_TEXTURES:
- pUST = (VK_UPDATE_SAMPLER_TEXTURES*)pSet->ppDescriptors[i];
+ pUST = (VkUpdateSamplerTextures*)pSet->ppDescriptors[i];
pSCI = getSamplerCreateInfo(pUST->pSamplerImageViews[i-pUST->arrayIndex].sampler);
if (pSCI) {
sprintf(tmp_str, "SAMPLER%u", i);
- fprintf(pOutFile, "%s", vk_gv_print_vk_sampler_create_info(pSCI, tmp_str));
+ fprintf(pOutFile, "%s", vk_gv_print_vksamplercreateinfo(pSCI, tmp_str));
fprintf(pOutFile, "\"DESCRIPTORS\":slot%u -> \"%s\" [color=\"#%s\"];\n", i, tmp_str, edgeColors[colorIdx].c_str());
}
pIVCI = getImageViewCreateInfo(pUST->pSamplerImageViews[i-pUST->arrayIndex].pImageView->view);
if (pIVCI) {
sprintf(tmp_str, "IMAGE_VIEW%u", i);
- fprintf(pOutFile, "%s", vk_gv_print_vk_image_view_create_info(pIVCI, tmp_str));
+ fprintf(pOutFile, "%s", vk_gv_print_vkimageviewcreateinfo(pIVCI, tmp_str));
fprintf(pOutFile, "\"DESCRIPTORS\":slot%u -> \"%s\" [color=\"#%s\"];\n", i, tmp_str, edgeColors[colorIdx].c_str());
}
break;
case VK_STRUCTURE_TYPE_UPDATE_IMAGES:
- pUI = (VK_UPDATE_IMAGES*)pSet->ppDescriptors[i];
+ pUI = (VkUpdateImages*)pSet->ppDescriptors[i];
pIVCI = getImageViewCreateInfo(pUI->pImageViews[i-pUI->arrayIndex].view);
if (pIVCI) {
sprintf(tmp_str, "IMAGE_VIEW%u", i);
- fprintf(pOutFile, "%s", vk_gv_print_vk_image_view_create_info(pIVCI, tmp_str));
+ fprintf(pOutFile, "%s", vk_gv_print_vkimageviewcreateinfo(pIVCI, tmp_str));
fprintf(pOutFile, "\"DESCRIPTORS\":slot%u -> \"%s\" [color=\"#%s\"];\n", i, tmp_str, edgeColors[colorIdx].c_str());
}
break;
case VK_STRUCTURE_TYPE_UPDATE_BUFFERS:
- pUB = (VK_UPDATE_BUFFERS*)pSet->ppDescriptors[i];
+ pUB = (VkUpdateBuffers*)pSet->ppDescriptors[i];
pBVCI = getBufferViewCreateInfo(pUB->pBufferViews[i-pUB->arrayIndex].view);
if (pBVCI) {
sprintf(tmp_str, "BUFFER_VIEW%u", i);
@@ -1161,14 +1161,14 @@ static void dsCoreDumpDot(const VK_DESCRIPTOR_SET ds, FILE* pOutFile)
}
break;
case VK_STRUCTURE_TYPE_UPDATE_AS_COPY:
- pUAC = (VK_UPDATE_AS_COPY*)pSet->ppDescriptors[i];
+ pUAC = (VkUpdateAsCopy*)pSet->ppDescriptors[i];
// TODO : Need to validate this code
// Save off pNext and set to NULL while printing this struct, then restore it
ppNextPtr = (void**)&pUAC->pNext;
pSaveNext = *ppNextPtr;
*ppNextPtr = NULL;
sprintf(tmp_str, "UPDATE_AS_COPY%u", i);
- fprintf(pOutFile, "%s", vk_gv_print_vk_update_as_copy(pUAC, tmp_str));
+ fprintf(pOutFile, "%s", vk_gv_print_vkupdateascopy(pUAC, tmp_str));
fprintf(pOutFile, "\"DESCRIPTORS\":slot%u -> \"%s\" [color=\"#%s\"];\n", i, tmp_str, edgeColors[colorIdx].c_str());
// Restore next ptr
*ppNextPtr = pSaveNext;
@@ -1185,7 +1185,7 @@ static void dsCoreDumpDot(const VK_DESCRIPTOR_SET ds, FILE* pOutFile)
}
}
// Dump subgraph w/ DS info
-static void dsDumpDot(const VK_CMD_BUFFER cb, FILE* pOutFile)
+static void dsDumpDot(const VkCmdBuffer cb, FILE* pOutFile)
{
GLOBAL_CB_NODE* pCB = getCBNode(cb);
if (pCB && pCB->lastBoundDescriptorSet) {
@@ -1237,14 +1237,14 @@ static void dumpGlobalDotFile(char *outFileName)
char* pGVstr = NULL;
for (uint32_t i = 0; i < VK_NUM_STATE_BIND_POINT; i++) {
if (g_lastBoundDynamicState[i] && g_lastBoundDynamicState[i]->pCreateInfo) {
- pGVstr = dynamic_gv_display(g_lastBoundDynamicState[i]->pCreateInfo, string_VK_STATE_BIND_POINT((VK_STATE_BIND_POINT)i));
+ pGVstr = dynamic_gv_display(g_lastBoundDynamicState[i]->pCreateInfo, string_VkStateBindPoint((VkStateBindPoint)i));
fprintf(pOutFile, "%s", pGVstr);
free(pGVstr);
}
}
fprintf(pOutFile, "}\n"); // close dynamicState subgraph
fprintf(pOutFile, "subgraph cluster_PipelineStateObject\n{\nlabel=\"Pipeline State Object\"\n");
- pGVstr = vk_gv_print_vk_graphics_pipeline_create_info(&pPipeTrav->graphicsPipelineCI, "PSO HEAD");
+ pGVstr = vk_gv_print_vkgraphicspipelinecreateinfo(&pPipeTrav->graphicsPipelineCI, "PSO HEAD");
fprintf(pOutFile, "%s", pGVstr);
free(pGVstr);
fprintf(pOutFile, "}\n");
@@ -1254,7 +1254,7 @@ static void dumpGlobalDotFile(char *outFileName)
}
}
// Dump a GraphViz dot file showing the pipeline for a given CB
-static void dumpDotFile(const VK_CMD_BUFFER cb, string outFileName)
+static void dumpDotFile(const VkCmdBuffer cb, string outFileName)
{
GLOBAL_CB_NODE* pCB = getCBNode(cb);
if (pCB) {
@@ -1267,14 +1267,14 @@ static void dumpDotFile(const VK_CMD_BUFFER cb, string outFileName)
char* pGVstr = NULL;
for (uint32_t i = 0; i < VK_NUM_STATE_BIND_POINT; i++) {
if (pCB->lastBoundDynamicState[i] && pCB->lastBoundDynamicState[i]->pCreateInfo) {
- pGVstr = dynamic_gv_display(pCB->lastBoundDynamicState[i]->pCreateInfo, string_VK_STATE_BIND_POINT((VK_STATE_BIND_POINT)i));
+ pGVstr = dynamic_gv_display(pCB->lastBoundDynamicState[i]->pCreateInfo, string_VkStateBindPoint((VkStateBindPoint)i));
fprintf(pOutFile, "%s", pGVstr);
free(pGVstr);
}
}
fprintf(pOutFile, "}\n"); // close dynamicState subgraph
fprintf(pOutFile, "subgraph cluster_PipelineStateObject\n{\nlabel=\"Pipeline State Object\"\n");
- pGVstr = vk_gv_print_vk_graphics_pipeline_create_info(&pPipeTrav->graphicsPipelineCI, "PSO HEAD");
+ pGVstr = vk_gv_print_vkgraphicspipelinecreateinfo(&pPipeTrav->graphicsPipelineCI, "PSO HEAD");
fprintf(pOutFile, "%s", pGVstr);
free(pGVstr);
fprintf(pOutFile, "}\n");
@@ -1285,7 +1285,7 @@ static void dumpDotFile(const VK_CMD_BUFFER cb, string outFileName)
}
}
// Verify VB Buffer binding
-static void validateVBBinding(const VK_CMD_BUFFER cb)
+static void validateVBBinding(const VkCmdBuffer cb)
{
GLOBAL_CB_NODE* pCB = getCBNode(cb);
if (pCB && pCB->lastBoundPipeline) {
@@ -1310,7 +1310,7 @@ static void validateVBBinding(const VK_CMD_BUFFER cb)
}
}
else {
- string tmpStr = vk_print_vk_vertex_input_binding_description(&pPipeTrav->pVertexBindingDescriptions[pCB->lastVtxBinding], "{DS}INFO : ").c_str();
+ string tmpStr = vk_print_vkvertexinputbindingdescription(&pPipeTrav->pVertexBindingDescriptions[pCB->lastVtxBinding], "{DS}INFO : ").c_str();
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, DRAWSTATE_NONE, "DS", tmpStr.c_str());
}
}
@@ -1318,7 +1318,7 @@ static void validateVBBinding(const VK_CMD_BUFFER cb)
}
}
// Print details of DS config to stdout
-static void printDSConfig(const VK_CMD_BUFFER cb)
+static void printDSConfig(const VkCmdBuffer cb)
{
char tmp_str[1024];
char ds_config_str[1024*256] = {0}; // TODO : Currently making this buffer HUGE w/o overrun protection. Need to be smarter, start smaller, and grow as needed.
@@ -1329,7 +1329,7 @@ static void printDSConfig(const VK_CMD_BUFFER cb)
// Print out pool details
sprintf(tmp_str, "Details for pool %p.", (void*)pPool->pool);
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, DRAWSTATE_NONE, "DS", tmp_str);
- string poolStr = vk_print_vk_descriptor_pool_create_info(&pPool->createInfo, " ");
+ string poolStr = vk_print_vkdescriptorpoolcreateinfo(&pPool->createInfo, " ");
sprintf(ds_config_str, "%s", poolStr.c_str());
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, DRAWSTATE_NONE, "DS", ds_config_str);
// Print out set details
@@ -1342,7 +1342,7 @@ static void printDSConfig(const VK_CMD_BUFFER cb)
sprintf(tmp_str, "Layout #%u, (object %p) for DS %p.", index+1, (void*)pLayout->layout, (void*)pSet->set);
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, DRAWSTATE_NONE, "DS", tmp_str);
sprintf(prefix, " [L%u] ", index);
- string DSLstr = vk_print_vk_descriptor_set_layout_create_info(&pLayout->createInfo, prefix).c_str();
+ string DSLstr = vk_print_vkdescriptorsetlayoutcreateinfo(&pLayout->createInfo, prefix).c_str();
sprintf(ds_config_str, "%s", DSLstr.c_str());
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, DRAWSTATE_NONE, "DS", ds_config_str);
index++;
@@ -1362,7 +1362,7 @@ static void printDSConfig(const VK_CMD_BUFFER cb)
}
}
-static void printCB(const VK_CMD_BUFFER cb)
+static void printCB(const VkCmdBuffer cb)
{
GLOBAL_CB_NODE* pCB = getCBNode(cb);
if (pCB) {
@@ -1380,7 +1380,7 @@ static void printCB(const VK_CMD_BUFFER cb)
}
-static void synchAndPrintDSConfig(const VK_CMD_BUFFER cb)
+static void synchAndPrintDSConfig(const VkCmdBuffer cb)
{
printDSConfig(cb);
printPipeline(cb);
@@ -1420,13 +1420,13 @@ static void initDrawState(void)
}
// initialize Layer dispatch table
// TODO handle multiple GPUs
- vkGetProcAddrType fpNextGPA;
+ PFN_vkGetProcAddr fpNextGPA;
fpNextGPA = pCurObj->pGPA;
assert(fpNextGPA);
- layer_initialize_dispatch_table(&nextTable, fpNextGPA, (VK_PHYSICAL_GPU) pCurObj->nextObject);
+ layer_initialize_dispatch_table(&nextTable, fpNextGPA, (VkPhysicalGpu) pCurObj->nextObject);
- vkGetProcAddrType fpGetProcAddr = (vkGetProcAddrType)fpNextGPA((VK_PHYSICAL_GPU) pCurObj->nextObject, (char *) "vkGetProcAddr");
+ PFN_vkGetProcAddr fpGetProcAddr = (PFN_vkGetProcAddr)fpNextGPA((VkPhysicalGpu) pCurObj->nextObject, (char *) "vkGetProcAddr");
nextTable.GetProcAddr = fpGetProcAddr;
if (!globalLockInitialized)
@@ -1441,16 +1441,16 @@ static void initDrawState(void)
}
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDevice(VK_PHYSICAL_GPU gpu, const VkDeviceCreateInfo* pCreateInfo, VK_DEVICE* pDevice)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDevice(VkPhysicalGpu gpu, const VkDeviceCreateInfo* pCreateInfo, VkDevice* pDevice)
{
VK_BASE_LAYER_OBJECT* gpuw = (VK_BASE_LAYER_OBJECT *) gpu;
pCurObj = gpuw;
loader_platform_thread_once(&g_initOnce, initDrawState);
- VK_RESULT result = nextTable.CreateDevice((VK_PHYSICAL_GPU)gpuw->nextObject, pCreateInfo, pDevice);
+ VkResult result = nextTable.CreateDevice((VkPhysicalGpu)gpuw->nextObject, pCreateInfo, pDevice);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkDestroyDevice(VK_DEVICE device)
+VK_LAYER_EXPORT VkResult VKAPI vkDestroyDevice(VkDevice device)
{
// Free all the memory
loader_platform_thread_lock_mutex(&globalLock);
@@ -1463,14 +1463,14 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkDestroyDevice(VK_DEVICE device)
deletePools();
deleteLayouts();
loader_platform_thread_unlock_mutex(&globalLock);
- VK_RESULT result = nextTable.DestroyDevice(device);
+ VkResult result = nextTable.DestroyDevice(device);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkGetExtensionSupport(VK_PHYSICAL_GPU gpu, const char* pExtName)
+VK_LAYER_EXPORT VkResult VKAPI vkGetExtensionSupport(VkPhysicalGpu gpu, const char* pExtName)
{
VK_BASE_LAYER_OBJECT* gpuw = (VK_BASE_LAYER_OBJECT *) gpu;
- VK_RESULT result;
+ VkResult result;
/* This entrypoint is NOT going to init its own dispatch table since loader calls here early */
if (!strcmp(pExtName, "DrawState") || !strcmp(pExtName, "drawStateDumpDotFile") ||
!strcmp(pExtName, "drawStateDumpCommandBufferDotFile") || !strcmp(pExtName, "drawStateDumpPngFile"))
@@ -1478,7 +1478,7 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkGetExtensionSupport(VK_PHYSICAL_GPU gpu, const
result = VK_SUCCESS;
} else if (nextTable.GetExtensionSupport != NULL)
{
- result = nextTable.GetExtensionSupport((VK_PHYSICAL_GPU)gpuw->nextObject, pExtName);
+ result = nextTable.GetExtensionSupport((VkPhysicalGpu)gpuw->nextObject, pExtName);
} else
{
result = VK_ERROR_INVALID_EXTENSION;
@@ -1486,14 +1486,14 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkGetExtensionSupport(VK_PHYSICAL_GPU gpu, const
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkEnumerateLayers(VK_PHYSICAL_GPU gpu, size_t maxLayerCount, size_t maxStringSize, size_t* pOutLayerCount, char* const* pOutLayers, void* pReserved)
+VK_LAYER_EXPORT VkResult VKAPI vkEnumerateLayers(VkPhysicalGpu gpu, size_t maxLayerCount, size_t maxStringSize, size_t* pOutLayerCount, char* const* pOutLayers, void* pReserved)
{
if (gpu != NULL)
{
VK_BASE_LAYER_OBJECT* gpuw = (VK_BASE_LAYER_OBJECT *) gpu;
pCurObj = gpuw;
loader_platform_thread_once(&g_initOnce, initDrawState);
- VK_RESULT result = nextTable.EnumerateLayers((VK_PHYSICAL_GPU)gpuw->nextObject, maxLayerCount, maxStringSize, pOutLayerCount, pOutLayers, pReserved);
+ VkResult result = nextTable.EnumerateLayers((VkPhysicalGpu)gpuw->nextObject, maxLayerCount, maxStringSize, pOutLayerCount, pOutLayers, pReserved);
return result;
} else
{
@@ -1506,25 +1506,25 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkEnumerateLayers(VK_PHYSICAL_GPU gpu, size_t ma
}
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkQueueSubmit(VK_QUEUE queue, uint32_t cmdBufferCount, const VK_CMD_BUFFER* pCmdBuffers, VK_FENCE fence)
+VK_LAYER_EXPORT VkResult VKAPI vkQueueSubmit(VkQueue queue, uint32_t cmdBufferCount, const VkCmdBuffer* pCmdBuffers, VkFence fence)
{
for (uint32_t i=0; i < cmdBufferCount; i++) {
// Validate that cmd buffers have been updated
}
- VK_RESULT result = nextTable.QueueSubmit(queue, cmdBufferCount, pCmdBuffers, fence);
+ VkResult result = nextTable.QueueSubmit(queue, cmdBufferCount, pCmdBuffers, fence);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkDestroyObject(VK_OBJECT object)
+VK_LAYER_EXPORT VkResult VKAPI vkDestroyObject(VkObject object)
{
// TODO : When wrapped objects (such as dynamic state) are destroyed, need to clean up memory
- VK_RESULT result = nextTable.DestroyObject(object);
+ VkResult result = nextTable.DestroyObject(object);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateBufferView(VK_DEVICE device, const VkBufferViewCreateInfo* pCreateInfo, VK_BUFFER_VIEW* pView)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateBufferView(VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, VkBufferView* pView)
{
- VK_RESULT result = nextTable.CreateBufferView(device, pCreateInfo, pView);
+ VkResult result = nextTable.CreateBufferView(device, pCreateInfo, pView);
if (VK_SUCCESS == result) {
loader_platform_thread_lock_mutex(&globalLock);
BUFFER_NODE* pNewNode = new BUFFER_NODE;
@@ -1536,9 +1536,9 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateBufferView(VK_DEVICE device, const VkBuf
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateImageView(VK_DEVICE device, const VK_IMAGE_VIEW_CREATE_INFO* pCreateInfo, VK_IMAGE_VIEW* pView)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateImageView(VkDevice device, const VkImageViewCreateInfo* pCreateInfo, VkImageView* pView)
{
- VK_RESULT result = nextTable.CreateImageView(device, pCreateInfo, pView);
+ VkResult result = nextTable.CreateImageView(device, pCreateInfo, pView);
if (VK_SUCCESS == result) {
loader_platform_thread_lock_mutex(&globalLock);
IMAGE_NODE *pNewNode = new IMAGE_NODE;
@@ -1550,7 +1550,7 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateImageView(VK_DEVICE device, const VK_IMA
return result;
}
-static void track_pipeline(const VK_GRAPHICS_PIPELINE_CREATE_INFO* pCreateInfo, VK_PIPELINE* pPipeline)
+static void track_pipeline(const VkGraphicsPipelineCreateInfo* pCreateInfo, VkPipeline* pPipeline)
{
// Create LL HEAD for this Pipeline
loader_platform_thread_lock_mutex(&globalLock);
@@ -1561,9 +1561,9 @@ static void track_pipeline(const VK_GRAPHICS_PIPELINE_CREATE_INFO* pCreateInfo,
loader_platform_thread_unlock_mutex(&globalLock);
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateGraphicsPipeline(VK_DEVICE device, const VK_GRAPHICS_PIPELINE_CREATE_INFO* pCreateInfo, VK_PIPELINE* pPipeline)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateGraphicsPipeline(VkDevice device, const VkGraphicsPipelineCreateInfo* pCreateInfo, VkPipeline* pPipeline)
{
- VK_RESULT result = nextTable.CreateGraphicsPipeline(device, pCreateInfo, pPipeline);
+ VkResult result = nextTable.CreateGraphicsPipeline(device, pCreateInfo, pPipeline);
// Create LL HEAD for this Pipeline
char str[1024];
sprintf(str, "Created Gfx Pipeline %p", (void*)*pPipeline);
@@ -1574,13 +1574,13 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateGraphicsPipeline(VK_DEVICE device, const
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateGraphicsPipelineDerivative(
- VK_DEVICE device,
- const VK_GRAPHICS_PIPELINE_CREATE_INFO* pCreateInfo,
- VK_PIPELINE basePipeline,
- VK_PIPELINE* pPipeline)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateGraphicsPipelineDerivative(
+ VkDevice device,
+ const VkGraphicsPipelineCreateInfo* pCreateInfo,
+ VkPipeline basePipeline,
+ VkPipeline* pPipeline)
{
- VK_RESULT result = nextTable.CreateGraphicsPipelineDerivative(device, pCreateInfo, basePipeline, pPipeline);
+ VkResult result = nextTable.CreateGraphicsPipelineDerivative(device, pCreateInfo, basePipeline, pPipeline);
// Create LL HEAD for this Pipeline
char str[1024];
sprintf(str, "Created Gfx Pipeline %p (derived from pipeline %p)", (void*)*pPipeline, basePipeline);
@@ -1591,9 +1591,9 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateGraphicsPipelineDerivative(
loader_platform_thread_unlock_mutex(&globalLock);
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateSampler(VK_DEVICE device, const VK_SAMPLER_CREATE_INFO* pCreateInfo, VK_SAMPLER* pSampler)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateSampler(VkDevice device, const VkSamplerCreateInfo* pCreateInfo, VkSampler* pSampler)
{
- VK_RESULT result = nextTable.CreateSampler(device, pCreateInfo, pSampler);
+ VkResult result = nextTable.CreateSampler(device, pCreateInfo, pSampler);
if (VK_SUCCESS == result) {
loader_platform_thread_lock_mutex(&globalLock);
SAMPLER_NODE* pNewNode = new SAMPLER_NODE;
@@ -1605,9 +1605,9 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateSampler(VK_DEVICE device, const VK_SAMPL
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDescriptorSetLayout(VK_DEVICE device, const VK_DESCRIPTOR_SET_LAYOUT_CREATE_INFO* pCreateInfo, VK_DESCRIPTOR_SET_LAYOUT* pSetLayout)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayout* pSetLayout)
{
- VK_RESULT result = nextTable.CreateDescriptorSetLayout(device, pCreateInfo, pSetLayout);
+ VkResult result = nextTable.CreateDescriptorSetLayout(device, pCreateInfo, pSetLayout);
if (VK_SUCCESS == result) {
LAYOUT_NODE* pNewNode = new LAYOUT_NODE;
if (NULL == pNewNode) {
@@ -1616,20 +1616,20 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDescriptorSetLayout(VK_DEVICE device, co
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, *pSetLayout, 0, DRAWSTATE_OUT_OF_MEMORY, "DS", str);
}
memset(pNewNode, 0, sizeof(LAYOUT_NODE));
- memcpy((void*)&pNewNode->createInfo, pCreateInfo, sizeof(VK_DESCRIPTOR_SET_LAYOUT_CREATE_INFO));
- pNewNode->createInfo.pBinding = new VK_DESCRIPTOR_SET_LAYOUT_BINDING[pCreateInfo->count];
- memcpy((void*)pNewNode->createInfo.pBinding, pCreateInfo->pBinding, sizeof(VK_DESCRIPTOR_SET_LAYOUT_BINDING)*pCreateInfo->count);
+ memcpy((void*)&pNewNode->createInfo, pCreateInfo, sizeof(VkDescriptorSetLayoutCreateInfo));
+ pNewNode->createInfo.pBinding = new VkDescriptorSetLayoutBinding[pCreateInfo->count];
+ memcpy((void*)pNewNode->createInfo.pBinding, pCreateInfo->pBinding, sizeof(VkDescriptorSetLayoutBinding)*pCreateInfo->count);
uint32_t totalCount = 0;
for (uint32_t i=0; i<pCreateInfo->count; i++) {
totalCount += pCreateInfo->pBinding[i].count;
if (pCreateInfo->pBinding[i].pImmutableSamplers) {
- VK_SAMPLER** ppIS = (VK_SAMPLER**)&pNewNode->createInfo.pBinding[i].pImmutableSamplers;
- *ppIS = new VK_SAMPLER[pCreateInfo->pBinding[i].count];
- memcpy(*ppIS, pCreateInfo->pBinding[i].pImmutableSamplers, pCreateInfo->pBinding[i].count*sizeof(VK_SAMPLER));
+ VkSampler** ppIS = (VkSampler**)&pNewNode->createInfo.pBinding[i].pImmutableSamplers;
+ *ppIS = new VkSampler[pCreateInfo->pBinding[i].count];
+ memcpy(*ppIS, pCreateInfo->pBinding[i].pImmutableSamplers, pCreateInfo->pBinding[i].count*sizeof(VkSampler));
}
}
if (totalCount > 0) {
- pNewNode->pTypes = new VK_DESCRIPTOR_TYPE[totalCount];
+ pNewNode->pTypes = new VkDescriptorType[totalCount];
uint32_t offset = 0;
uint32_t j = 0;
for (uint32_t i=0; i<pCreateInfo->count; i++) {
@@ -1651,18 +1651,18 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDescriptorSetLayout(VK_DEVICE device, co
return result;
}
-VK_RESULT VKAPI vkCreateDescriptorSetLayoutChain(VK_DEVICE device, uint32_t setLayoutArrayCount, const VK_DESCRIPTOR_SET_LAYOUT* pSetLayoutArray, VK_DESCRIPTOR_SET_LAYOUT_CHAIN* pLayoutChain)
+VkResult VKAPI vkCreateDescriptorSetLayoutChain(VkDevice device, uint32_t setLayoutArrayCount, const VkDescriptorSetLayout* pSetLayoutArray, VkDescriptorSetLayoutChain* pLayoutChain)
{
- VK_RESULT result = nextTable.CreateDescriptorSetLayoutChain(device, setLayoutArrayCount, pSetLayoutArray, pLayoutChain);
+ VkResult result = nextTable.CreateDescriptorSetLayoutChain(device, setLayoutArrayCount, pSetLayoutArray, pLayoutChain);
if (VK_SUCCESS == result) {
// TODO : Need to capture the layout chains
}
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkBeginDescriptorPoolUpdate(VK_DEVICE device, VK_DESCRIPTOR_UPDATE_MODE updateMode)
+VK_LAYER_EXPORT VkResult VKAPI vkBeginDescriptorPoolUpdate(VkDevice device, VkDescriptorUpdateMode updateMode)
{
- VK_RESULT result = nextTable.BeginDescriptorPoolUpdate(device, updateMode);
+ VkResult result = nextTable.BeginDescriptorPoolUpdate(device, updateMode);
if (VK_SUCCESS == result) {
loader_platform_thread_lock_mutex(&globalLock);
POOL_NODE* pPoolNode = poolMap.begin()->second;
@@ -1679,9 +1679,9 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkBeginDescriptorPoolUpdate(VK_DEVICE device, VK
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkEndDescriptorPoolUpdate(VK_DEVICE device, VK_CMD_BUFFER cmd)
+VK_LAYER_EXPORT VkResult VKAPI vkEndDescriptorPoolUpdate(VkDevice device, VkCmdBuffer cmd)
{
- VK_RESULT result = nextTable.EndDescriptorPoolUpdate(device, cmd);
+ VkResult result = nextTable.EndDescriptorPoolUpdate(device, cmd);
if (VK_SUCCESS == result) {
loader_platform_thread_lock_mutex(&globalLock);
POOL_NODE* pPoolNode = poolMap.begin()->second;
@@ -1706,28 +1706,28 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkEndDescriptorPoolUpdate(VK_DEVICE device, VK_C
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDescriptorPool(VK_DEVICE device, VK_DESCRIPTOR_POOL_USAGE poolUsage, uint32_t maxSets, const VK_DESCRIPTOR_POOL_CREATE_INFO* pCreateInfo, VK_DESCRIPTOR_POOL* pDescriptorPool)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDescriptorPool(VkDevice device, VkDescriptorPoolUsage poolUsage, uint32_t maxSets, const VkDescriptorPoolCreateInfo* pCreateInfo, VkDescriptorPool* pDescriptorPool)
{
- VK_RESULT result = nextTable.CreateDescriptorPool(device, poolUsage, maxSets, pCreateInfo, pDescriptorPool);
+ VkResult result = nextTable.CreateDescriptorPool(device, poolUsage, maxSets, pCreateInfo, pDescriptorPool);
if (VK_SUCCESS == result) {
// Insert this pool into Global Pool LL at head
char str[1024];
sprintf(str, "Created Descriptor Pool %p", (void*)*pDescriptorPool);
- layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, (VK_BASE_OBJECT)pDescriptorPool, 0, DRAWSTATE_NONE, "DS", str);
+ layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, (VkBaseObject)pDescriptorPool, 0, DRAWSTATE_NONE, "DS", str);
loader_platform_thread_lock_mutex(&globalLock);
POOL_NODE* pNewNode = new POOL_NODE;
if (NULL == pNewNode) {
char str[1024];
sprintf(str, "Out of memory while attempting to allocate POOL_NODE in vkCreateDescriptorPool()");
- layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, (VK_BASE_OBJECT)*pDescriptorPool, 0, DRAWSTATE_OUT_OF_MEMORY, "DS", str);
+ layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, (VkBaseObject)*pDescriptorPool, 0, DRAWSTATE_OUT_OF_MEMORY, "DS", str);
}
else {
memset(pNewNode, 0, sizeof(POOL_NODE));
- VK_DESCRIPTOR_POOL_CREATE_INFO* pCI = (VK_DESCRIPTOR_POOL_CREATE_INFO*)&pNewNode->createInfo;
- memcpy((void*)pCI, pCreateInfo, sizeof(VK_DESCRIPTOR_POOL_CREATE_INFO));
+ VkDescriptorPoolCreateInfo* pCI = (VkDescriptorPoolCreateInfo*)&pNewNode->createInfo;
+ memcpy((void*)pCI, pCreateInfo, sizeof(VkDescriptorPoolCreateInfo));
if (pNewNode->createInfo.count) {
- size_t typeCountSize = pNewNode->createInfo.count * sizeof(VK_DESCRIPTOR_TYPE_COUNT);
- pNewNode->createInfo.pTypeCount = new VK_DESCRIPTOR_TYPE_COUNT[typeCountSize];
+ size_t typeCountSize = pNewNode->createInfo.count * sizeof(VkDescriptorTypeCount);
+ pNewNode->createInfo.pTypeCount = new VkDescriptorTypeCount[typeCountSize];
memcpy((void*)pNewNode->createInfo.pTypeCount, pCreateInfo->pTypeCount, typeCountSize);
}
pNewNode->poolUsage = poolUsage;
@@ -1744,18 +1744,18 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDescriptorPool(VK_DEVICE device, VK_DESC
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkResetDescriptorPool(VK_DESCRIPTOR_POOL descriptorPool)
+VK_LAYER_EXPORT VkResult VKAPI vkResetDescriptorPool(VkDescriptorPool descriptorPool)
{
- VK_RESULT result = nextTable.ResetDescriptorPool(descriptorPool);
+ VkResult result = nextTable.ResetDescriptorPool(descriptorPool);
if (VK_SUCCESS == result) {
clearDescriptorPool(descriptorPool);
}
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkAllocDescriptorSets(VK_DESCRIPTOR_POOL descriptorPool, VK_DESCRIPTOR_SET_USAGE setUsage, uint32_t count, const VK_DESCRIPTOR_SET_LAYOUT* pSetLayouts, VK_DESCRIPTOR_SET* pDescriptorSets, uint32_t* pCount)
+VK_LAYER_EXPORT VkResult VKAPI vkAllocDescriptorSets(VkDescriptorPool descriptorPool, VkDescriptorSetUsage setUsage, uint32_t count, const VkDescriptorSetLayout* pSetLayouts, VkDescriptorSet* pDescriptorSets, uint32_t* pCount)
{
- VK_RESULT result = nextTable.AllocDescriptorSets(descriptorPool, setUsage, count, pSetLayouts, pDescriptorSets, pCount);
+ VkResult result = nextTable.AllocDescriptorSets(descriptorPool, setUsage, count, pSetLayouts, pDescriptorSets, pCount);
if ((VK_SUCCESS == result) || (*pCount > 0)) {
POOL_NODE *pPoolNode = getPoolNode(descriptorPool);
if (!pPoolNode) {
@@ -1804,7 +1804,7 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkAllocDescriptorSets(VK_DESCRIPTOR_POOL descrip
return result;
}
-VK_LAYER_EXPORT void VKAPI vkClearDescriptorSets(VK_DESCRIPTOR_POOL descriptorPool, uint32_t count, const VK_DESCRIPTOR_SET* pDescriptorSets)
+VK_LAYER_EXPORT void VKAPI vkClearDescriptorSets(VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet* pDescriptorSets)
{
for (uint32_t i = 0; i < count; i++) {
clearDescriptorSet(pDescriptorSets[i]);
@@ -1812,7 +1812,7 @@ VK_LAYER_EXPORT void VKAPI vkClearDescriptorSets(VK_DESCRIPTOR_POOL descriptorPo
nextTable.ClearDescriptorSets(descriptorPool, count, pDescriptorSets);
}
-VK_LAYER_EXPORT void VKAPI vkUpdateDescriptors(VK_DESCRIPTOR_SET descriptorSet, uint32_t updateCount, const void** ppUpdateArray)
+VK_LAYER_EXPORT void VKAPI vkUpdateDescriptors(VkDescriptorSet descriptorSet, uint32_t updateCount, const void** ppUpdateArray)
{
SET_NODE* pSet = getSetNode(descriptorSet);
if (!dsUpdateActive(descriptorSet)) {
@@ -1828,37 +1828,37 @@ VK_LAYER_EXPORT void VKAPI vkUpdateDescriptors(VK_DESCRIPTOR_SET descriptorSet,
nextTable.UpdateDescriptors(descriptorSet, updateCount, ppUpdateArray);
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDynamicViewportState(VK_DEVICE device, const VK_DYNAMIC_VP_STATE_CREATE_INFO* pCreateInfo, VK_DYNAMIC_VP_STATE_OBJECT* pState)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDynamicViewportState(VkDevice device, const VkDynamicVpStateCreateInfo* pCreateInfo, VkDynamicVpStateObject* pState)
{
- VK_RESULT result = nextTable.CreateDynamicViewportState(device, pCreateInfo, pState);
+ VkResult result = nextTable.CreateDynamicViewportState(device, pCreateInfo, pState);
insertDynamicState(*pState, (GENERIC_HEADER*)pCreateInfo, VK_STATE_BIND_VIEWPORT);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDynamicRasterState(VK_DEVICE device, const VK_DYNAMIC_RS_STATE_CREATE_INFO* pCreateInfo, VK_DYNAMIC_RS_STATE_OBJECT* pState)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDynamicRasterState(VkDevice device, const VkDynamicRsStateCreateInfo* pCreateInfo, VkDynamicRsStateObject* pState)
{
- VK_RESULT result = nextTable.CreateDynamicRasterState(device, pCreateInfo, pState);
+ VkResult result = nextTable.CreateDynamicRasterState(device, pCreateInfo, pState);
insertDynamicState(*pState, (GENERIC_HEADER*)pCreateInfo, VK_STATE_BIND_RASTER);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDynamicColorBlendState(VK_DEVICE device, const VK_DYNAMIC_CB_STATE_CREATE_INFO* pCreateInfo, VK_DYNAMIC_CB_STATE_OBJECT* pState)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDynamicColorBlendState(VkDevice device, const VkDynamicCbStateCreateInfo* pCreateInfo, VkDynamicCbStateObject* pState)
{
- VK_RESULT result = nextTable.CreateDynamicColorBlendState(device, pCreateInfo, pState);
+ VkResult result = nextTable.CreateDynamicColorBlendState(device, pCreateInfo, pState);
insertDynamicState(*pState, (GENERIC_HEADER*)pCreateInfo, VK_STATE_BIND_COLOR_BLEND);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDynamicDepthStencilState(VK_DEVICE device, const VK_DYNAMIC_DS_STATE_CREATE_INFO* pCreateInfo, VK_DYNAMIC_DS_STATE_OBJECT* pState)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDynamicDepthStencilState(VkDevice device, const VkDynamicDsStateCreateInfo* pCreateInfo, VkDynamicDsStateObject* pState)
{
- VK_RESULT result = nextTable.CreateDynamicDepthStencilState(device, pCreateInfo, pState);
+ VkResult result = nextTable.CreateDynamicDepthStencilState(device, pCreateInfo, pState);
insertDynamicState(*pState, (GENERIC_HEADER*)pCreateInfo, VK_STATE_BIND_DEPTH_STENCIL);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateCommandBuffer(VK_DEVICE device, const VK_CMD_BUFFER_CREATE_INFO* pCreateInfo, VK_CMD_BUFFER* pCmdBuffer)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateCommandBuffer(VkDevice device, const VkCmdBufferCreateInfo* pCreateInfo, VkCmdBuffer* pCmdBuffer)
{
- VK_RESULT result = nextTable.CreateCommandBuffer(device, pCreateInfo, pCmdBuffer);
+ VkResult result = nextTable.CreateCommandBuffer(device, pCreateInfo, pCmdBuffer);
if (VK_SUCCESS == result) {
loader_platform_thread_lock_mutex(&globalLock);
GLOBAL_CB_NODE* pCB = new GLOBAL_CB_NODE;
@@ -1874,9 +1874,9 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateCommandBuffer(VK_DEVICE device, const VK
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkBeginCommandBuffer(VK_CMD_BUFFER cmdBuffer, const VK_CMD_BUFFER_BEGIN_INFO* pBeginInfo)
+VK_LAYER_EXPORT VkResult VKAPI vkBeginCommandBuffer(VkCmdBuffer cmdBuffer, const VkCmdBufferBeginInfo* pBeginInfo)
{
- VK_RESULT result = nextTable.BeginCommandBuffer(cmdBuffer, pBeginInfo);
+ VkResult result = nextTable.BeginCommandBuffer(cmdBuffer, pBeginInfo);
if (VK_SUCCESS == result) {
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -1884,7 +1884,7 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkBeginCommandBuffer(VK_CMD_BUFFER cmdBuffer, co
resetCB(cmdBuffer);
pCB->state = CB_UPDATE_ACTIVE;
if (pBeginInfo->pNext) {
- VK_CMD_BUFFER_GRAPHICS_BEGIN_INFO* pCbGfxBI = (VK_CMD_BUFFER_GRAPHICS_BEGIN_INFO*)pBeginInfo->pNext;
+ VkCmdBufferGraphicsBeginInfo* pCbGfxBI = (VkCmdBufferGraphicsBeginInfo*)pBeginInfo->pNext;
if (VK_STRUCTURE_TYPE_CMD_BUFFER_GRAPHICS_BEGIN_INFO == pCbGfxBI->sType) {
pCB->activeRenderPass = pCbGfxBI->renderPassContinue.renderPass;
}
@@ -1900,9 +1900,9 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkBeginCommandBuffer(VK_CMD_BUFFER cmdBuffer, co
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkEndCommandBuffer(VK_CMD_BUFFER cmdBuffer)
+VK_LAYER_EXPORT VkResult VKAPI vkEndCommandBuffer(VkCmdBuffer cmdBuffer)
{
- VK_RESULT result = nextTable.EndCommandBuffer(cmdBuffer);
+ VkResult result = nextTable.EndCommandBuffer(cmdBuffer);
if (VK_SUCCESS == result) {
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -1920,9 +1920,9 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkEndCommandBuffer(VK_CMD_BUFFER cmdBuffer)
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkResetCommandBuffer(VK_CMD_BUFFER cmdBuffer)
+VK_LAYER_EXPORT VkResult VKAPI vkResetCommandBuffer(VkCmdBuffer cmdBuffer)
{
- VK_RESULT result = nextTable.ResetCommandBuffer(cmdBuffer);
+ VkResult result = nextTable.ResetCommandBuffer(cmdBuffer);
if (VK_SUCCESS == result) {
resetCB(cmdBuffer);
updateCBTracking(cmdBuffer);
@@ -1930,7 +1930,7 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkResetCommandBuffer(VK_CMD_BUFFER cmdBuffer)
return result;
}
-VK_LAYER_EXPORT void VKAPI vkCmdBindPipeline(VK_CMD_BUFFER cmdBuffer, VK_PIPELINE_BIND_POINT pipelineBindPoint, VK_PIPELINE pipeline)
+VK_LAYER_EXPORT void VKAPI vkCmdBindPipeline(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -1958,13 +1958,13 @@ VK_LAYER_EXPORT void VKAPI vkCmdBindPipeline(VK_CMD_BUFFER cmdBuffer, VK_PIPELIN
nextTable.CmdBindPipeline(cmdBuffer, pipelineBindPoint, pipeline);
}
-VK_LAYER_EXPORT void VKAPI vkCmdBindDynamicStateObject(VK_CMD_BUFFER cmdBuffer, VK_STATE_BIND_POINT stateBindPoint, VK_DYNAMIC_STATE_OBJECT state)
+VK_LAYER_EXPORT void VKAPI vkCmdBindDynamicStateObject(VkCmdBuffer cmdBuffer, VkStateBindPoint stateBindPoint, VkDynamicStateObject state)
{
setLastBoundDynamicState(cmdBuffer, state, stateBindPoint);
nextTable.CmdBindDynamicStateObject(cmdBuffer, stateBindPoint, state);
}
-VK_LAYER_EXPORT void VKAPI vkCmdBindDescriptorSets(VK_CMD_BUFFER cmdBuffer, VK_PIPELINE_BIND_POINT pipelineBindPoint, VK_DESCRIPTOR_SET_LAYOUT_CHAIN layoutChain, uint32_t layoutChainSlot, uint32_t count, const VK_DESCRIPTOR_SET* pDescriptorSets, const uint32_t* pUserData)
+VK_LAYER_EXPORT void VKAPI vkCmdBindDescriptorSets(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, VkDescriptorSetLayoutChain layoutChain, uint32_t layoutChainSlot, uint32_t count, const VkDescriptorSet* pDescriptorSets, const uint32_t* pUserData)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -1985,7 +1985,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdBindDescriptorSets(VK_CMD_BUFFER cmdBuffer, VK_P
g_lastBoundDescriptorSet = pDescriptorSets[i];
loader_platform_thread_unlock_mutex(&globalLock);
char str[1024];
- sprintf(str, "DS %p bound on pipeline %s", (void*)pDescriptorSets[i], string_VK_PIPELINE_BIND_POINT(pipelineBindPoint));
+ sprintf(str, "DS %p bound on pipeline %s", (void*)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, pDescriptorSets[i], 0, DRAWSTATE_NONE, "DS", str);
synchAndPrintDSConfig(cmdBuffer);
}
@@ -2004,7 +2004,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdBindDescriptorSets(VK_CMD_BUFFER cmdBuffer, VK_P
nextTable.CmdBindDescriptorSets(cmdBuffer, pipelineBindPoint, layoutChain, layoutChainSlot, count, pDescriptorSets, pUserData);
}
-VK_LAYER_EXPORT void VKAPI vkCmdBindIndexBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUFFER buffer, VK_GPU_SIZE offset, VK_INDEX_TYPE indexType)
+VK_LAYER_EXPORT void VKAPI vkCmdBindIndexBuffer(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset, VkIndexType indexType)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -2020,7 +2020,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdBindIndexBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUFF
nextTable.CmdBindIndexBuffer(cmdBuffer, buffer, offset, indexType);
}
-VK_LAYER_EXPORT void VKAPI vkCmdBindVertexBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUFFER buffer, VK_GPU_SIZE offset, uint32_t binding)
+VK_LAYER_EXPORT void VKAPI vkCmdBindVertexBuffer(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset, uint32_t binding)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -2037,7 +2037,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdBindVertexBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUF
nextTable.CmdBindVertexBuffer(cmdBuffer, buffer, offset, binding);
}
-VK_LAYER_EXPORT void VKAPI vkCmdDraw(VK_CMD_BUFFER cmdBuffer, uint32_t firstVertex, uint32_t vertexCount, uint32_t firstInstance, uint32_t instanceCount)
+VK_LAYER_EXPORT void VKAPI vkCmdDraw(VkCmdBuffer cmdBuffer, uint32_t firstVertex, uint32_t vertexCount, uint32_t firstInstance, uint32_t instanceCount)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -2057,7 +2057,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdDraw(VK_CMD_BUFFER cmdBuffer, uint32_t firstVert
nextTable.CmdDraw(cmdBuffer, firstVertex, vertexCount, firstInstance, instanceCount);
}
-VK_LAYER_EXPORT void VKAPI vkCmdDrawIndexed(VK_CMD_BUFFER cmdBuffer, uint32_t firstIndex, uint32_t indexCount, int32_t vertexOffset, uint32_t firstInstance, uint32_t instanceCount)
+VK_LAYER_EXPORT void VKAPI vkCmdDrawIndexed(VkCmdBuffer cmdBuffer, uint32_t firstIndex, uint32_t indexCount, int32_t vertexOffset, uint32_t firstInstance, uint32_t instanceCount)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -2077,7 +2077,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdDrawIndexed(VK_CMD_BUFFER cmdBuffer, uint32_t fi
nextTable.CmdDrawIndexed(cmdBuffer, firstIndex, indexCount, vertexOffset, firstInstance, instanceCount);
}
-VK_LAYER_EXPORT void VKAPI vkCmdDrawIndirect(VK_CMD_BUFFER cmdBuffer, VK_BUFFER buffer, VK_GPU_SIZE offset, uint32_t count, uint32_t stride)
+VK_LAYER_EXPORT void VKAPI vkCmdDrawIndirect(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset, uint32_t count, uint32_t stride)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -2097,7 +2097,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdDrawIndirect(VK_CMD_BUFFER cmdBuffer, VK_BUFFER
nextTable.CmdDrawIndirect(cmdBuffer, buffer, offset, count, stride);
}
-VK_LAYER_EXPORT void VKAPI vkCmdDrawIndexedIndirect(VK_CMD_BUFFER cmdBuffer, VK_BUFFER buffer, VK_GPU_SIZE offset, uint32_t count, uint32_t stride)
+VK_LAYER_EXPORT void VKAPI vkCmdDrawIndexedIndirect(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset, uint32_t count, uint32_t stride)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -2117,7 +2117,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdDrawIndexedIndirect(VK_CMD_BUFFER cmdBuffer, VK_
nextTable.CmdDrawIndexedIndirect(cmdBuffer, buffer, offset, count, stride);
}
-VK_LAYER_EXPORT void VKAPI vkCmdDispatch(VK_CMD_BUFFER cmdBuffer, uint32_t x, uint32_t y, uint32_t z)
+VK_LAYER_EXPORT void VKAPI vkCmdDispatch(VkCmdBuffer cmdBuffer, uint32_t x, uint32_t y, uint32_t z)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -2132,7 +2132,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdDispatch(VK_CMD_BUFFER cmdBuffer, uint32_t x, ui
nextTable.CmdDispatch(cmdBuffer, x, y, z);
}
-VK_LAYER_EXPORT void VKAPI vkCmdDispatchIndirect(VK_CMD_BUFFER cmdBuffer, VK_BUFFER buffer, VK_GPU_SIZE offset)
+VK_LAYER_EXPORT void VKAPI vkCmdDispatchIndirect(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -2147,7 +2147,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdDispatchIndirect(VK_CMD_BUFFER cmdBuffer, VK_BUF
nextTable.CmdDispatchIndirect(cmdBuffer, buffer, offset);
}
-VK_LAYER_EXPORT void VKAPI vkCmdCopyBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUFFER srcBuffer, VK_BUFFER destBuffer, uint32_t regionCount, const VK_BUFFER_COPY* pRegions)
+VK_LAYER_EXPORT void VKAPI vkCmdCopyBuffer(VkCmdBuffer cmdBuffer, VkBuffer srcBuffer, VkBuffer destBuffer, uint32_t regionCount, const VkBufferCopy* pRegions)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -2162,12 +2162,12 @@ VK_LAYER_EXPORT void VKAPI vkCmdCopyBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUFFER sr
nextTable.CmdCopyBuffer(cmdBuffer, srcBuffer, destBuffer, regionCount, pRegions);
}
-VK_LAYER_EXPORT void VKAPI vkCmdCopyImage(VK_CMD_BUFFER cmdBuffer,
- VK_IMAGE srcImage,
- VK_IMAGE_LAYOUT srcImageLayout,
- VK_IMAGE destImage,
- VK_IMAGE_LAYOUT destImageLayout,
- uint32_t regionCount, const VK_IMAGE_COPY* pRegions)
+VK_LAYER_EXPORT void VKAPI vkCmdCopyImage(VkCmdBuffer cmdBuffer,
+ VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkImage destImage,
+ VkImageLayout destImageLayout,
+ uint32_t regionCount, const VkImageCopy* pRegions)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -2182,10 +2182,10 @@ VK_LAYER_EXPORT void VKAPI vkCmdCopyImage(VK_CMD_BUFFER cmdBuffer,
nextTable.CmdCopyImage(cmdBuffer, srcImage, srcImageLayout, destImage, destImageLayout, regionCount, pRegions);
}
-VK_LAYER_EXPORT void VKAPI vkCmdBlitImage(VK_CMD_BUFFER cmdBuffer,
- VK_IMAGE srcImage, VK_IMAGE_LAYOUT srcImageLayout,
- VK_IMAGE destImage, VK_IMAGE_LAYOUT destImageLayout,
- uint32_t regionCount, const VK_IMAGE_BLIT* pRegions)
+VK_LAYER_EXPORT void VKAPI vkCmdBlitImage(VkCmdBuffer cmdBuffer,
+ VkImage srcImage, VkImageLayout srcImageLayout,
+ VkImage destImage, VkImageLayout destImageLayout,
+ uint32_t regionCount, const VkImageBlit* pRegions)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -2200,10 +2200,10 @@ VK_LAYER_EXPORT void VKAPI vkCmdBlitImage(VK_CMD_BUFFER cmdBuffer,
nextTable.CmdBlitImage(cmdBuffer, srcImage, srcImageLayout, destImage, destImageLayout, regionCount, pRegions);
}
-VK_LAYER_EXPORT void VKAPI vkCmdCopyBufferToImage(VK_CMD_BUFFER cmdBuffer,
- VK_BUFFER srcBuffer,
- VK_IMAGE destImage, VK_IMAGE_LAYOUT destImageLayout,
- uint32_t regionCount, const VK_BUFFER_IMAGE_COPY* pRegions)
+VK_LAYER_EXPORT void VKAPI vkCmdCopyBufferToImage(VkCmdBuffer cmdBuffer,
+ VkBuffer srcBuffer,
+ VkImage destImage, VkImageLayout destImageLayout,
+ uint32_t regionCount, const VkBufferImageCopy* pRegions)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -2218,10 +2218,10 @@ VK_LAYER_EXPORT void VKAPI vkCmdCopyBufferToImage(VK_CMD_BUFFER cmdBuffer,
nextTable.CmdCopyBufferToImage(cmdBuffer, srcBuffer, destImage, destImageLayout, regionCount, pRegions);
}
-VK_LAYER_EXPORT void VKAPI vkCmdCopyImageToBuffer(VK_CMD_BUFFER cmdBuffer,
- VK_IMAGE srcImage, VK_IMAGE_LAYOUT srcImageLayout,
- VK_BUFFER destBuffer,
- uint32_t regionCount, const VK_BUFFER_IMAGE_COPY* pRegions)
+VK_LAYER_EXPORT void VKAPI vkCmdCopyImageToBuffer(VkCmdBuffer cmdBuffer,
+ VkImage srcImage, VkImageLayout srcImageLayout,
+ VkBuffer destBuffer,
+ uint32_t regionCount, const VkBufferImageCopy* pRegions)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -2236,7 +2236,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdCopyImageToBuffer(VK_CMD_BUFFER cmdBuffer,
nextTable.CmdCopyImageToBuffer(cmdBuffer, srcImage, srcImageLayout, destBuffer, regionCount, pRegions);
}
-VK_LAYER_EXPORT void VKAPI vkCmdCloneImageData(VK_CMD_BUFFER cmdBuffer, VK_IMAGE srcImage, VK_IMAGE_LAYOUT srcImageLayout, VK_IMAGE destImage, VK_IMAGE_LAYOUT destImageLayout)
+VK_LAYER_EXPORT void VKAPI vkCmdCloneImageData(VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage destImage, VkImageLayout destImageLayout)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -2251,7 +2251,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdCloneImageData(VK_CMD_BUFFER cmdBuffer, VK_IMAGE
nextTable.CmdCloneImageData(cmdBuffer, srcImage, srcImageLayout, destImage, destImageLayout);
}
-VK_LAYER_EXPORT void VKAPI vkCmdUpdateBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUFFER destBuffer, VK_GPU_SIZE destOffset, VK_GPU_SIZE dataSize, const uint32_t* pData)
+VK_LAYER_EXPORT void VKAPI vkCmdUpdateBuffer(VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkGpuSize destOffset, VkGpuSize dataSize, const uint32_t* pData)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -2266,7 +2266,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdUpdateBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUFFER
nextTable.CmdUpdateBuffer(cmdBuffer, destBuffer, destOffset, dataSize, pData);
}
-VK_LAYER_EXPORT void VKAPI vkCmdFillBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUFFER destBuffer, VK_GPU_SIZE destOffset, VK_GPU_SIZE fillSize, uint32_t data)
+VK_LAYER_EXPORT void VKAPI vkCmdFillBuffer(VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkGpuSize destOffset, VkGpuSize fillSize, uint32_t data)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -2281,10 +2281,10 @@ VK_LAYER_EXPORT void VKAPI vkCmdFillBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUFFER de
nextTable.CmdFillBuffer(cmdBuffer, destBuffer, destOffset, fillSize, data);
}
-VK_LAYER_EXPORT void VKAPI vkCmdClearColorImage(VK_CMD_BUFFER cmdBuffer,
- VK_IMAGE image, VK_IMAGE_LAYOUT imageLayout,
- VK_CLEAR_COLOR color,
- uint32_t rangeCount, const VK_IMAGE_SUBRESOURCE_RANGE* pRanges)
+VK_LAYER_EXPORT void VKAPI vkCmdClearColorImage(VkCmdBuffer cmdBuffer,
+ VkImage image, VkImageLayout imageLayout,
+ VkClearColor color,
+ uint32_t rangeCount, const VkImageSubresourceRange* pRanges)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -2299,10 +2299,10 @@ VK_LAYER_EXPORT void VKAPI vkCmdClearColorImage(VK_CMD_BUFFER cmdBuffer,
nextTable.CmdClearColorImage(cmdBuffer, image, imageLayout, color, rangeCount, pRanges);
}
-VK_LAYER_EXPORT void VKAPI vkCmdClearDepthStencil(VK_CMD_BUFFER cmdBuffer,
- VK_IMAGE image, VK_IMAGE_LAYOUT imageLayout,
+VK_LAYER_EXPORT void VKAPI vkCmdClearDepthStencil(VkCmdBuffer cmdBuffer,
+ VkImage image, VkImageLayout imageLayout,
float depth, uint32_t stencil,
- uint32_t rangeCount, const VK_IMAGE_SUBRESOURCE_RANGE* pRanges)
+ uint32_t rangeCount, const VkImageSubresourceRange* pRanges)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -2317,10 +2317,10 @@ VK_LAYER_EXPORT void VKAPI vkCmdClearDepthStencil(VK_CMD_BUFFER cmdBuffer,
nextTable.CmdClearDepthStencil(cmdBuffer, image, imageLayout, depth, stencil, rangeCount, pRanges);
}
-VK_LAYER_EXPORT void VKAPI vkCmdResolveImage(VK_CMD_BUFFER cmdBuffer,
- VK_IMAGE srcImage, VK_IMAGE_LAYOUT srcImageLayout,
- VK_IMAGE destImage, VK_IMAGE_LAYOUT destImageLayout,
- uint32_t rectCount, const VK_IMAGE_RESOLVE* pRects)
+VK_LAYER_EXPORT void VKAPI vkCmdResolveImage(VkCmdBuffer cmdBuffer,
+ VkImage srcImage, VkImageLayout srcImageLayout,
+ VkImage destImage, VkImageLayout destImageLayout,
+ uint32_t rectCount, const VkImageResolve* pRects)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -2335,7 +2335,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdResolveImage(VK_CMD_BUFFER cmdBuffer,
nextTable.CmdResolveImage(cmdBuffer, srcImage, srcImageLayout, destImage, destImageLayout, rectCount, pRects);
}
-VK_LAYER_EXPORT void VKAPI vkCmdSetEvent(VK_CMD_BUFFER cmdBuffer, VK_EVENT event, VK_PIPE_EVENT pipeEvent)
+VK_LAYER_EXPORT void VKAPI vkCmdSetEvent(VkCmdBuffer cmdBuffer, VkEvent event, VkPipeEvent pipeEvent)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -2350,7 +2350,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdSetEvent(VK_CMD_BUFFER cmdBuffer, VK_EVENT event
nextTable.CmdSetEvent(cmdBuffer, event, pipeEvent);
}
-VK_LAYER_EXPORT void VKAPI vkCmdResetEvent(VK_CMD_BUFFER cmdBuffer, VK_EVENT event, VK_PIPE_EVENT pipeEvent)
+VK_LAYER_EXPORT void VKAPI vkCmdResetEvent(VkCmdBuffer cmdBuffer, VkEvent event, VkPipeEvent pipeEvent)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -2365,7 +2365,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdResetEvent(VK_CMD_BUFFER cmdBuffer, VK_EVENT eve
nextTable.CmdResetEvent(cmdBuffer, event, pipeEvent);
}
-VK_LAYER_EXPORT void VKAPI vkCmdWaitEvents(VK_CMD_BUFFER cmdBuffer, const VK_EVENT_WAIT_INFO* pWaitInfo)
+VK_LAYER_EXPORT void VKAPI vkCmdWaitEvents(VkCmdBuffer cmdBuffer, const VkEventWaitInfo* pWaitInfo)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -2380,7 +2380,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdWaitEvents(VK_CMD_BUFFER cmdBuffer, const VK_EVE
nextTable.CmdWaitEvents(cmdBuffer, pWaitInfo);
}
-VK_LAYER_EXPORT void VKAPI vkCmdPipelineBarrier(VK_CMD_BUFFER cmdBuffer, const VK_PIPELINE_BARRIER* pBarrier)
+VK_LAYER_EXPORT void VKAPI vkCmdPipelineBarrier(VkCmdBuffer cmdBuffer, const VkPipelineBarrier* pBarrier)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -2395,7 +2395,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdPipelineBarrier(VK_CMD_BUFFER cmdBuffer, const V
nextTable.CmdPipelineBarrier(cmdBuffer, pBarrier);
}
-VK_LAYER_EXPORT void VKAPI vkCmdBeginQuery(VK_CMD_BUFFER cmdBuffer, VK_QUERY_POOL queryPool, uint32_t slot, VK_FLAGS flags)
+VK_LAYER_EXPORT void VKAPI vkCmdBeginQuery(VkCmdBuffer cmdBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -2410,7 +2410,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdBeginQuery(VK_CMD_BUFFER cmdBuffer, VK_QUERY_POO
nextTable.CmdBeginQuery(cmdBuffer, queryPool, slot, flags);
}
-VK_LAYER_EXPORT void VKAPI vkCmdEndQuery(VK_CMD_BUFFER cmdBuffer, VK_QUERY_POOL queryPool, uint32_t slot)
+VK_LAYER_EXPORT void VKAPI vkCmdEndQuery(VkCmdBuffer cmdBuffer, VkQueryPool queryPool, uint32_t slot)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -2425,7 +2425,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdEndQuery(VK_CMD_BUFFER cmdBuffer, VK_QUERY_POOL
nextTable.CmdEndQuery(cmdBuffer, queryPool, slot);
}
-VK_LAYER_EXPORT void VKAPI vkCmdResetQueryPool(VK_CMD_BUFFER cmdBuffer, VK_QUERY_POOL queryPool, uint32_t startQuery, uint32_t queryCount)
+VK_LAYER_EXPORT void VKAPI vkCmdResetQueryPool(VkCmdBuffer cmdBuffer, VkQueryPool queryPool, uint32_t startQuery, uint32_t queryCount)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -2440,7 +2440,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdResetQueryPool(VK_CMD_BUFFER cmdBuffer, VK_QUERY
nextTable.CmdResetQueryPool(cmdBuffer, queryPool, startQuery, queryCount);
}
-VK_LAYER_EXPORT void VKAPI vkCmdWriteTimestamp(VK_CMD_BUFFER cmdBuffer, VK_TIMESTAMP_TYPE timestampType, VK_BUFFER destBuffer, VK_GPU_SIZE destOffset)
+VK_LAYER_EXPORT void VKAPI vkCmdWriteTimestamp(VkCmdBuffer cmdBuffer, VkTimestampType timestampType, VkBuffer destBuffer, VkGpuSize destOffset)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -2455,7 +2455,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdWriteTimestamp(VK_CMD_BUFFER cmdBuffer, VK_TIMES
nextTable.CmdWriteTimestamp(cmdBuffer, timestampType, destBuffer, destOffset);
}
-VK_LAYER_EXPORT void VKAPI vkCmdInitAtomicCounters(VK_CMD_BUFFER cmdBuffer, VK_PIPELINE_BIND_POINT pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, const uint32_t* pData)
+VK_LAYER_EXPORT void VKAPI vkCmdInitAtomicCounters(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, const uint32_t* pData)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -2470,7 +2470,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdInitAtomicCounters(VK_CMD_BUFFER cmdBuffer, VK_P
nextTable.CmdInitAtomicCounters(cmdBuffer, pipelineBindPoint, startCounter, counterCount, pData);
}
-VK_LAYER_EXPORT void VKAPI vkCmdLoadAtomicCounters(VK_CMD_BUFFER cmdBuffer, VK_PIPELINE_BIND_POINT pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, VK_BUFFER srcBuffer, VK_GPU_SIZE srcOffset)
+VK_LAYER_EXPORT void VKAPI vkCmdLoadAtomicCounters(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, VkBuffer srcBuffer, VkGpuSize srcOffset)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -2485,7 +2485,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdLoadAtomicCounters(VK_CMD_BUFFER cmdBuffer, VK_P
nextTable.CmdLoadAtomicCounters(cmdBuffer, pipelineBindPoint, startCounter, counterCount, srcBuffer, srcOffset);
}
-VK_LAYER_EXPORT void VKAPI vkCmdSaveAtomicCounters(VK_CMD_BUFFER cmdBuffer, VK_PIPELINE_BIND_POINT pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, VK_BUFFER destBuffer, VK_GPU_SIZE destOffset)
+VK_LAYER_EXPORT void VKAPI vkCmdSaveAtomicCounters(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, VkBuffer destBuffer, VkGpuSize destOffset)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -2500,49 +2500,49 @@ VK_LAYER_EXPORT void VKAPI vkCmdSaveAtomicCounters(VK_CMD_BUFFER cmdBuffer, VK_P
nextTable.CmdSaveAtomicCounters(cmdBuffer, pipelineBindPoint, startCounter, counterCount, destBuffer, destOffset);
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateFramebuffer(VK_DEVICE device, const VK_FRAMEBUFFER_CREATE_INFO* pCreateInfo, VK_FRAMEBUFFER* pFramebuffer)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo* pCreateInfo, VkFramebuffer* pFramebuffer)
{
- VK_RESULT result = nextTable.CreateFramebuffer(device, pCreateInfo, pFramebuffer);
+ VkResult result = nextTable.CreateFramebuffer(device, pCreateInfo, pFramebuffer);
if (VK_SUCCESS == result) {
// Shadow create info and store in map
- VK_FRAMEBUFFER_CREATE_INFO* localFBCI = new VK_FRAMEBUFFER_CREATE_INFO(*pCreateInfo);
+ VkFramebufferCreateInfo* localFBCI = new VkFramebufferCreateInfo(*pCreateInfo);
if (pCreateInfo->pColorAttachments) {
- localFBCI->pColorAttachments = new VK_COLOR_ATTACHMENT_BIND_INFO[localFBCI->colorAttachmentCount];
- memcpy((void*)localFBCI->pColorAttachments, pCreateInfo->pColorAttachments, localFBCI->colorAttachmentCount*sizeof(VK_COLOR_ATTACHMENT_BIND_INFO));
+ localFBCI->pColorAttachments = new VkColorAttachmentBindInfo[localFBCI->colorAttachmentCount];
+ memcpy((void*)localFBCI->pColorAttachments, pCreateInfo->pColorAttachments, localFBCI->colorAttachmentCount*sizeof(VkColorAttachmentBindInfo));
}
if (pCreateInfo->pDepthStencilAttachment) {
- localFBCI->pDepthStencilAttachment = new VK_DEPTH_STENCIL_BIND_INFO[localFBCI->colorAttachmentCount];
- memcpy((void*)localFBCI->pDepthStencilAttachment, pCreateInfo->pDepthStencilAttachment, localFBCI->colorAttachmentCount*sizeof(VK_DEPTH_STENCIL_BIND_INFO));
+ localFBCI->pDepthStencilAttachment = new VkDepthStencilBindInfo[localFBCI->colorAttachmentCount];
+ memcpy((void*)localFBCI->pDepthStencilAttachment, pCreateInfo->pDepthStencilAttachment, localFBCI->colorAttachmentCount*sizeof(VkDepthStencilBindInfo));
}
frameBufferMap[*pFramebuffer] = localFBCI;
}
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateRenderPass(VK_DEVICE device, const VK_RENDER_PASS_CREATE_INFO* pCreateInfo, VK_RENDER_PASS* pRenderPass)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, VkRenderPass* pRenderPass)
{
- VK_RESULT result = nextTable.CreateRenderPass(device, pCreateInfo, pRenderPass);
+ VkResult result = nextTable.CreateRenderPass(device, pCreateInfo, pRenderPass);
if (VK_SUCCESS == result) {
// Shadow create info and store in map
- VK_RENDER_PASS_CREATE_INFO* localRPCI = new VK_RENDER_PASS_CREATE_INFO(*pCreateInfo);
+ VkRenderPassCreateInfo* localRPCI = new VkRenderPassCreateInfo(*pCreateInfo);
if (pCreateInfo->pColorLoadOps) {
- localRPCI->pColorLoadOps = new VK_ATTACHMENT_LOAD_OP[localRPCI->colorAttachmentCount];
- memcpy((void*)localRPCI->pColorLoadOps, pCreateInfo->pColorLoadOps, localRPCI->colorAttachmentCount*sizeof(VK_ATTACHMENT_LOAD_OP));
+ localRPCI->pColorLoadOps = new VkAttachmentLoadOp[localRPCI->colorAttachmentCount];
+ memcpy((void*)localRPCI->pColorLoadOps, pCreateInfo->pColorLoadOps, localRPCI->colorAttachmentCount*sizeof(VkAttachmentLoadOp));
}
if (pCreateInfo->pColorStoreOps) {
- localRPCI->pColorStoreOps = new VK_ATTACHMENT_STORE_OP[localRPCI->colorAttachmentCount];
- memcpy((void*)localRPCI->pColorStoreOps, pCreateInfo->pColorStoreOps, localRPCI->colorAttachmentCount*sizeof(VK_ATTACHMENT_STORE_OP));
+ localRPCI->pColorStoreOps = new VkAttachmentStoreOp[localRPCI->colorAttachmentCount];
+ memcpy((void*)localRPCI->pColorStoreOps, pCreateInfo->pColorStoreOps, localRPCI->colorAttachmentCount*sizeof(VkAttachmentStoreOp));
}
if (pCreateInfo->pColorLoadClearValues) {
- localRPCI->pColorLoadClearValues = new VK_CLEAR_COLOR[localRPCI->colorAttachmentCount];
- memcpy((void*)localRPCI->pColorLoadClearValues, pCreateInfo->pColorLoadClearValues, localRPCI->colorAttachmentCount*sizeof(VK_CLEAR_COLOR));
+ localRPCI->pColorLoadClearValues = new VkClearColor[localRPCI->colorAttachmentCount];
+ memcpy((void*)localRPCI->pColorLoadClearValues, pCreateInfo->pColorLoadClearValues, localRPCI->colorAttachmentCount*sizeof(VkClearColor));
}
renderPassMap[*pRenderPass] = localRPCI;
}
return result;
}
-VK_LAYER_EXPORT void VKAPI vkCmdBeginRenderPass(VK_CMD_BUFFER cmdBuffer, const VK_RENDER_PASS_BEGIN *pRenderPassBegin)
+VK_LAYER_EXPORT void VKAPI vkCmdBeginRenderPass(VkCmdBuffer cmdBuffer, const VkRenderPassBegin *pRenderPassBegin)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -2561,7 +2561,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdBeginRenderPass(VK_CMD_BUFFER cmdBuffer, const V
nextTable.CmdBeginRenderPass(cmdBuffer, pRenderPassBegin);
}
-VK_LAYER_EXPORT void VKAPI vkCmdEndRenderPass(VK_CMD_BUFFER cmdBuffer, VK_RENDER_PASS renderPass)
+VK_LAYER_EXPORT void VKAPI vkCmdEndRenderPass(VkCmdBuffer cmdBuffer, VkRenderPass renderPass)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -2577,7 +2577,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdEndRenderPass(VK_CMD_BUFFER cmdBuffer, VK_RENDER
nextTable.CmdEndRenderPass(cmdBuffer, renderPass);
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkDbgRegisterMsgCallback(VK_INSTANCE instance, VK_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback, void* pUserData)
+VK_LAYER_EXPORT VkResult VKAPI vkDbgRegisterMsgCallback(VkInstance instance, VK_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback, void* pUserData)
{
// This layer intercepts callbacks
VK_LAYER_DBG_FUNCTION_NODE* pNewDbgFuncNode = new VK_LAYER_DBG_FUNCTION_NODE;
@@ -2591,11 +2591,11 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkDbgRegisterMsgCallback(VK_INSTANCE instance, V
if (g_actionIsDefault) {
g_debugAction = VK_DBG_LAYER_ACTION_CALLBACK;
}
- VK_RESULT result = nextTable.DbgRegisterMsgCallback(instance, pfnMsgCallback, pUserData);
+ VkResult result = nextTable.DbgRegisterMsgCallback(instance, pfnMsgCallback, pUserData);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkDbgUnregisterMsgCallback(VK_INSTANCE instance, VK_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback)
+VK_LAYER_EXPORT VkResult VKAPI vkDbgUnregisterMsgCallback(VkInstance instance, VK_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback)
{
VK_LAYER_DBG_FUNCTION_NODE *pTrav = g_pDbgFunctionHead;
VK_LAYER_DBG_FUNCTION_NODE *pPrev = pTrav;
@@ -2617,11 +2617,11 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkDbgUnregisterMsgCallback(VK_INSTANCE instance,
else
g_debugAction = (VK_LAYER_DBG_ACTION)(g_debugAction & ~((uint32_t)VK_DBG_LAYER_ACTION_CALLBACK));
}
- VK_RESULT result = nextTable.DbgUnregisterMsgCallback(instance, pfnMsgCallback);
+ VkResult result = nextTable.DbgUnregisterMsgCallback(instance, pfnMsgCallback);
return result;
}
-VK_LAYER_EXPORT void VKAPI vkCmdDbgMarkerBegin(VK_CMD_BUFFER cmdBuffer, const char* pMarker)
+VK_LAYER_EXPORT void VKAPI vkCmdDbgMarkerBegin(VkCmdBuffer cmdBuffer, const char* pMarker)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -2636,7 +2636,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdDbgMarkerBegin(VK_CMD_BUFFER cmdBuffer, const ch
nextTable.CmdDbgMarkerBegin(cmdBuffer, pMarker);
}
-VK_LAYER_EXPORT void VKAPI vkCmdDbgMarkerEnd(VK_CMD_BUFFER cmdBuffer)
+VK_LAYER_EXPORT void VKAPI vkCmdDbgMarkerEnd(VkCmdBuffer cmdBuffer)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
@@ -2688,7 +2688,7 @@ void drawStateDumpPngFile(char* outFileName)
#endif // WIN32
}
-VK_LAYER_EXPORT void* VKAPI vkGetProcAddr(VK_PHYSICAL_GPU gpu, const char* funcName)
+VK_LAYER_EXPORT void* VKAPI vkGetProcAddr(VkPhysicalGpu gpu, const char* funcName)
{
VK_BASE_LAYER_OBJECT* gpuw = (VK_BASE_LAYER_OBJECT *) gpu;
@@ -2844,6 +2844,6 @@ VK_LAYER_EXPORT void* VKAPI vkGetProcAddr(VK_PHYSICAL_GPU gpu, const char* funcN
else {
if (gpuw->pGPA == NULL)
return NULL;
- return gpuw->pGPA((VK_PHYSICAL_GPU)gpuw->nextObject, funcName);
+ return gpuw->pGPA((VkPhysicalGpu)gpuw->nextObject, funcName);
}
}
diff --git a/layers/draw_state.h b/layers/draw_state.h
index 426bf38a..f1404724 100644
--- a/layers/draw_state.h
+++ b/layers/draw_state.h
@@ -65,84 +65,84 @@ typedef enum _DRAW_TYPE
typedef struct _SHADER_DS_MAPPING {
uint32_t slotCount;
- VK_DESCRIPTOR_SET_LAYOUT_CREATE_INFO* pShaderMappingSlot;
+ VkDescriptorSetLayoutCreateInfo* pShaderMappingSlot;
} SHADER_DS_MAPPING;
typedef struct _GENERIC_HEADER {
- VK_STRUCTURE_TYPE sType;
+ VkStructureType sType;
const void* pNext;
} GENERIC_HEADER;
typedef struct _PIPELINE_NODE {
- VK_PIPELINE pipeline;
-
- VK_GRAPHICS_PIPELINE_CREATE_INFO graphicsPipelineCI;
- VK_PIPELINE_VERTEX_INPUT_CREATE_INFO vertexInputCI;
- VK_PIPELINE_IA_STATE_CREATE_INFO iaStateCI;
- VK_PIPELINE_TESS_STATE_CREATE_INFO tessStateCI;
- VK_PIPELINE_VP_STATE_CREATE_INFO vpStateCI;
- VK_PIPELINE_RS_STATE_CREATE_INFO rsStateCI;
- VK_PIPELINE_MS_STATE_CREATE_INFO msStateCI;
- VK_PIPELINE_CB_STATE_CREATE_INFO cbStateCI;
- VK_PIPELINE_DS_STATE_CREATE_INFO dsStateCI;
- VK_PIPELINE_SHADER_STAGE_CREATE_INFO vsCI;
- VK_PIPELINE_SHADER_STAGE_CREATE_INFO tcsCI;
- VK_PIPELINE_SHADER_STAGE_CREATE_INFO tesCI;
- VK_PIPELINE_SHADER_STAGE_CREATE_INFO gsCI;
- VK_PIPELINE_SHADER_STAGE_CREATE_INFO fsCI;
- // Compute shader is include in VK_COMPUTE_PIPELINE_CREATE_INFO
- VK_COMPUTE_PIPELINE_CREATE_INFO computePipelineCI;
-
- VK_GRAPHICS_PIPELINE_CREATE_INFO* pCreateTree; // Ptr to shadow of data in create tree
+ VkPipeline pipeline;
+
+ VkGraphicsPipelineCreateInfo graphicsPipelineCI;
+ VkPipelineVertexInputCreateInfo vertexInputCI;
+ VkPipelineIaStateCreateInfo iaStateCI;
+ VkPipelineTessStateCreateInfo tessStateCI;
+ VkPipelineVpStateCreateInfo vpStateCI;
+ VkPipelineRsStateCreateInfo rsStateCI;
+ VkPipelineMsStateCreateInfo msStateCI;
+ VkPipelineCbStateCreateInfo cbStateCI;
+ VkPipelineDsStateCreateInfo dsStateCI;
+ VkPipelineShaderStageCreateInfo vsCI;
+ VkPipelineShaderStageCreateInfo tcsCI;
+ VkPipelineShaderStageCreateInfo tesCI;
+ VkPipelineShaderStageCreateInfo gsCI;
+ VkPipelineShaderStageCreateInfo fsCI;
+ // Compute shader is include in VkComputePipelineCreateInfo
+ VkComputePipelineCreateInfo computePipelineCI;
+
+ VkGraphicsPipelineCreateInfo* pCreateTree; // Ptr to shadow of data in create tree
// Vtx input info (if any)
uint32_t vtxBindingCount; // number of bindings
- VK_VERTEX_INPUT_BINDING_DESCRIPTION* pVertexBindingDescriptions;
+ VkVertexInputBindingDescription* pVertexBindingDescriptions;
uint32_t vtxAttributeCount; // number of attributes
- VK_VERTEX_INPUT_ATTRIBUTE_DESCRIPTION* pVertexAttributeDescriptions;
+ VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
uint32_t attachmentCount; // number of CB attachments
- VK_PIPELINE_CB_ATTACHMENT_STATE* pAttachments;
+ VkPipelineCbAttachmentState* pAttachments;
} PIPELINE_NODE;
typedef struct _SAMPLER_NODE {
- VK_SAMPLER sampler;
- VK_SAMPLER_CREATE_INFO createInfo;
+ VkSampler sampler;
+ VkSamplerCreateInfo createInfo;
} SAMPLER_NODE;
typedef struct _IMAGE_NODE {
- VK_IMAGE_VIEW image;
- VK_IMAGE_VIEW_CREATE_INFO createInfo;
- VK_IMAGE_VIEW_ATTACH_INFO attachInfo;
+ VkImageView image;
+ VkImageViewCreateInfo createInfo;
+ VkImageViewAttachInfo attachInfo;
} IMAGE_NODE;
typedef struct _BUFFER_NODE {
- VK_BUFFER_VIEW buffer;
+ VkBufferView buffer;
VkBufferViewCreateInfo createInfo;
- VK_BUFFER_VIEW_ATTACH_INFO attachInfo;
+ VkBufferViewAttachInfo attachInfo;
} BUFFER_NODE;
typedef struct _DYNAMIC_STATE_NODE {
- VK_DYNAMIC_STATE_OBJECT stateObj;
+ VkDynamicStateObject stateObj;
GENERIC_HEADER* pCreateInfo;
union {
- VK_DYNAMIC_VP_STATE_CREATE_INFO vpci;
- VK_DYNAMIC_RS_STATE_CREATE_INFO rsci;
- VK_DYNAMIC_CB_STATE_CREATE_INFO cbci;
- VK_DYNAMIC_DS_STATE_CREATE_INFO dsci;
+ VkDynamicVpStateCreateInfo vpci;
+ VkDynamicRsStateCreateInfo rsci;
+ VkDynamicCbStateCreateInfo cbci;
+ VkDynamicDsStateCreateInfo dsci;
} create_info;
} DYNAMIC_STATE_NODE;
// Descriptor Data structures
// Layout Node has the core layout data
typedef struct _LAYOUT_NODE {
- VK_DESCRIPTOR_SET_LAYOUT layout;
- VK_DESCRIPTOR_TYPE* pTypes; // Dynamic array that will be created to verify descriptor types
- VK_DESCRIPTOR_SET_LAYOUT_CREATE_INFO createInfo;
+ VkDescriptorSetLayout layout;
+ VkDescriptorType* pTypes; // Dynamic array that will be created to verify descriptor types
+ VkDescriptorSetLayoutCreateInfo createInfo;
uint32_t startIndex; // 1st index of this layout
uint32_t endIndex; // last index of this layout
} LAYOUT_NODE;
typedef struct _SET_NODE {
- VK_DESCRIPTOR_SET set;
- VK_DESCRIPTOR_POOL pool;
- VK_DESCRIPTOR_SET_USAGE setUsage;
+ VkDescriptorSet set;
+ VkDescriptorPool pool;
+ VkDescriptorSetUsage setUsage;
// Head of LL of all Update structs for this set
GENERIC_HEADER* pUpdateStructs;
// Total num of descriptors in this set (count of its layout plus all prior layouts)
@@ -153,10 +153,10 @@ typedef struct _SET_NODE {
} SET_NODE;
typedef struct _POOL_NODE {
- VK_DESCRIPTOR_POOL pool;
- VK_DESCRIPTOR_POOL_USAGE poolUsage;
+ VkDescriptorPool pool;
+ VkDescriptorPoolUsage poolUsage;
uint32_t maxSets;
- VK_DESCRIPTOR_POOL_CREATE_INFO createInfo;
+ VkDescriptorPoolCreateInfo createInfo;
bool32_t updateActive; // Track if Pool is in an update block
SET_NODE* pSets; // Head of LL of sets for this Pool
} POOL_NODE;
@@ -218,10 +218,10 @@ typedef enum _CB_STATE
} CB_STATE;
// Cmd Buffer Wrapper Struct
typedef struct _GLOBAL_CB_NODE {
- VK_CMD_BUFFER cmdBuffer;
+ VkCmdBuffer cmdBuffer;
uint32_t queueNodeIndex;
- VK_FLAGS flags;
- VK_FENCE fence; // fence tracking this cmd buffer
+ VkFlags flags;
+ VkFence fence; // fence tracking this cmd buffer
uint64_t numCmds; // number of cmds in this CB
uint64_t drawCount[NUM_DRAW_TYPES]; // Count of each type of draw in this CB
CB_STATE state; // Track if cmd buffer update status
@@ -229,12 +229,12 @@ typedef struct _GLOBAL_CB_NODE {
// Currently storing "lastBound" objects on per-CB basis
// long-term may want to create caches of "lastBound" states and could have
// each individual CMD_NODE referencing its own "lastBound" state
- VK_PIPELINE lastBoundPipeline;
+ VkPipeline lastBoundPipeline;
uint32_t lastVtxBinding;
DYNAMIC_STATE_NODE* lastBoundDynamicState[VK_NUM_STATE_BIND_POINT];
- VK_DESCRIPTOR_SET lastBoundDescriptorSet;
- VK_RENDER_PASS activeRenderPass;
- VK_FRAMEBUFFER framebuffer;
+ VkDescriptorSet lastBoundDescriptorSet;
+ VkRenderPass activeRenderPass;
+ VkFramebuffer framebuffer;
} GLOBAL_CB_NODE;
//prototypes for extension functions
diff --git a/layers/glave_snapshot.c b/layers/glave_snapshot.c
index b4a976e8..5180ed70 100644
--- a/layers/glave_snapshot.c
+++ b/layers/glave_snapshot.c
@@ -139,19 +139,19 @@ void glv_deepfree_VkDeviceCreateInfo(VkDeviceCreateInfo* pCreateInfo)
free(pCreateInfo);
}
-void glv_vk_snapshot_copy_createdevice_params(GLV_VK_SNAPSHOT_CREATEDEVICE_PARAMS* pDest, VK_PHYSICAL_GPU gpu, const VkDeviceCreateInfo* pCreateInfo, VK_DEVICE* pDevice)
+void glv_vk_snapshot_copy_createdevice_params(GLV_VK_SNAPSHOT_CREATEDEVICE_PARAMS* pDest, VkPhysicalGpu gpu, const VkDeviceCreateInfo* pCreateInfo, VkDevice* pDevice)
{
pDest->gpu = gpu;
pDest->pCreateInfo = glv_deepcopy_VkDeviceCreateInfo(pCreateInfo);
- pDest->pDevice = (VK_DEVICE*)malloc(sizeof(VK_DEVICE));
+ pDest->pDevice = (VkDevice*)malloc(sizeof(VkDevice));
*pDest->pDevice = *pDevice;
}
void glv_vk_snapshot_destroy_createdevice_params(GLV_VK_SNAPSHOT_CREATEDEVICE_PARAMS* pSrc)
{
- memset(&pSrc->gpu, 0, sizeof(VK_PHYSICAL_GPU));
+ memset(&pSrc->gpu, 0, sizeof(VkPhysicalGpu));
glv_deepfree_VkDeviceCreateInfo(pSrc->pCreateInfo);
pSrc->pCreateInfo = NULL;
@@ -267,7 +267,7 @@ static void snapshot_insert_deleted_object(GLV_VK_SNAPSHOT* pSnapshot, void* pOb
}
// Note: the parameters after pSnapshot match the order of vkCreateDevice(..)
-static void snapshot_insert_device(GLV_VK_SNAPSHOT* pSnapshot, VK_PHYSICAL_GPU gpu, const VkDeviceCreateInfo* pCreateInfo, VK_DEVICE* pDevice)
+static void snapshot_insert_device(GLV_VK_SNAPSHOT* pSnapshot, VkPhysicalGpu gpu, const VkDeviceCreateInfo* pCreateInfo, VkDevice* pDevice)
{
GLV_VK_SNAPSHOT_LL_NODE* pNode = snapshot_insert_object(pSnapshot, *pDevice, VK_OBJECT_TYPE_DEVICE);
pNode->obj.pStruct = malloc(sizeof(GLV_VK_SNAPSHOT_DEVICE_NODE));
@@ -283,7 +283,7 @@ static void snapshot_insert_device(GLV_VK_SNAPSHOT* pSnapshot, VK_PHYSICAL_GPU g
pSnapshot->deviceCount++;
}
-static void snapshot_remove_device(GLV_VK_SNAPSHOT* pSnapshot, VK_DEVICE device)
+static void snapshot_remove_device(GLV_VK_SNAPSHOT* pSnapshot, VkDevice device)
{
GLV_VK_SNAPSHOT_LL_NODE* pFoundObject = snapshot_remove_object(pSnapshot, device);
@@ -331,7 +331,7 @@ static void snapshot_remove_device(GLV_VK_SNAPSHOT* pSnapshot, VK_DEVICE device)
}
// Traverse global list and return type for given object
-static VK_OBJECT_TYPE ll_get_obj_type(VK_OBJECT object) {
+static VK_OBJECT_TYPE ll_get_obj_type(VkObject object) {
GLV_VK_SNAPSHOT_LL_NODE *pTrav = s_delta.pGlobalObjs;
while (pTrav) {
if (pTrav->obj.pVkObject == object)
@@ -386,7 +386,7 @@ static void set_status(void* pObj, VK_OBJECT_TYPE objType, OBJECT_STATUS status_
}
// Track selected state for an object node
-static void track_object_status(void* pObj, VK_STATE_BIND_POINT stateBindPoint) {
+static void track_object_status(void* pObj, VkStateBindPoint stateBindPoint) {
GLV_VK_SNAPSHOT_LL_NODE *pTrav = s_delta.pObjectHead[VK_OBJECT_TYPE_CMD_BUFFER];
while (pTrav) {
@@ -447,11 +447,11 @@ static void initGlaveSnapshot(void)
g_logFile = stdout;
}
- vkGetProcAddrType fpNextGPA;
+ PFN_vkGetProcAddr fpNextGPA;
fpNextGPA = pCurObj->pGPA;
assert(fpNextGPA);
- layer_initialize_dispatch_table(&nextTable, fpNextGPA, (VK_PHYSICAL_GPU) pCurObj->nextObject);
+ layer_initialize_dispatch_table(&nextTable, fpNextGPA, (VkPhysicalGpu) pCurObj->nextObject);
if (!objLockInitialized)
{
// TODO/TBD: Need to delete this mutex sometime. How???
@@ -463,48 +463,48 @@ static void initGlaveSnapshot(void)
//=============================================================================
// vulkan entrypoints
//=============================================================================
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateInstance(const VkInstanceCreateInfo* pCreateInfo, VK_INSTANCE* pInstance)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateInstance(const VkInstanceCreateInfo* pCreateInfo, VkInstance* pInstance)
{
- VK_RESULT result = nextTable.CreateInstance(pCreateInfo, pInstance);
+ VkResult result = nextTable.CreateInstance(pCreateInfo, pInstance);
loader_platform_thread_lock_mutex(&objLock);
snapshot_insert_object(&s_delta, *pInstance, VK_OBJECT_TYPE_INSTANCE);
loader_platform_thread_unlock_mutex(&objLock);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkDestroyInstance(VK_INSTANCE instance)
+VK_LAYER_EXPORT VkResult VKAPI vkDestroyInstance(VkInstance instance)
{
- VK_RESULT result = nextTable.DestroyInstance(instance);
+ VkResult result = nextTable.DestroyInstance(instance);
loader_platform_thread_lock_mutex(&objLock);
snapshot_remove_object(&s_delta, (void*)instance);
loader_platform_thread_unlock_mutex(&objLock);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkEnumerateGpus(VK_INSTANCE instance, uint32_t maxGpus, uint32_t* pGpuCount, VK_PHYSICAL_GPU* pGpus)
+VK_LAYER_EXPORT VkResult VKAPI vkEnumerateGpus(VkInstance instance, uint32_t maxGpus, uint32_t* pGpuCount, VkPhysicalGpu* pGpus)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)instance, VK_OBJECT_TYPE_INSTANCE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.EnumerateGpus(instance, maxGpus, pGpuCount, pGpus);
+ VkResult result = nextTable.EnumerateGpus(instance, maxGpus, pGpuCount, pGpus);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkGetGpuInfo(VK_PHYSICAL_GPU gpu, VK_PHYSICAL_GPU_INFO_TYPE infoType, size_t* pDataSize, void* pData)
+VK_LAYER_EXPORT VkResult VKAPI vkGetGpuInfo(VkPhysicalGpu gpu, VkPhysicalGpuInfoType infoType, size_t* pDataSize, void* pData)
{
VK_BASE_LAYER_OBJECT* gpuw = (VK_BASE_LAYER_OBJECT *) gpu;
pCurObj = gpuw;
loader_platform_thread_once(&tabOnce, initGlaveSnapshot);
- VK_RESULT result = nextTable.GetGpuInfo((VK_PHYSICAL_GPU)gpuw->nextObject, infoType, pDataSize, pData);
+ VkResult result = nextTable.GetGpuInfo((VkPhysicalGpu)gpuw->nextObject, infoType, pDataSize, pData);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDevice(VK_PHYSICAL_GPU gpu, const VkDeviceCreateInfo* pCreateInfo, VK_DEVICE* pDevice)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDevice(VkPhysicalGpu gpu, const VkDeviceCreateInfo* pCreateInfo, VkDevice* pDevice)
{
VK_BASE_LAYER_OBJECT* gpuw = (VK_BASE_LAYER_OBJECT *) gpu;
pCurObj = gpuw;
loader_platform_thread_once(&tabOnce, initGlaveSnapshot);
- VK_RESULT result = nextTable.CreateDevice((VK_PHYSICAL_GPU)gpuw->nextObject, pCreateInfo, pDevice);
+ VkResult result = nextTable.CreateDevice((VkPhysicalGpu)gpuw->nextObject, pCreateInfo, pDevice);
if (result == VK_SUCCESS)
{
loader_platform_thread_lock_mutex(&objLock);
@@ -514,9 +514,9 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDevice(VK_PHYSICAL_GPU gpu, const VkDevi
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkDestroyDevice(VK_DEVICE device)
+VK_LAYER_EXPORT VkResult VKAPI vkDestroyDevice(VkDevice device)
{
- VK_RESULT result = nextTable.DestroyDevice(device);
+ VkResult result = nextTable.DestroyDevice(device);
loader_platform_thread_lock_mutex(&objLock);
snapshot_remove_device(&s_delta, device);
loader_platform_thread_unlock_mutex(&objLock);
@@ -541,7 +541,7 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkDestroyDevice(VK_DEVICE device)
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkGetExtensionSupport(VK_PHYSICAL_GPU gpu, const char* pExtName)
+VK_LAYER_EXPORT VkResult VKAPI vkGetExtensionSupport(VkPhysicalGpu gpu, const char* pExtName)
{
VK_BASE_LAYER_OBJECT* gpuw = (VK_BASE_LAYER_OBJECT *) gpu;
loader_platform_thread_lock_mutex(&objLock);
@@ -549,11 +549,11 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkGetExtensionSupport(VK_PHYSICAL_GPU gpu, const
loader_platform_thread_unlock_mutex(&objLock);
pCurObj = gpuw;
loader_platform_thread_once(&tabOnce, initGlaveSnapshot);
- VK_RESULT result = nextTable.GetExtensionSupport((VK_PHYSICAL_GPU)gpuw->nextObject, pExtName);
+ VkResult result = nextTable.GetExtensionSupport((VkPhysicalGpu)gpuw->nextObject, pExtName);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkEnumerateLayers(VK_PHYSICAL_GPU gpu, size_t maxLayerCount, size_t maxStringSize, size_t* pOutLayerCount, char* const* pOutLayers, void* pReserved)
+VK_LAYER_EXPORT VkResult VKAPI vkEnumerateLayers(VkPhysicalGpu gpu, size_t maxLayerCount, size_t maxStringSize, size_t* pOutLayerCount, char* const* pOutLayers, void* pReserved)
{
if (gpu != NULL) {
VK_BASE_LAYER_OBJECT* gpuw = (VK_BASE_LAYER_OBJECT *) gpu;
@@ -562,7 +562,7 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkEnumerateLayers(VK_PHYSICAL_GPU gpu, size_t ma
loader_platform_thread_unlock_mutex(&objLock);
pCurObj = gpuw;
loader_platform_thread_once(&tabOnce, initGlaveSnapshot);
- VK_RESULT result = nextTable.EnumerateLayers((VK_PHYSICAL_GPU)gpuw->nextObject, maxLayerCount, maxStringSize, pOutLayerCount, pOutLayers, pReserved);
+ VkResult result = nextTable.EnumerateLayers((VkPhysicalGpu)gpuw->nextObject, maxLayerCount, maxStringSize, pOutLayerCount, pOutLayers, pReserved);
return result;
} else {
if (pOutLayerCount == NULL || pOutLayers == NULL || pOutLayers[0] == NULL)
@@ -574,43 +574,43 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkEnumerateLayers(VK_PHYSICAL_GPU gpu, size_t ma
}
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkGetDeviceQueue(VK_DEVICE device, uint32_t queueNodeIndex, uint32_t queueIndex, VK_QUEUE* pQueue)
+VK_LAYER_EXPORT VkResult VKAPI vkGetDeviceQueue(VkDevice device, uint32_t queueNodeIndex, uint32_t queueIndex, VkQueue* pQueue)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.GetDeviceQueue(device, queueNodeIndex, queueIndex, pQueue);
+ VkResult result = nextTable.GetDeviceQueue(device, queueNodeIndex, queueIndex, pQueue);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkQueueSubmit(VK_QUEUE queue, uint32_t cmdBufferCount, const VK_CMD_BUFFER* pCmdBuffers, VK_FENCE fence)
+VK_LAYER_EXPORT VkResult VKAPI vkQueueSubmit(VkQueue queue, uint32_t cmdBufferCount, const VkCmdBuffer* pCmdBuffers, VkFence fence)
{
set_status((void*)fence, VK_OBJECT_TYPE_FENCE, OBJSTATUS_FENCE_IS_SUBMITTED);
- VK_RESULT result = nextTable.QueueSubmit(queue, cmdBufferCount, pCmdBuffers, fence);
+ VkResult result = nextTable.QueueSubmit(queue, cmdBufferCount, pCmdBuffers, fence);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkQueueWaitIdle(VK_QUEUE queue)
+VK_LAYER_EXPORT VkResult VKAPI vkQueueWaitIdle(VkQueue queue)
{
- VK_RESULT result = nextTable.QueueWaitIdle(queue);
+ VkResult result = nextTable.QueueWaitIdle(queue);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkDeviceWaitIdle(VK_DEVICE device)
+VK_LAYER_EXPORT VkResult VKAPI vkDeviceWaitIdle(VkDevice device)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.DeviceWaitIdle(device);
+ VkResult result = nextTable.DeviceWaitIdle(device);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkAllocMemory(VK_DEVICE device, const VkMemoryAllocInfo* pAllocInfo, VK_GPU_MEMORY* pMem)
+VK_LAYER_EXPORT VkResult VKAPI vkAllocMemory(VkDevice device, const VkMemoryAllocInfo* pAllocInfo, VkGpuMemory* pMem)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.AllocMemory(device, pAllocInfo, pMem);
+ VkResult result = nextTable.AllocMemory(device, pAllocInfo, pMem);
if (result == VK_SUCCESS)
{
loader_platform_thread_lock_mutex(&objLock);
@@ -621,54 +621,54 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkAllocMemory(VK_DEVICE device, const VkMemoryAl
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkFreeMemory(VK_GPU_MEMORY mem)
+VK_LAYER_EXPORT VkResult VKAPI vkFreeMemory(VkGpuMemory mem)
{
- VK_RESULT result = nextTable.FreeMemory(mem);
+ VkResult result = nextTable.FreeMemory(mem);
loader_platform_thread_lock_mutex(&objLock);
snapshot_remove_object(&s_delta, (void*)mem);
loader_platform_thread_unlock_mutex(&objLock);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkSetMemoryPriority(VK_GPU_MEMORY mem, VK_MEMORY_PRIORITY priority)
+VK_LAYER_EXPORT VkResult VKAPI vkSetMemoryPriority(VkGpuMemory mem, VkMemoryPriority priority)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)mem, VK_OBJECT_TYPE_GPU_MEMORY);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.SetMemoryPriority(mem, priority);
+ VkResult result = nextTable.SetMemoryPriority(mem, priority);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkMapMemory(VK_GPU_MEMORY mem, VK_FLAGS flags, void** ppData)
+VK_LAYER_EXPORT VkResult VKAPI vkMapMemory(VkGpuMemory mem, VkFlags flags, void** ppData)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)mem, VK_OBJECT_TYPE_GPU_MEMORY);
loader_platform_thread_unlock_mutex(&objLock);
set_status((void*)mem, VK_OBJECT_TYPE_GPU_MEMORY, OBJSTATUS_GPU_MEM_MAPPED);
- VK_RESULT result = nextTable.MapMemory(mem, flags, ppData);
+ VkResult result = nextTable.MapMemory(mem, flags, ppData);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkUnmapMemory(VK_GPU_MEMORY mem)
+VK_LAYER_EXPORT VkResult VKAPI vkUnmapMemory(VkGpuMemory mem)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)mem, VK_OBJECT_TYPE_GPU_MEMORY);
loader_platform_thread_unlock_mutex(&objLock);
reset_status((void*)mem, VK_OBJECT_TYPE_GPU_MEMORY, OBJSTATUS_GPU_MEM_MAPPED);
- VK_RESULT result = nextTable.UnmapMemory(mem);
+ VkResult result = nextTable.UnmapMemory(mem);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkPinSystemMemory(VK_DEVICE device, const void* pSysMem, size_t memSize, VK_GPU_MEMORY* pMem)
+VK_LAYER_EXPORT VkResult VKAPI vkPinSystemMemory(VkDevice device, const void* pSysMem, size_t memSize, VkGpuMemory* pMem)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.PinSystemMemory(device, pSysMem, memSize, pMem);
+ VkResult result = nextTable.PinSystemMemory(device, pSysMem, memSize, pMem);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkGetMultiGpuCompatibility(VK_PHYSICAL_GPU gpu0, VK_PHYSICAL_GPU gpu1, VK_GPU_COMPATIBILITY_INFO* pInfo)
+VK_LAYER_EXPORT VkResult VKAPI vkGetMultiGpuCompatibility(VkPhysicalGpu gpu0, VkPhysicalGpu gpu1, VkGpuCompatibilityInfo* pInfo)
{
VK_BASE_LAYER_OBJECT* gpuw = (VK_BASE_LAYER_OBJECT *) gpu0;
loader_platform_thread_lock_mutex(&objLock);
@@ -676,97 +676,97 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkGetMultiGpuCompatibility(VK_PHYSICAL_GPU gpu0,
loader_platform_thread_unlock_mutex(&objLock);
pCurObj = gpuw;
loader_platform_thread_once(&tabOnce, initGlaveSnapshot);
- VK_RESULT result = nextTable.GetMultiGpuCompatibility((VK_PHYSICAL_GPU)gpuw->nextObject, gpu1, pInfo);
+ VkResult result = nextTable.GetMultiGpuCompatibility((VkPhysicalGpu)gpuw->nextObject, gpu1, pInfo);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkOpenSharedMemory(VK_DEVICE device, const VK_MEMORY_OPEN_INFO* pOpenInfo, VK_GPU_MEMORY* pMem)
+VK_LAYER_EXPORT VkResult VKAPI vkOpenSharedMemory(VkDevice device, const VkMemoryOpenInfo* pOpenInfo, VkGpuMemory* pMem)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.OpenSharedMemory(device, pOpenInfo, pMem);
+ VkResult result = nextTable.OpenSharedMemory(device, pOpenInfo, pMem);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkOpenSharedSemaphore(VK_DEVICE device, const VK_SEMAPHORE_OPEN_INFO* pOpenInfo, VK_SEMAPHORE* pSemaphore)
+VK_LAYER_EXPORT VkResult VKAPI vkOpenSharedSemaphore(VkDevice device, const VkSemaphoreOpenInfo* pOpenInfo, VkSemaphore* pSemaphore)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.OpenSharedSemaphore(device, pOpenInfo, pSemaphore);
+ VkResult result = nextTable.OpenSharedSemaphore(device, pOpenInfo, pSemaphore);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkOpenPeerMemory(VK_DEVICE device, const VK_PEER_MEMORY_OPEN_INFO* pOpenInfo, VK_GPU_MEMORY* pMem)
+VK_LAYER_EXPORT VkResult VKAPI vkOpenPeerMemory(VkDevice device, const VkPeerMemoryOpenInfo* pOpenInfo, VkGpuMemory* pMem)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.OpenPeerMemory(device, pOpenInfo, pMem);
+ VkResult result = nextTable.OpenPeerMemory(device, pOpenInfo, pMem);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkOpenPeerImage(VK_DEVICE device, const VK_PEER_IMAGE_OPEN_INFO* pOpenInfo, VK_IMAGE* pImage, VK_GPU_MEMORY* pMem)
+VK_LAYER_EXPORT VkResult VKAPI vkOpenPeerImage(VkDevice device, const VkPeerImageOpenInfo* pOpenInfo, VkImage* pImage, VkGpuMemory* pMem)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.OpenPeerImage(device, pOpenInfo, pImage, pMem);
+ VkResult result = nextTable.OpenPeerImage(device, pOpenInfo, pImage, pMem);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkDestroyObject(VK_OBJECT object)
+VK_LAYER_EXPORT VkResult VKAPI vkDestroyObject(VkObject object)
{
- VK_RESULT result = nextTable.DestroyObject(object);
+ VkResult result = nextTable.DestroyObject(object);
loader_platform_thread_lock_mutex(&objLock);
snapshot_remove_object(&s_delta, (void*)object);
loader_platform_thread_unlock_mutex(&objLock);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkGetObjectInfo(VK_BASE_OBJECT object, VK_OBJECT_INFO_TYPE infoType, size_t* pDataSize, void* pData)
+VK_LAYER_EXPORT VkResult VKAPI vkGetObjectInfo(VkBaseObject object, VkObjectInfoType infoType, size_t* pDataSize, void* pData)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)object, ll_get_obj_type(object));
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.GetObjectInfo(object, infoType, pDataSize, pData);
+ VkResult result = nextTable.GetObjectInfo(object, infoType, pDataSize, pData);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkBindObjectMemory(VK_OBJECT object, uint32_t allocationIdx, VK_GPU_MEMORY mem, VK_GPU_SIZE offset)
+VK_LAYER_EXPORT VkResult VKAPI vkBindObjectMemory(VkObject object, uint32_t allocationIdx, VkGpuMemory mem, VkGpuSize offset)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)object, ll_get_obj_type(object));
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.BindObjectMemory(object, allocationIdx, mem, offset);
+ VkResult result = nextTable.BindObjectMemory(object, allocationIdx, mem, offset);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkBindObjectMemoryRange(VK_OBJECT object, uint32_t allocationIdx, VK_GPU_SIZE rangeOffset, VK_GPU_SIZE rangeSize, VK_GPU_MEMORY mem, VK_GPU_SIZE memOffset)
+VK_LAYER_EXPORT VkResult VKAPI vkBindObjectMemoryRange(VkObject object, uint32_t allocationIdx, VkGpuSize rangeOffset, VkGpuSize rangeSize, VkGpuMemory mem, VkGpuSize memOffset)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)object, ll_get_obj_type(object));
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.BindObjectMemoryRange(object, allocationIdx, rangeOffset, rangeSize, mem, memOffset);
+ VkResult result = nextTable.BindObjectMemoryRange(object, allocationIdx, rangeOffset, rangeSize, mem, memOffset);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkBindImageMemoryRange(VK_IMAGE image, uint32_t allocationIdx, const VK_IMAGE_MEMORY_BIND_INFO* bindInfo, VK_GPU_MEMORY mem, VK_GPU_SIZE memOffset)
+VK_LAYER_EXPORT VkResult VKAPI vkBindImageMemoryRange(VkImage image, uint32_t allocationIdx, const VkImageMemoryBindInfo* bindInfo, VkGpuMemory mem, VkGpuSize memOffset)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)image, VK_OBJECT_TYPE_IMAGE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.BindImageMemoryRange(image, allocationIdx, bindInfo, mem, memOffset);
+ VkResult result = nextTable.BindImageMemoryRange(image, allocationIdx, bindInfo, mem, memOffset);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateFence(VK_DEVICE device, const VK_FENCE_CREATE_INFO* pCreateInfo, VK_FENCE* pFence)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateFence(VkDevice device, const VkFenceCreateInfo* pCreateInfo, VkFence* pFence)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.CreateFence(device, pCreateInfo, pFence);
+ VkResult result = nextTable.CreateFence(device, pCreateInfo, pFence);
if (result == VK_SUCCESS)
{
loader_platform_thread_lock_mutex(&objLock);
@@ -777,31 +777,31 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateFence(VK_DEVICE device, const VK_FENCE_C
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkGetFenceStatus(VK_FENCE fence)
+VK_LAYER_EXPORT VkResult VKAPI vkGetFenceStatus(VkFence fence)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)fence, VK_OBJECT_TYPE_FENCE);
loader_platform_thread_unlock_mutex(&objLock);
// Warn if submitted_flag is not set
- VK_RESULT result = nextTable.GetFenceStatus(fence);
+ VkResult result = nextTable.GetFenceStatus(fence);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkWaitForFences(VK_DEVICE device, uint32_t fenceCount, const VK_FENCE* pFences, bool32_t waitAll, uint64_t timeout)
+VK_LAYER_EXPORT VkResult VKAPI vkWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence* pFences, bool32_t waitAll, uint64_t timeout)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.WaitForFences(device, fenceCount, pFences, waitAll, timeout);
+ VkResult result = nextTable.WaitForFences(device, fenceCount, pFences, waitAll, timeout);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateSemaphore(VK_DEVICE device, const VK_SEMAPHORE_CREATE_INFO* pCreateInfo, VK_SEMAPHORE* pSemaphore)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo, VkSemaphore* pSemaphore)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.CreateSemaphore(device, pCreateInfo, pSemaphore);
+ VkResult result = nextTable.CreateSemaphore(device, pCreateInfo, pSemaphore);
if (result == VK_SUCCESS)
{
loader_platform_thread_lock_mutex(&objLock);
@@ -812,24 +812,24 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateSemaphore(VK_DEVICE device, const VK_SEM
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkQueueSignalSemaphore(VK_QUEUE queue, VK_SEMAPHORE semaphore)
+VK_LAYER_EXPORT VkResult VKAPI vkQueueSignalSemaphore(VkQueue queue, VkSemaphore semaphore)
{
- VK_RESULT result = nextTable.QueueSignalSemaphore(queue, semaphore);
+ VkResult result = nextTable.QueueSignalSemaphore(queue, semaphore);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkQueueWaitSemaphore(VK_QUEUE queue, VK_SEMAPHORE semaphore)
+VK_LAYER_EXPORT VkResult VKAPI vkQueueWaitSemaphore(VkQueue queue, VkSemaphore semaphore)
{
- VK_RESULT result = nextTable.QueueWaitSemaphore(queue, semaphore);
+ VkResult result = nextTable.QueueWaitSemaphore(queue, semaphore);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateEvent(VK_DEVICE device, const VK_EVENT_CREATE_INFO* pCreateInfo, VK_EVENT* pEvent)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateEvent(VkDevice device, const VkEventCreateInfo* pCreateInfo, VkEvent* pEvent)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.CreateEvent(device, pCreateInfo, pEvent);
+ VkResult result = nextTable.CreateEvent(device, pCreateInfo, pEvent);
if (result == VK_SUCCESS)
{
loader_platform_thread_lock_mutex(&objLock);
@@ -840,39 +840,39 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateEvent(VK_DEVICE device, const VK_EVENT_C
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkGetEventStatus(VK_EVENT event)
+VK_LAYER_EXPORT VkResult VKAPI vkGetEventStatus(VkEvent event)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)event, VK_OBJECT_TYPE_EVENT);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.GetEventStatus(event);
+ VkResult result = nextTable.GetEventStatus(event);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkSetEvent(VK_EVENT event)
+VK_LAYER_EXPORT VkResult VKAPI vkSetEvent(VkEvent event)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)event, VK_OBJECT_TYPE_EVENT);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.SetEvent(event);
+ VkResult result = nextTable.SetEvent(event);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkResetEvent(VK_EVENT event)
+VK_LAYER_EXPORT VkResult VKAPI vkResetEvent(VkEvent event)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)event, VK_OBJECT_TYPE_EVENT);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.ResetEvent(event);
+ VkResult result = nextTable.ResetEvent(event);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateQueryPool(VK_DEVICE device, const VK_QUERY_POOL_CREATE_INFO* pCreateInfo, VK_QUERY_POOL* pQueryPool)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo, VkQueryPool* pQueryPool)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.CreateQueryPool(device, pCreateInfo, pQueryPool);
+ VkResult result = nextTable.CreateQueryPool(device, pCreateInfo, pQueryPool);
if (result == VK_SUCCESS)
{
loader_platform_thread_lock_mutex(&objLock);
@@ -883,30 +883,30 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateQueryPool(VK_DEVICE device, const VK_QUE
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkGetQueryPoolResults(VK_QUERY_POOL queryPool, uint32_t startQuery, uint32_t queryCount, size_t* pDataSize, void* pData)
+VK_LAYER_EXPORT VkResult VKAPI vkGetQueryPoolResults(VkQueryPool queryPool, uint32_t startQuery, uint32_t queryCount, size_t* pDataSize, void* pData)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)queryPool, VK_OBJECT_TYPE_QUERY_POOL);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.GetQueryPoolResults(queryPool, startQuery, queryCount, pDataSize, pData);
+ VkResult result = nextTable.GetQueryPoolResults(queryPool, startQuery, queryCount, pDataSize, pData);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkGetFormatInfo(VK_DEVICE device, VK_FORMAT format, VK_FORMAT_INFO_TYPE infoType, size_t* pDataSize, void* pData)
+VK_LAYER_EXPORT VkResult VKAPI vkGetFormatInfo(VkDevice device, VkFormat format, VkFormatInfoType infoType, size_t* pDataSize, void* pData)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.GetFormatInfo(device, format, infoType, pDataSize, pData);
+ VkResult result = nextTable.GetFormatInfo(device, format, infoType, pDataSize, pData);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateBuffer(VK_DEVICE device, const VkBufferCreateInfo* pCreateInfo, VK_BUFFER* pBuffer)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateBuffer(VkDevice device, const VkBufferCreateInfo* pCreateInfo, VkBuffer* pBuffer)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.CreateBuffer(device, pCreateInfo, pBuffer);
+ VkResult result = nextTable.CreateBuffer(device, pCreateInfo, pBuffer);
if (result == VK_SUCCESS)
{
loader_platform_thread_lock_mutex(&objLock);
@@ -917,12 +917,12 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateBuffer(VK_DEVICE device, const VkBufferC
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateBufferView(VK_DEVICE device, const VkBufferViewCreateInfo* pCreateInfo, VK_BUFFER_VIEW* pView)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateBufferView(VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, VkBufferView* pView)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.CreateBufferView(device, pCreateInfo, pView);
+ VkResult result = nextTable.CreateBufferView(device, pCreateInfo, pView);
if (result == VK_SUCCESS)
{
loader_platform_thread_lock_mutex(&objLock);
@@ -933,12 +933,12 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateBufferView(VK_DEVICE device, const VkBuf
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateImage(VK_DEVICE device, const VK_IMAGE_CREATE_INFO* pCreateInfo, VK_IMAGE* pImage)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateImage(VkDevice device, const VkImageCreateInfo* pCreateInfo, VkImage* pImage)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.CreateImage(device, pCreateInfo, pImage);
+ VkResult result = nextTable.CreateImage(device, pCreateInfo, pImage);
if (result == VK_SUCCESS)
{
loader_platform_thread_lock_mutex(&objLock);
@@ -949,21 +949,21 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateImage(VK_DEVICE device, const VK_IMAGE_C
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkGetImageSubresourceInfo(VK_IMAGE image, const VK_IMAGE_SUBRESOURCE* pSubresource, VK_SUBRESOURCE_INFO_TYPE infoType, size_t* pDataSize, void* pData)
+VK_LAYER_EXPORT VkResult VKAPI vkGetImageSubresourceInfo(VkImage image, const VkImageSubresource* pSubresource, VkSubresourceInfoType infoType, size_t* pDataSize, void* pData)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)image, VK_OBJECT_TYPE_IMAGE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.GetImageSubresourceInfo(image, pSubresource, infoType, pDataSize, pData);
+ VkResult result = nextTable.GetImageSubresourceInfo(image, pSubresource, infoType, pDataSize, pData);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateImageView(VK_DEVICE device, const VK_IMAGE_VIEW_CREATE_INFO* pCreateInfo, VK_IMAGE_VIEW* pView)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateImageView(VkDevice device, const VkImageViewCreateInfo* pCreateInfo, VkImageView* pView)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.CreateImageView(device, pCreateInfo, pView);
+ VkResult result = nextTable.CreateImageView(device, pCreateInfo, pView);
if (result == VK_SUCCESS)
{
loader_platform_thread_lock_mutex(&objLock);
@@ -974,12 +974,12 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateImageView(VK_DEVICE device, const VK_IMA
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateColorAttachmentView(VK_DEVICE device, const VK_COLOR_ATTACHMENT_VIEW_CREATE_INFO* pCreateInfo, VK_COLOR_ATTACHMENT_VIEW* pView)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateColorAttachmentView(VkDevice device, const VkColorAttachmentViewCreateInfo* pCreateInfo, VkColorAttachmentView* pView)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.CreateColorAttachmentView(device, pCreateInfo, pView);
+ VkResult result = nextTable.CreateColorAttachmentView(device, pCreateInfo, pView);
if (result == VK_SUCCESS)
{
loader_platform_thread_lock_mutex(&objLock);
@@ -990,12 +990,12 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateColorAttachmentView(VK_DEVICE device, co
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDepthStencilView(VK_DEVICE device, const VK_DEPTH_STENCIL_VIEW_CREATE_INFO* pCreateInfo, VK_DEPTH_STENCIL_VIEW* pView)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDepthStencilView(VkDevice device, const VkDepthStencilViewCreateInfo* pCreateInfo, VkDepthStencilView* pView)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.CreateDepthStencilView(device, pCreateInfo, pView);
+ VkResult result = nextTable.CreateDepthStencilView(device, pCreateInfo, pView);
if (result == VK_SUCCESS)
{
loader_platform_thread_lock_mutex(&objLock);
@@ -1006,12 +1006,12 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDepthStencilView(VK_DEVICE device, const
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateShader(VK_DEVICE device, const VK_SHADER_CREATE_INFO* pCreateInfo, VK_SHADER* pShader)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateShader(VkDevice device, const VkShaderCreateInfo* pCreateInfo, VkShader* pShader)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.CreateShader(device, pCreateInfo, pShader);
+ VkResult result = nextTable.CreateShader(device, pCreateInfo, pShader);
if (result == VK_SUCCESS)
{
loader_platform_thread_lock_mutex(&objLock);
@@ -1022,12 +1022,12 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateShader(VK_DEVICE device, const VK_SHADER
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateGraphicsPipeline(VK_DEVICE device, const VK_GRAPHICS_PIPELINE_CREATE_INFO* pCreateInfo, VK_PIPELINE* pPipeline)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateGraphicsPipeline(VkDevice device, const VkGraphicsPipelineCreateInfo* pCreateInfo, VkPipeline* pPipeline)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.CreateGraphicsPipeline(device, pCreateInfo, pPipeline);
+ VkResult result = nextTable.CreateGraphicsPipeline(device, pCreateInfo, pPipeline);
if (result == VK_SUCCESS)
{
loader_platform_thread_lock_mutex(&objLock);
@@ -1038,12 +1038,12 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateGraphicsPipeline(VK_DEVICE device, const
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateComputePipeline(VK_DEVICE device, const VK_COMPUTE_PIPELINE_CREATE_INFO* pCreateInfo, VK_PIPELINE* pPipeline)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateComputePipeline(VkDevice device, const VkComputePipelineCreateInfo* pCreateInfo, VkPipeline* pPipeline)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.CreateComputePipeline(device, pCreateInfo, pPipeline);
+ VkResult result = nextTable.CreateComputePipeline(device, pCreateInfo, pPipeline);
if (result == VK_SUCCESS)
{
loader_platform_thread_lock_mutex(&objLock);
@@ -1054,30 +1054,30 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateComputePipeline(VK_DEVICE device, const
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkStorePipeline(VK_PIPELINE pipeline, size_t* pDataSize, void* pData)
+VK_LAYER_EXPORT VkResult VKAPI vkStorePipeline(VkPipeline pipeline, size_t* pDataSize, void* pData)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)pipeline, VK_OBJECT_TYPE_PIPELINE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.StorePipeline(pipeline, pDataSize, pData);
+ VkResult result = nextTable.StorePipeline(pipeline, pDataSize, pData);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkLoadPipeline(VK_DEVICE device, size_t dataSize, const void* pData, VK_PIPELINE* pPipeline)
+VK_LAYER_EXPORT VkResult VKAPI vkLoadPipeline(VkDevice device, size_t dataSize, const void* pData, VkPipeline* pPipeline)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.LoadPipeline(device, dataSize, pData, pPipeline);
+ VkResult result = nextTable.LoadPipeline(device, dataSize, pData, pPipeline);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateSampler(VK_DEVICE device, const VK_SAMPLER_CREATE_INFO* pCreateInfo, VK_SAMPLER* pSampler)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateSampler(VkDevice device, const VkSamplerCreateInfo* pCreateInfo, VkSampler* pSampler)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.CreateSampler(device, pCreateInfo, pSampler);
+ VkResult result = nextTable.CreateSampler(device, pCreateInfo, pSampler);
if (result == VK_SUCCESS)
{
loader_platform_thread_lock_mutex(&objLock);
@@ -1088,12 +1088,12 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateSampler(VK_DEVICE device, const VK_SAMPL
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDescriptorSetLayout( VK_DEVICE device, const VK_DESCRIPTOR_SET_LAYOUT_CREATE_INFO* pCreateInfo, VK_DESCRIPTOR_SET_LAYOUT* pSetLayout)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDescriptorSetLayout( VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayout* pSetLayout)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.CreateDescriptorSetLayout(device, pCreateInfo, pSetLayout);
+ VkResult result = nextTable.CreateDescriptorSetLayout(device, pCreateInfo, pSetLayout);
if (result == VK_SUCCESS)
{
loader_platform_thread_lock_mutex(&objLock);
@@ -1104,30 +1104,30 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDescriptorSetLayout( VK_DEVICE device, c
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkBeginDescriptorPoolUpdate(VK_DEVICE device, VK_DESCRIPTOR_UPDATE_MODE updateMode)
+VK_LAYER_EXPORT VkResult VKAPI vkBeginDescriptorPoolUpdate(VkDevice device, VkDescriptorUpdateMode updateMode)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.BeginDescriptorPoolUpdate(device, updateMode);
+ VkResult result = nextTable.BeginDescriptorPoolUpdate(device, updateMode);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkEndDescriptorPoolUpdate(VK_DEVICE device, VK_CMD_BUFFER cmd)
+VK_LAYER_EXPORT VkResult VKAPI vkEndDescriptorPoolUpdate(VkDevice device, VkCmdBuffer cmd)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.EndDescriptorPoolUpdate(device, cmd);
+ VkResult result = nextTable.EndDescriptorPoolUpdate(device, cmd);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDescriptorPool(VK_DEVICE device, VK_DESCRIPTOR_POOL_USAGE poolUsage, uint32_t maxSets, const VK_DESCRIPTOR_POOL_CREATE_INFO* pCreateInfo, VK_DESCRIPTOR_POOL* pDescriptorPool)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDescriptorPool(VkDevice device, VkDescriptorPoolUsage poolUsage, uint32_t maxSets, const VkDescriptorPoolCreateInfo* pCreateInfo, VkDescriptorPool* pDescriptorPool)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.CreateDescriptorPool(device, poolUsage, maxSets, pCreateInfo, pDescriptorPool);
+ VkResult result = nextTable.CreateDescriptorPool(device, poolUsage, maxSets, pCreateInfo, pDescriptorPool);
if (result == VK_SUCCESS)
{
loader_platform_thread_lock_mutex(&objLock);
@@ -1138,21 +1138,21 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDescriptorPool(VK_DEVICE device, VK_DESC
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkResetDescriptorPool(VK_DESCRIPTOR_POOL descriptorPool)
+VK_LAYER_EXPORT VkResult VKAPI vkResetDescriptorPool(VkDescriptorPool descriptorPool)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)descriptorPool, VK_OBJECT_TYPE_DESCRIPTOR_POOL);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.ResetDescriptorPool(descriptorPool);
+ VkResult result = nextTable.ResetDescriptorPool(descriptorPool);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkAllocDescriptorSets(VK_DESCRIPTOR_POOL descriptorPool, VK_DESCRIPTOR_SET_USAGE setUsage, uint32_t count, const VK_DESCRIPTOR_SET_LAYOUT* pSetLayouts, VK_DESCRIPTOR_SET* pDescriptorSets, uint32_t* pCount)
+VK_LAYER_EXPORT VkResult VKAPI vkAllocDescriptorSets(VkDescriptorPool descriptorPool, VkDescriptorSetUsage setUsage, uint32_t count, const VkDescriptorSetLayout* pSetLayouts, VkDescriptorSet* pDescriptorSets, uint32_t* pCount)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)descriptorPool, VK_OBJECT_TYPE_DESCRIPTOR_POOL);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.AllocDescriptorSets(descriptorPool, setUsage, count, pSetLayouts, pDescriptorSets, pCount);
+ VkResult result = nextTable.AllocDescriptorSets(descriptorPool, setUsage, count, pSetLayouts, pDescriptorSets, pCount);
if (result == VK_SUCCESS)
{
for (uint32_t i = 0; i < *pCount; i++) {
@@ -1165,7 +1165,7 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkAllocDescriptorSets(VK_DESCRIPTOR_POOL descrip
return result;
}
-VK_LAYER_EXPORT void VKAPI vkClearDescriptorSets(VK_DESCRIPTOR_POOL descriptorPool, uint32_t count, const VK_DESCRIPTOR_SET* pDescriptorSets)
+VK_LAYER_EXPORT void VKAPI vkClearDescriptorSets(VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet* pDescriptorSets)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)descriptorPool, VK_OBJECT_TYPE_DESCRIPTOR_POOL);
@@ -1173,7 +1173,7 @@ VK_LAYER_EXPORT void VKAPI vkClearDescriptorSets(VK_DESCRIPTOR_POOL descriptorPo
nextTable.ClearDescriptorSets(descriptorPool, count, pDescriptorSets);
}
-VK_LAYER_EXPORT void VKAPI vkUpdateDescriptors(VK_DESCRIPTOR_SET descriptorSet, uint32_t updateCount, const void** ppUpdateArray)
+VK_LAYER_EXPORT void VKAPI vkUpdateDescriptors(VkDescriptorSet descriptorSet, uint32_t updateCount, const void** ppUpdateArray)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)descriptorSet, VK_OBJECT_TYPE_DESCRIPTOR_SET);
@@ -1181,12 +1181,12 @@ VK_LAYER_EXPORT void VKAPI vkUpdateDescriptors(VK_DESCRIPTOR_SET descriptorSet,
nextTable.UpdateDescriptors(descriptorSet, updateCount, ppUpdateArray);
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDynamicViewportState(VK_DEVICE device, const VK_DYNAMIC_VP_STATE_CREATE_INFO* pCreateInfo, VK_DYNAMIC_VP_STATE_OBJECT* pState)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDynamicViewportState(VkDevice device, const VkDynamicVpStateCreateInfo* pCreateInfo, VkDynamicVpStateObject* pState)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.CreateDynamicViewportState(device, pCreateInfo, pState);
+ VkResult result = nextTable.CreateDynamicViewportState(device, pCreateInfo, pState);
if (result == VK_SUCCESS)
{
loader_platform_thread_lock_mutex(&objLock);
@@ -1197,12 +1197,12 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDynamicViewportState(VK_DEVICE device, c
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDynamicRasterState(VK_DEVICE device, const VK_DYNAMIC_RS_STATE_CREATE_INFO* pCreateInfo, VK_DYNAMIC_RS_STATE_OBJECT* pState)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDynamicRasterState(VkDevice device, const VkDynamicRsStateCreateInfo* pCreateInfo, VkDynamicRsStateObject* pState)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.CreateDynamicRasterState(device, pCreateInfo, pState);
+ VkResult result = nextTable.CreateDynamicRasterState(device, pCreateInfo, pState);
if (result == VK_SUCCESS)
{
loader_platform_thread_lock_mutex(&objLock);
@@ -1213,12 +1213,12 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDynamicRasterState(VK_DEVICE device, con
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDynamicColorBlendState(VK_DEVICE device, const VK_DYNAMIC_CB_STATE_CREATE_INFO* pCreateInfo, VK_DYNAMIC_CB_STATE_OBJECT* pState)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDynamicColorBlendState(VkDevice device, const VkDynamicCbStateCreateInfo* pCreateInfo, VkDynamicCbStateObject* pState)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.CreateDynamicColorBlendState(device, pCreateInfo, pState);
+ VkResult result = nextTable.CreateDynamicColorBlendState(device, pCreateInfo, pState);
if (result == VK_SUCCESS)
{
loader_platform_thread_lock_mutex(&objLock);
@@ -1229,12 +1229,12 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDynamicColorBlendState(VK_DEVICE device,
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDynamicDepthStencilState(VK_DEVICE device, const VK_DYNAMIC_DS_STATE_CREATE_INFO* pCreateInfo, VK_DYNAMIC_DS_STATE_OBJECT* pState)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDynamicDepthStencilState(VkDevice device, const VkDynamicDsStateCreateInfo* pCreateInfo, VkDynamicDsStateObject* pState)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.CreateDynamicDepthStencilState(device, pCreateInfo, pState);
+ VkResult result = nextTable.CreateDynamicDepthStencilState(device, pCreateInfo, pState);
if (result == VK_SUCCESS)
{
loader_platform_thread_lock_mutex(&objLock);
@@ -1245,12 +1245,12 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDynamicDepthStencilState(VK_DEVICE devic
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateCommandBuffer(VK_DEVICE device, const VK_CMD_BUFFER_CREATE_INFO* pCreateInfo, VK_CMD_BUFFER* pCmdBuffer)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateCommandBuffer(VkDevice device, const VkCmdBufferCreateInfo* pCreateInfo, VkCmdBuffer* pCmdBuffer)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.CreateCommandBuffer(device, pCreateInfo, pCmdBuffer);
+ VkResult result = nextTable.CreateCommandBuffer(device, pCreateInfo, pCmdBuffer);
if (result == VK_SUCCESS)
{
loader_platform_thread_lock_mutex(&objLock);
@@ -1261,16 +1261,16 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateCommandBuffer(VK_DEVICE device, const VK
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkBeginCommandBuffer(VK_CMD_BUFFER cmdBuffer, const VK_CMD_BUFFER_BEGIN_INFO* pBeginInfo)
+VK_LAYER_EXPORT VkResult VKAPI vkBeginCommandBuffer(VkCmdBuffer cmdBuffer, const VkCmdBufferBeginInfo* pBeginInfo)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.BeginCommandBuffer(cmdBuffer, pBeginInfo);
+ VkResult result = nextTable.BeginCommandBuffer(cmdBuffer, pBeginInfo);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkEndCommandBuffer(VK_CMD_BUFFER cmdBuffer)
+VK_LAYER_EXPORT VkResult VKAPI vkEndCommandBuffer(VkCmdBuffer cmdBuffer)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1279,20 +1279,20 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkEndCommandBuffer(VK_CMD_BUFFER cmdBuffer)
OBJSTATUS_RASTER_BOUND |
OBJSTATUS_COLOR_BLEND_BOUND |
OBJSTATUS_DEPTH_STENCIL_BOUND));
- VK_RESULT result = nextTable.EndCommandBuffer(cmdBuffer);
+ VkResult result = nextTable.EndCommandBuffer(cmdBuffer);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkResetCommandBuffer(VK_CMD_BUFFER cmdBuffer)
+VK_LAYER_EXPORT VkResult VKAPI vkResetCommandBuffer(VkCmdBuffer cmdBuffer)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.ResetCommandBuffer(cmdBuffer);
+ VkResult result = nextTable.ResetCommandBuffer(cmdBuffer);
return result;
}
-VK_LAYER_EXPORT void VKAPI vkCmdBindPipeline(VK_CMD_BUFFER cmdBuffer, VK_PIPELINE_BIND_POINT pipelineBindPoint, VK_PIPELINE pipeline)
+VK_LAYER_EXPORT void VKAPI vkCmdBindPipeline(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1300,7 +1300,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdBindPipeline(VK_CMD_BUFFER cmdBuffer, VK_PIPELIN
nextTable.CmdBindPipeline(cmdBuffer, pipelineBindPoint, pipeline);
}
-VK_LAYER_EXPORT void VKAPI vkCmdBindDynamicStateObject(VK_CMD_BUFFER cmdBuffer, VK_STATE_BIND_POINT stateBindPoint, VK_DYNAMIC_STATE_OBJECT state)
+VK_LAYER_EXPORT void VKAPI vkCmdBindDynamicStateObject(VkCmdBuffer cmdBuffer, VkStateBindPoint stateBindPoint, VkDynamicStateObject state)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1309,7 +1309,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdBindDynamicStateObject(VK_CMD_BUFFER cmdBuffer,
nextTable.CmdBindDynamicStateObject(cmdBuffer, stateBindPoint, state);
}
-VK_LAYER_EXPORT void VKAPI vkCmdBindDescriptorSets(VK_CMD_BUFFER cmdBuffer, VK_PIPELINE_BIND_POINT pipelineBindPoint, VK_DESCRIPTOR_SET_LAYOUT_CHAIN layoutChain, uint32_t layoutChainSlot, uint32_t count, const VK_DESCRIPTOR_SET* pDescriptorSets, const uint32_t* pUserData)
+VK_LAYER_EXPORT void VKAPI vkCmdBindDescriptorSets(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, VkDescriptorSetLayoutChain layoutChain, uint32_t layoutChainSlot, uint32_t count, const VkDescriptorSet* pDescriptorSets, const uint32_t* pUserData)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1317,7 +1317,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdBindDescriptorSets(VK_CMD_BUFFER cmdBuffer, VK_P
nextTable.CmdBindDescriptorSets(cmdBuffer, pipelineBindPoint, layoutChain, layoutChainSlot, count, pDescriptorSets, pUserData);
}
-VK_LAYER_EXPORT void VKAPI vkCmdBindVertexBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUFFER buffer, VK_GPU_SIZE offset, uint32_t binding)
+VK_LAYER_EXPORT void VKAPI vkCmdBindVertexBuffer(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset, uint32_t binding)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1325,7 +1325,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdBindVertexBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUF
nextTable.CmdBindVertexBuffer(cmdBuffer, buffer, offset, binding);
}
-VK_LAYER_EXPORT void VKAPI vkCmdBindIndexBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUFFER buffer, VK_GPU_SIZE offset, VK_INDEX_TYPE indexType)
+VK_LAYER_EXPORT void VKAPI vkCmdBindIndexBuffer(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset, VkIndexType indexType)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1333,7 +1333,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdBindIndexBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUFF
nextTable.CmdBindIndexBuffer(cmdBuffer, buffer, offset, indexType);
}
-VK_LAYER_EXPORT void VKAPI vkCmdDraw(VK_CMD_BUFFER cmdBuffer, uint32_t firstVertex, uint32_t vertexCount, uint32_t firstInstance, uint32_t instanceCount)
+VK_LAYER_EXPORT void VKAPI vkCmdDraw(VkCmdBuffer cmdBuffer, uint32_t firstVertex, uint32_t vertexCount, uint32_t firstInstance, uint32_t instanceCount)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1341,7 +1341,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdDraw(VK_CMD_BUFFER cmdBuffer, uint32_t firstVert
nextTable.CmdDraw(cmdBuffer, firstVertex, vertexCount, firstInstance, instanceCount);
}
-VK_LAYER_EXPORT void VKAPI vkCmdDrawIndexed(VK_CMD_BUFFER cmdBuffer, uint32_t firstIndex, uint32_t indexCount, int32_t vertexOffset, uint32_t firstInstance, uint32_t instanceCount)
+VK_LAYER_EXPORT void VKAPI vkCmdDrawIndexed(VkCmdBuffer cmdBuffer, uint32_t firstIndex, uint32_t indexCount, int32_t vertexOffset, uint32_t firstInstance, uint32_t instanceCount)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1349,7 +1349,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdDrawIndexed(VK_CMD_BUFFER cmdBuffer, uint32_t fi
nextTable.CmdDrawIndexed(cmdBuffer, firstIndex, indexCount, vertexOffset, firstInstance, instanceCount);
}
-VK_LAYER_EXPORT void VKAPI vkCmdDrawIndirect(VK_CMD_BUFFER cmdBuffer, VK_BUFFER buffer, VK_GPU_SIZE offset, uint32_t count, uint32_t stride)
+VK_LAYER_EXPORT void VKAPI vkCmdDrawIndirect(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset, uint32_t count, uint32_t stride)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1357,7 +1357,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdDrawIndirect(VK_CMD_BUFFER cmdBuffer, VK_BUFFER
nextTable.CmdDrawIndirect(cmdBuffer, buffer, offset, count, stride);
}
-VK_LAYER_EXPORT void VKAPI vkCmdDrawIndexedIndirect(VK_CMD_BUFFER cmdBuffer, VK_BUFFER buffer, VK_GPU_SIZE offset, uint32_t count, uint32_t stride)
+VK_LAYER_EXPORT void VKAPI vkCmdDrawIndexedIndirect(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset, uint32_t count, uint32_t stride)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1365,7 +1365,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdDrawIndexedIndirect(VK_CMD_BUFFER cmdBuffer, VK_
nextTable.CmdDrawIndexedIndirect(cmdBuffer, buffer, offset, count, stride);
}
-VK_LAYER_EXPORT void VKAPI vkCmdDispatch(VK_CMD_BUFFER cmdBuffer, uint32_t x, uint32_t y, uint32_t z)
+VK_LAYER_EXPORT void VKAPI vkCmdDispatch(VkCmdBuffer cmdBuffer, uint32_t x, uint32_t y, uint32_t z)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1373,7 +1373,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdDispatch(VK_CMD_BUFFER cmdBuffer, uint32_t x, ui
nextTable.CmdDispatch(cmdBuffer, x, y, z);
}
-VK_LAYER_EXPORT void VKAPI vkCmdDispatchIndirect(VK_CMD_BUFFER cmdBuffer, VK_BUFFER buffer, VK_GPU_SIZE offset)
+VK_LAYER_EXPORT void VKAPI vkCmdDispatchIndirect(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1381,7 +1381,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdDispatchIndirect(VK_CMD_BUFFER cmdBuffer, VK_BUF
nextTable.CmdDispatchIndirect(cmdBuffer, buffer, offset);
}
-VK_LAYER_EXPORT void VKAPI vkCmdCopyBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUFFER srcBuffer, VK_BUFFER destBuffer, uint32_t regionCount, const VK_BUFFER_COPY* pRegions)
+VK_LAYER_EXPORT void VKAPI vkCmdCopyBuffer(VkCmdBuffer cmdBuffer, VkBuffer srcBuffer, VkBuffer destBuffer, uint32_t regionCount, const VkBufferCopy* pRegions)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1389,7 +1389,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdCopyBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUFFER sr
nextTable.CmdCopyBuffer(cmdBuffer, srcBuffer, destBuffer, regionCount, pRegions);
}
-VK_LAYER_EXPORT void VKAPI vkCmdCopyImage(VK_CMD_BUFFER cmdBuffer, VK_IMAGE srcImage, VK_IMAGE_LAYOUT srcImageLayout, VK_IMAGE destImage, VK_IMAGE_LAYOUT destImageLayout, uint32_t regionCount, const VK_IMAGE_COPY* pRegions)
+VK_LAYER_EXPORT void VKAPI vkCmdCopyImage(VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage destImage, VkImageLayout destImageLayout, uint32_t regionCount, const VkImageCopy* pRegions)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1397,7 +1397,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdCopyImage(VK_CMD_BUFFER cmdBuffer, VK_IMAGE srcI
nextTable.CmdCopyImage(cmdBuffer, srcImage, srcImageLayout, destImage, destImageLayout, regionCount, pRegions);
}
-VK_LAYER_EXPORT void VKAPI vkCmdCopyBufferToImage(VK_CMD_BUFFER cmdBuffer, VK_BUFFER srcBuffer, VK_IMAGE destImage, VK_IMAGE_LAYOUT destImageLayout, uint32_t regionCount, const VK_BUFFER_IMAGE_COPY* pRegions)
+VK_LAYER_EXPORT void VKAPI vkCmdCopyBufferToImage(VkCmdBuffer cmdBuffer, VkBuffer srcBuffer, VkImage destImage, VkImageLayout destImageLayout, uint32_t regionCount, const VkBufferImageCopy* pRegions)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1405,7 +1405,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdCopyBufferToImage(VK_CMD_BUFFER cmdBuffer, VK_BU
nextTable.CmdCopyBufferToImage(cmdBuffer, srcBuffer, destImage, destImageLayout, regionCount, pRegions);
}
-VK_LAYER_EXPORT void VKAPI vkCmdCopyImageToBuffer(VK_CMD_BUFFER cmdBuffer, VK_IMAGE srcImage, VK_IMAGE_LAYOUT srcImageLayout, VK_BUFFER destBuffer, uint32_t regionCount, const VK_BUFFER_IMAGE_COPY* pRegions)
+VK_LAYER_EXPORT void VKAPI vkCmdCopyImageToBuffer(VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer destBuffer, uint32_t regionCount, const VkBufferImageCopy* pRegions)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1413,7 +1413,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdCopyImageToBuffer(VK_CMD_BUFFER cmdBuffer, VK_IM
nextTable.CmdCopyImageToBuffer(cmdBuffer, srcImage, srcImageLayout, destBuffer, regionCount, pRegions);
}
-VK_LAYER_EXPORT void VKAPI vkCmdCloneImageData(VK_CMD_BUFFER cmdBuffer, VK_IMAGE srcImage, VK_IMAGE_LAYOUT srcImageLayout, VK_IMAGE destImage, VK_IMAGE_LAYOUT destImageLayout)
+VK_LAYER_EXPORT void VKAPI vkCmdCloneImageData(VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage destImage, VkImageLayout destImageLayout)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1421,7 +1421,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdCloneImageData(VK_CMD_BUFFER cmdBuffer, VK_IMAGE
nextTable.CmdCloneImageData(cmdBuffer, srcImage, srcImageLayout, destImage, destImageLayout);
}
-VK_LAYER_EXPORT void VKAPI vkCmdUpdateBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUFFER destBuffer, VK_GPU_SIZE destOffset, VK_GPU_SIZE dataSize, const uint32_t* pData)
+VK_LAYER_EXPORT void VKAPI vkCmdUpdateBuffer(VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkGpuSize destOffset, VkGpuSize dataSize, const uint32_t* pData)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1429,7 +1429,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdUpdateBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUFFER
nextTable.CmdUpdateBuffer(cmdBuffer, destBuffer, destOffset, dataSize, pData);
}
-VK_LAYER_EXPORT void VKAPI vkCmdFillBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUFFER destBuffer, VK_GPU_SIZE destOffset, VK_GPU_SIZE fillSize, uint32_t data)
+VK_LAYER_EXPORT void VKAPI vkCmdFillBuffer(VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkGpuSize destOffset, VkGpuSize fillSize, uint32_t data)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1437,7 +1437,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdFillBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUFFER de
nextTable.CmdFillBuffer(cmdBuffer, destBuffer, destOffset, fillSize, data);
}
-VK_LAYER_EXPORT void VKAPI vkCmdClearColorImage(VK_CMD_BUFFER cmdBuffer, VK_IMAGE image, VK_IMAGE_LAYOUT imageLayout, VK_CLEAR_COLOR color, uint32_t rangeCount, const VK_IMAGE_SUBRESOURCE_RANGE* pRanges)
+VK_LAYER_EXPORT void VKAPI vkCmdClearColorImage(VkCmdBuffer cmdBuffer, VkImage image, VkImageLayout imageLayout, VkClearColor color, uint32_t rangeCount, const VkImageSubresourceRange* pRanges)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1445,7 +1445,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdClearColorImage(VK_CMD_BUFFER cmdBuffer, VK_IMAG
nextTable.CmdClearColorImage(cmdBuffer, image, imageLayout, color, rangeCount, pRanges);
}
-VK_LAYER_EXPORT void VKAPI vkCmdClearDepthStencil(VK_CMD_BUFFER cmdBuffer, VK_IMAGE image, VK_IMAGE_LAYOUT imageLayout, float depth, uint32_t stencil, uint32_t rangeCount, const VK_IMAGE_SUBRESOURCE_RANGE* pRanges)
+VK_LAYER_EXPORT void VKAPI vkCmdClearDepthStencil(VkCmdBuffer cmdBuffer, VkImage image, VkImageLayout imageLayout, float depth, uint32_t stencil, uint32_t rangeCount, const VkImageSubresourceRange* pRanges)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1453,7 +1453,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdClearDepthStencil(VK_CMD_BUFFER cmdBuffer, VK_IM
nextTable.CmdClearDepthStencil(cmdBuffer, image, imageLayout, depth, stencil, rangeCount, pRanges);
}
-VK_LAYER_EXPORT void VKAPI vkCmdResolveImage(VK_CMD_BUFFER cmdBuffer, VK_IMAGE srcImage, VK_IMAGE_LAYOUT srcImageLayout, VK_IMAGE destImage, VK_IMAGE_LAYOUT destImageLayout, uint32_t rectCount, const VK_IMAGE_RESOLVE* pRects)
+VK_LAYER_EXPORT void VKAPI vkCmdResolveImage(VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage destImage, VkImageLayout destImageLayout, uint32_t rectCount, const VkImageResolve* pRects)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1461,7 +1461,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdResolveImage(VK_CMD_BUFFER cmdBuffer, VK_IMAGE s
nextTable.CmdResolveImage(cmdBuffer, srcImage, srcImageLayout, destImage, destImageLayout, rectCount, pRects);
}
-VK_LAYER_EXPORT void VKAPI vkCmdSetEvent(VK_CMD_BUFFER cmdBuffer, VK_EVENT event, VK_PIPE_EVENT pipeEvent)
+VK_LAYER_EXPORT void VKAPI vkCmdSetEvent(VkCmdBuffer cmdBuffer, VkEvent event, VkPipeEvent pipeEvent)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1469,7 +1469,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdSetEvent(VK_CMD_BUFFER cmdBuffer, VK_EVENT event
nextTable.CmdSetEvent(cmdBuffer, event, pipeEvent);
}
-VK_LAYER_EXPORT void VKAPI vkCmdResetEvent(VK_CMD_BUFFER cmdBuffer, VK_EVENT event, VK_PIPE_EVENT pipeEvent)
+VK_LAYER_EXPORT void VKAPI vkCmdResetEvent(VkCmdBuffer cmdBuffer, VkEvent event, VkPipeEvent pipeEvent)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1477,7 +1477,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdResetEvent(VK_CMD_BUFFER cmdBuffer, VK_EVENT eve
nextTable.CmdResetEvent(cmdBuffer, event, pipeEvent);
}
-VK_LAYER_EXPORT void VKAPI vkCmdWaitEvents(VK_CMD_BUFFER cmdBuffer, const VK_EVENT_WAIT_INFO* pWaitInfo)
+VK_LAYER_EXPORT void VKAPI vkCmdWaitEvents(VkCmdBuffer cmdBuffer, const VkEventWaitInfo* pWaitInfo)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1485,7 +1485,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdWaitEvents(VK_CMD_BUFFER cmdBuffer, const VK_EVE
nextTable.CmdWaitEvents(cmdBuffer, pWaitInfo);
}
-VK_LAYER_EXPORT void VKAPI vkCmdPipelineBarrier(VK_CMD_BUFFER cmdBuffer, const VK_PIPELINE_BARRIER* pBarrier)
+VK_LAYER_EXPORT void VKAPI vkCmdPipelineBarrier(VkCmdBuffer cmdBuffer, const VkPipelineBarrier* pBarrier)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1493,7 +1493,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdPipelineBarrier(VK_CMD_BUFFER cmdBuffer, const V
nextTable.CmdPipelineBarrier(cmdBuffer, pBarrier);
}
-VK_LAYER_EXPORT void VKAPI vkCmdBeginQuery(VK_CMD_BUFFER cmdBuffer, VK_QUERY_POOL queryPool, uint32_t slot, VK_FLAGS flags)
+VK_LAYER_EXPORT void VKAPI vkCmdBeginQuery(VkCmdBuffer cmdBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1501,7 +1501,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdBeginQuery(VK_CMD_BUFFER cmdBuffer, VK_QUERY_POO
nextTable.CmdBeginQuery(cmdBuffer, queryPool, slot, flags);
}
-VK_LAYER_EXPORT void VKAPI vkCmdEndQuery(VK_CMD_BUFFER cmdBuffer, VK_QUERY_POOL queryPool, uint32_t slot)
+VK_LAYER_EXPORT void VKAPI vkCmdEndQuery(VkCmdBuffer cmdBuffer, VkQueryPool queryPool, uint32_t slot)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1509,7 +1509,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdEndQuery(VK_CMD_BUFFER cmdBuffer, VK_QUERY_POOL
nextTable.CmdEndQuery(cmdBuffer, queryPool, slot);
}
-VK_LAYER_EXPORT void VKAPI vkCmdResetQueryPool(VK_CMD_BUFFER cmdBuffer, VK_QUERY_POOL queryPool, uint32_t startQuery, uint32_t queryCount)
+VK_LAYER_EXPORT void VKAPI vkCmdResetQueryPool(VkCmdBuffer cmdBuffer, VkQueryPool queryPool, uint32_t startQuery, uint32_t queryCount)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1517,7 +1517,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdResetQueryPool(VK_CMD_BUFFER cmdBuffer, VK_QUERY
nextTable.CmdResetQueryPool(cmdBuffer, queryPool, startQuery, queryCount);
}
-VK_LAYER_EXPORT void VKAPI vkCmdWriteTimestamp(VK_CMD_BUFFER cmdBuffer, VK_TIMESTAMP_TYPE timestampType, VK_BUFFER destBuffer, VK_GPU_SIZE destOffset)
+VK_LAYER_EXPORT void VKAPI vkCmdWriteTimestamp(VkCmdBuffer cmdBuffer, VkTimestampType timestampType, VkBuffer destBuffer, VkGpuSize destOffset)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1525,7 +1525,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdWriteTimestamp(VK_CMD_BUFFER cmdBuffer, VK_TIMES
nextTable.CmdWriteTimestamp(cmdBuffer, timestampType, destBuffer, destOffset);
}
-VK_LAYER_EXPORT void VKAPI vkCmdInitAtomicCounters(VK_CMD_BUFFER cmdBuffer, VK_PIPELINE_BIND_POINT pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, const uint32_t* pData)
+VK_LAYER_EXPORT void VKAPI vkCmdInitAtomicCounters(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, const uint32_t* pData)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1533,7 +1533,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdInitAtomicCounters(VK_CMD_BUFFER cmdBuffer, VK_P
nextTable.CmdInitAtomicCounters(cmdBuffer, pipelineBindPoint, startCounter, counterCount, pData);
}
-VK_LAYER_EXPORT void VKAPI vkCmdLoadAtomicCounters(VK_CMD_BUFFER cmdBuffer, VK_PIPELINE_BIND_POINT pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, VK_BUFFER srcBuffer, VK_GPU_SIZE srcOffset)
+VK_LAYER_EXPORT void VKAPI vkCmdLoadAtomicCounters(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, VkBuffer srcBuffer, VkGpuSize srcOffset)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1541,7 +1541,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdLoadAtomicCounters(VK_CMD_BUFFER cmdBuffer, VK_P
nextTable.CmdLoadAtomicCounters(cmdBuffer, pipelineBindPoint, startCounter, counterCount, srcBuffer, srcOffset);
}
-VK_LAYER_EXPORT void VKAPI vkCmdSaveAtomicCounters(VK_CMD_BUFFER cmdBuffer, VK_PIPELINE_BIND_POINT pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, VK_BUFFER destBuffer, VK_GPU_SIZE destOffset)
+VK_LAYER_EXPORT void VKAPI vkCmdSaveAtomicCounters(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, VkBuffer destBuffer, VkGpuSize destOffset)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1549,12 +1549,12 @@ VK_LAYER_EXPORT void VKAPI vkCmdSaveAtomicCounters(VK_CMD_BUFFER cmdBuffer, VK_P
nextTable.CmdSaveAtomicCounters(cmdBuffer, pipelineBindPoint, startCounter, counterCount, destBuffer, destOffset);
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateFramebuffer(VK_DEVICE device, const VK_FRAMEBUFFER_CREATE_INFO* pCreateInfo, VK_FRAMEBUFFER* pFramebuffer)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo* pCreateInfo, VkFramebuffer* pFramebuffer)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.CreateFramebuffer(device, pCreateInfo, pFramebuffer);
+ VkResult result = nextTable.CreateFramebuffer(device, pCreateInfo, pFramebuffer);
if (result == VK_SUCCESS)
{
loader_platform_thread_lock_mutex(&objLock);
@@ -1565,12 +1565,12 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateFramebuffer(VK_DEVICE device, const VK_F
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateRenderPass(VK_DEVICE device, const VK_RENDER_PASS_CREATE_INFO* pCreateInfo, VK_RENDER_PASS* pRenderPass)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, VkRenderPass* pRenderPass)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.CreateRenderPass(device, pCreateInfo, pRenderPass);
+ VkResult result = nextTable.CreateRenderPass(device, pCreateInfo, pRenderPass);
if (result == VK_SUCCESS)
{
loader_platform_thread_lock_mutex(&objLock);
@@ -1581,7 +1581,7 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateRenderPass(VK_DEVICE device, const VK_RE
return result;
}
-VK_LAYER_EXPORT void VKAPI vkCmdBeginRenderPass(VK_CMD_BUFFER cmdBuffer, const VK_RENDER_PASS_BEGIN *pRenderPassBegin)
+VK_LAYER_EXPORT void VKAPI vkCmdBeginRenderPass(VkCmdBuffer cmdBuffer, const VkRenderPassBegin *pRenderPassBegin)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1589,7 +1589,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdBeginRenderPass(VK_CMD_BUFFER cmdBuffer, const V
nextTable.CmdBeginRenderPass(cmdBuffer, pRenderPassBegin);
}
-VK_LAYER_EXPORT void VKAPI vkCmdEndRenderPass(VK_CMD_BUFFER cmdBuffer, VK_RENDER_PASS renderPass)
+VK_LAYER_EXPORT void VKAPI vkCmdEndRenderPass(VkCmdBuffer cmdBuffer, VkRenderPass renderPass)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1597,16 +1597,16 @@ VK_LAYER_EXPORT void VKAPI vkCmdEndRenderPass(VK_CMD_BUFFER cmdBuffer, VK_RENDER
nextTable.CmdEndRenderPass(cmdBuffer, renderPass);
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkDbgSetValidationLevel(VK_DEVICE device, VK_VALIDATION_LEVEL validationLevel)
+VK_LAYER_EXPORT VkResult VKAPI vkDbgSetValidationLevel(VkDevice device, VkValidationLevel validationLevel)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.DbgSetValidationLevel(device, validationLevel);
+ VkResult result = nextTable.DbgSetValidationLevel(device, validationLevel);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkDbgRegisterMsgCallback(VK_INSTANCE instance, VK_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback, void* pUserData)
+VK_LAYER_EXPORT VkResult VKAPI vkDbgRegisterMsgCallback(VkInstance instance, VK_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback, void* pUserData)
{
// This layer intercepts callbacks
VK_LAYER_DBG_FUNCTION_NODE *pNewDbgFuncNode = (VK_LAYER_DBG_FUNCTION_NODE*)malloc(sizeof(VK_LAYER_DBG_FUNCTION_NODE));
@@ -1619,11 +1619,11 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkDbgRegisterMsgCallback(VK_INSTANCE instance, V
// force callbacks if DebugAction hasn't been set already other than initial value
if (g_actionIsDefault) {
g_debugAction = VK_DBG_LAYER_ACTION_CALLBACK;
- } VK_RESULT result = nextTable.DbgRegisterMsgCallback(instance, pfnMsgCallback, pUserData);
+ } VkResult result = nextTable.DbgRegisterMsgCallback(instance, pfnMsgCallback, pUserData);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkDbgUnregisterMsgCallback(VK_INSTANCE instance, VK_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback)
+VK_LAYER_EXPORT VkResult VKAPI vkDbgUnregisterMsgCallback(VkInstance instance, VK_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback)
{
VK_LAYER_DBG_FUNCTION_NODE *pTrav = g_pDbgFunctionHead;
VK_LAYER_DBG_FUNCTION_NODE *pPrev = pTrav;
@@ -1645,44 +1645,44 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkDbgUnregisterMsgCallback(VK_INSTANCE instance,
else
g_debugAction &= ~VK_DBG_LAYER_ACTION_CALLBACK;
}
- VK_RESULT result = nextTable.DbgUnregisterMsgCallback(instance, pfnMsgCallback);
+ VkResult result = nextTable.DbgUnregisterMsgCallback(instance, pfnMsgCallback);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkDbgSetMessageFilter(VK_DEVICE device, int32_t msgCode, VK_DBG_MSG_FILTER filter)
+VK_LAYER_EXPORT VkResult VKAPI vkDbgSetMessageFilter(VkDevice device, int32_t msgCode, VK_DBG_MSG_FILTER filter)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.DbgSetMessageFilter(device, msgCode, filter);
+ VkResult result = nextTable.DbgSetMessageFilter(device, msgCode, filter);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkDbgSetObjectTag(VK_BASE_OBJECT object, size_t tagSize, const void* pTag)
+VK_LAYER_EXPORT VkResult VKAPI vkDbgSetObjectTag(VkBaseObject object, size_t tagSize, const void* pTag)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)object, ll_get_obj_type(object));
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.DbgSetObjectTag(object, tagSize, pTag);
+ VkResult result = nextTable.DbgSetObjectTag(object, tagSize, pTag);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkDbgSetGlobalOption(VK_INSTANCE instance, VK_DBG_GLOBAL_OPTION dbgOption, size_t dataSize, const void* pData)
+VK_LAYER_EXPORT VkResult VKAPI vkDbgSetGlobalOption(VkInstance instance, VK_DBG_GLOBAL_OPTION dbgOption, size_t dataSize, const void* pData)
{
- VK_RESULT result = nextTable.DbgSetGlobalOption(instance, dbgOption, dataSize, pData);
+ VkResult result = nextTable.DbgSetGlobalOption(instance, dbgOption, dataSize, pData);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkDbgSetDeviceOption(VK_DEVICE device, VK_DBG_DEVICE_OPTION dbgOption, size_t dataSize, const void* pData)
+VK_LAYER_EXPORT VkResult VKAPI vkDbgSetDeviceOption(VkDevice device, VK_DBG_DEVICE_OPTION dbgOption, size_t dataSize, const void* pData)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.DbgSetDeviceOption(device, dbgOption, dataSize, pData);
+ VkResult result = nextTable.DbgSetDeviceOption(device, dbgOption, dataSize, pData);
return result;
}
-VK_LAYER_EXPORT void VKAPI vkCmdDbgMarkerBegin(VK_CMD_BUFFER cmdBuffer, const char* pMarker)
+VK_LAYER_EXPORT void VKAPI vkCmdDbgMarkerBegin(VkCmdBuffer cmdBuffer, const char* pMarker)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1690,7 +1690,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdDbgMarkerBegin(VK_CMD_BUFFER cmdBuffer, const ch
nextTable.CmdDbgMarkerBegin(cmdBuffer, pMarker);
}
-VK_LAYER_EXPORT void VKAPI vkCmdDbgMarkerEnd(VK_CMD_BUFFER cmdBuffer)
+VK_LAYER_EXPORT void VKAPI vkCmdDbgMarkerEnd(VkCmdBuffer cmdBuffer)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)cmdBuffer, VK_OBJECT_TYPE_CMD_BUFFER);
@@ -1700,7 +1700,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdDbgMarkerEnd(VK_CMD_BUFFER cmdBuffer)
#if defined(__linux__) || defined(XCB_NVIDIA)
-VK_LAYER_EXPORT VK_RESULT VKAPI vkWsiX11AssociateConnection(VK_PHYSICAL_GPU gpu, const VK_WSI_X11_CONNECTION_INFO* pConnectionInfo)
+VK_LAYER_EXPORT VkResult VKAPI vkWsiX11AssociateConnection(VkPhysicalGpu gpu, const VK_WSI_X11_CONNECTION_INFO* pConnectionInfo)
{
VK_BASE_LAYER_OBJECT* gpuw = (VK_BASE_LAYER_OBJECT *) gpu;
loader_platform_thread_lock_mutex(&objLock);
@@ -1708,25 +1708,25 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkWsiX11AssociateConnection(VK_PHYSICAL_GPU gpu,
loader_platform_thread_unlock_mutex(&objLock);
pCurObj = gpuw;
loader_platform_thread_once(&tabOnce, initGlaveSnapshot);
- VK_RESULT result = nextTable.WsiX11AssociateConnection((VK_PHYSICAL_GPU)gpuw->nextObject, pConnectionInfo);
+ VkResult result = nextTable.WsiX11AssociateConnection((VkPhysicalGpu)gpuw->nextObject, pConnectionInfo);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkWsiX11GetMSC(VK_DEVICE device, xcb_window_t window, xcb_randr_crtc_t crtc, uint64_t* pMsc)
+VK_LAYER_EXPORT VkResult VKAPI vkWsiX11GetMSC(VkDevice device, xcb_window_t window, xcb_randr_crtc_t crtc, uint64_t* pMsc)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.WsiX11GetMSC(device, window, crtc, pMsc);
+ VkResult result = nextTable.WsiX11GetMSC(device, window, crtc, pMsc);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkWsiX11CreatePresentableImage(VK_DEVICE device, const VK_WSI_X11_PRESENTABLE_IMAGE_CREATE_INFO* pCreateInfo, VK_IMAGE* pImage, VK_GPU_MEMORY* pMem)
+VK_LAYER_EXPORT VkResult VKAPI vkWsiX11CreatePresentableImage(VkDevice device, const VK_WSI_X11_PRESENTABLE_IMAGE_CREATE_INFO* pCreateInfo, VkImage* pImage, VkGpuMemory* pMem)
{
loader_platform_thread_lock_mutex(&objLock);
ll_increment_use_count((void*)device, VK_OBJECT_TYPE_DEVICE);
loader_platform_thread_unlock_mutex(&objLock);
- VK_RESULT result = nextTable.WsiX11CreatePresentableImage(device, pCreateInfo, pImage, pMem);
+ VkResult result = nextTable.WsiX11CreatePresentableImage(device, pCreateInfo, pImage, pMem);
if (result == VK_SUCCESS)
{
@@ -1743,9 +1743,9 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkWsiX11CreatePresentableImage(VK_DEVICE device,
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkWsiX11QueuePresent(VK_QUEUE queue, const VK_WSI_X11_PRESENT_INFO* pPresentInfo, VK_FENCE fence)
+VK_LAYER_EXPORT VkResult VKAPI vkWsiX11QueuePresent(VkQueue queue, const VK_WSI_X11_PRESENT_INFO* pPresentInfo, VkFence fence)
{
- VK_RESULT result = nextTable.WsiX11QueuePresent(queue, pPresentInfo, fence);
+ VkResult result = nextTable.WsiX11QueuePresent(queue, pPresentInfo, fence);
return result;
}
@@ -1858,7 +1858,7 @@ uint64_t glvSnapshotGetObjectCount(VK_OBJECT_TYPE type)
return retVal;
}
-VK_RESULT glvSnapshotGetObjects(VK_OBJECT_TYPE type, uint64_t objCount, GLV_VK_SNAPSHOT_OBJECT_NODE *pObjNodeArray)
+VkResult glvSnapshotGetObjects(VK_OBJECT_TYPE type, uint64_t objCount, GLV_VK_SNAPSHOT_OBJECT_NODE *pObjNodeArray)
{
// This bool flags if we're pulling all objs or just a single class of objs
bool32_t bAllObjs = (type == VK_OBJECT_TYPE_ANY);
@@ -1892,7 +1892,7 @@ void glvSnapshotPrintObjects(void)
}
#include "vk_generic_intercept_proc_helper.h"
-VK_LAYER_EXPORT void* VKAPI vkGetProcAddr(VK_PHYSICAL_GPU gpu, const char* funcName)
+VK_LAYER_EXPORT void* VKAPI vkGetProcAddr(VkPhysicalGpu gpu, const char* funcName)
{
VK_BASE_LAYER_OBJECT* gpuw = (VK_BASE_LAYER_OBJECT *) gpu;
void* addr;
@@ -1927,7 +1927,7 @@ VK_LAYER_EXPORT void* VKAPI vkGetProcAddr(VK_PHYSICAL_GPU gpu, const char* funcN
else {
if (gpuw->pGPA == NULL)
return NULL;
- return gpuw->pGPA((VK_PHYSICAL_GPU)gpuw->nextObject, funcName);
+ return gpuw->pGPA((VkPhysicalGpu)gpuw->nextObject, funcName);
}
}
diff --git a/layers/glave_snapshot.h b/layers/glave_snapshot.h
index f22915cc..f0de0e91 100644
--- a/layers/glave_snapshot.h
+++ b/layers/glave_snapshot.h
@@ -173,13 +173,13 @@ void glv_vk_malloc_and_copy(void** ppDest, size_t size, const void* pSrc);
typedef struct _GLV_VK_SNAPSHOT_CREATEDEVICE_PARAMS
{
- VK_PHYSICAL_GPU gpu;
+ VkPhysicalGpu gpu;
VkDeviceCreateInfo* pCreateInfo;
- VK_DEVICE* pDevice;
+ VkDevice* pDevice;
} GLV_VK_SNAPSHOT_CREATEDEVICE_PARAMS;
VkDeviceCreateInfo* glv_deepcopy_xgl_device_create_info(const VkDeviceCreateInfo* pSrcCreateInfo);void glv_deepfree_xgl_device_create_info(VkDeviceCreateInfo* pCreateInfo);
-void glv_vk_snapshot_copy_createdevice_params(GLV_VK_SNAPSHOT_CREATEDEVICE_PARAMS* pDest, VK_PHYSICAL_GPU gpu, const VkDeviceCreateInfo* pCreateInfo, VK_DEVICE* pDevice);
+void glv_vk_snapshot_copy_createdevice_params(GLV_VK_SNAPSHOT_CREATEDEVICE_PARAMS* pDest, VkPhysicalGpu gpu, const VkDeviceCreateInfo* pCreateInfo, VkDevice* pDevice);
void glv_vk_snapshot_destroy_createdevice_params(GLV_VK_SNAPSHOT_CREATEDEVICE_PARAMS* pSrc);
//=============================================================================
@@ -195,10 +195,10 @@ typedef struct _GLV_VK_SNAPSHOT_OBJECT_NODE {
void* pStruct; //< optionally points to a device-specific struct (ie, GLV_VK_SNAPSHOT_DEVICE_NODE)
} GLV_VK_SNAPSHOT_OBJECT_NODE;
-// Node that stores information about an VK_DEVICE
+// Node that stores information about an VkDevice
typedef struct _GLV_VK_SNAPSHOT_DEVICE_NODE {
// This object
- VK_DEVICE device;
+ VkDevice device;
// CreateDevice parameters
GLV_VK_SNAPSHOT_CREATEDEVICE_PARAMS params;
@@ -291,12 +291,12 @@ void glvSnapshotClear(void);
GLV_VK_SNAPSHOT glvSnapshotMerge(const GLV_VK_SNAPSHOT * const pDelta, const GLV_VK_SNAPSHOT * const pSnapshot);
uint64_t glvSnapshotGetObjectCount(VK_OBJECT_TYPE type);
-VK_RESULT glvSnapshotGetObjects(VK_OBJECT_TYPE type, uint64_t objCount, GLV_VK_SNAPSHOT_OBJECT_NODE* pObjNodeArray);
+VkResult glvSnapshotGetObjects(VK_OBJECT_TYPE type, uint64_t objCount, GLV_VK_SNAPSHOT_OBJECT_NODE* pObjNodeArray);
void glvSnapshotPrintObjects(void);
// Func ptr typedefs
typedef uint64_t (*GLVSNAPSHOT_GET_OBJECT_COUNT)(VK_OBJECT_TYPE);
-typedef VK_RESULT (*GLVSNAPSHOT_GET_OBJECTS)(VK_OBJECT_TYPE, uint64_t, GLV_VK_SNAPSHOT_OBJECT_NODE*);
+typedef VkResult (*GLVSNAPSHOT_GET_OBJECTS)(VK_OBJECT_TYPE, uint64_t, GLV_VK_SNAPSHOT_OBJECT_NODE*);
typedef void (*GLVSNAPSHOT_PRINT_OBJECTS)(void);
typedef void (*GLVSNAPSHOT_START_TRACKING)(void);
typedef GLV_VK_SNAPSHOT (*GLVSNAPSHOT_GET_DELTA)(void);
diff --git a/layers/layers_msg.h b/layers/layers_msg.h
index b3525967..cda11051 100644
--- a/layers/layers_msg.h
+++ b/layers/layers_msg.h
@@ -33,8 +33,8 @@ static FILE *g_logFile = NULL;
// Utility function to handle reporting
// If callbacks are enabled, use them, otherwise use printf
static void layerCbMsg(VK_DBG_MSG_TYPE msgType,
- VK_VALIDATION_LEVEL validationLevel,
- VK_BASE_OBJECT srcObject,
+ VkValidationLevel validationLevel,
+ VkBaseObject srcObject,
size_t location,
int32_t msgCode,
const char* pLayerPrefix,
diff --git a/layers/mem_tracker.cpp b/layers/mem_tracker.cpp
index c05e07e2..bde5950a 100644
--- a/layers/mem_tracker.cpp
+++ b/layers/mem_tracker.cpp
@@ -50,18 +50,18 @@ static loader_platform_thread_mutex globalLock;
#define MAX_BINDING 0xFFFFFFFF
-map<VK_CMD_BUFFER, MT_CB_INFO*> cbMap;
-map<VK_GPU_MEMORY, MT_MEM_OBJ_INFO*> memObjMap;
-map<VK_OBJECT, MT_OBJ_INFO*> objectMap;
+map<VkCmdBuffer, MT_CB_INFO*> cbMap;
+map<VkGpuMemory, MT_MEM_OBJ_INFO*> memObjMap;
+map<VkObject, MT_OBJ_INFO*> objectMap;
map<uint64_t, MT_FENCE_INFO*> fenceMap; // Map fenceId to fence info
-map<VK_QUEUE, MT_QUEUE_INFO*> queueMap;
+map<VkQueue, MT_QUEUE_INFO*> queueMap;
// TODO : Add per-device fence completion
static uint64_t g_currentFenceId = 1;
-static VK_DEVICE globalDevice = NULL;
+static VkDevice globalDevice = NULL;
// Add new queue for this device to map container
-static void addQueueInfo(const VK_QUEUE queue)
+static void addQueueInfo(const VkQueue queue)
{
MT_QUEUE_INFO* pInfo = new MT_QUEUE_INFO;
pInfo->lastRetiredId = 0;
@@ -72,23 +72,23 @@ static void addQueueInfo(const VK_QUEUE queue)
static void deleteQueueInfoList(void)
{
// Process queue list, cleaning up each entry before deleting
- for (map<VK_QUEUE, MT_QUEUE_INFO*>::iterator ii=queueMap.begin(); ii!=queueMap.end(); ++ii) {
+ for (map<VkQueue, MT_QUEUE_INFO*>::iterator ii=queueMap.begin(); ii!=queueMap.end(); ++ii) {
(*ii).second->pQueueCmdBuffers.clear();
}
queueMap.clear();
}
// Add new CBInfo for this cb to map container
-static void addCBInfo(const VK_CMD_BUFFER cb)
+static void addCBInfo(const VkCmdBuffer cb)
{
MT_CB_INFO* pInfo = new MT_CB_INFO;
- memset(pInfo, 0, (sizeof(MT_CB_INFO) - sizeof(list<VK_GPU_MEMORY>)));
+ memset(pInfo, 0, (sizeof(MT_CB_INFO) - sizeof(list<VkGpuMemory>)));
pInfo->cmdBuffer = cb;
cbMap[cb] = pInfo;
}
// Return ptr to Info in CB map, or NULL if not found
-static MT_CB_INFO* getCBInfo(const VK_CMD_BUFFER cb)
+static MT_CB_INFO* getCBInfo(const VkCmdBuffer cb)
{
MT_CB_INFO* pCBInfo = NULL;
if (cbMap.find(cb) != cbMap.end()) {
@@ -98,7 +98,7 @@ static MT_CB_INFO* getCBInfo(const VK_CMD_BUFFER cb)
}
// Return object info for 'object' or return NULL if no info exists
-static MT_OBJ_INFO* getObjectInfo(const VK_OBJECT object)
+static MT_OBJ_INFO* getObjectInfo(const VkObject object)
{
MT_OBJ_INFO* pObjInfo = NULL;
@@ -108,7 +108,7 @@ static MT_OBJ_INFO* getObjectInfo(const VK_OBJECT object)
return pObjInfo;
}
-static MT_OBJ_INFO* addObjectInfo(VK_OBJECT object, VK_STRUCTURE_TYPE sType, const void *pCreateInfo, const int struct_size, const char *name_prefix)
+static MT_OBJ_INFO* addObjectInfo(VkObject object, VkStructureType sType, const void *pCreateInfo, const int struct_size, const char *name_prefix)
{
MT_OBJ_INFO* pInfo = new MT_OBJ_INFO;
memset(pInfo, 0, sizeof(MT_OBJ_INFO));
@@ -124,7 +124,7 @@ static MT_OBJ_INFO* addObjectInfo(VK_OBJECT object, VK_STRUCTURE_TYPE sType, con
}
// Add a fence, creating one if necessary to our list of fences/fenceIds
-static uint64_t addFenceInfo(VK_FENCE fence, VK_QUEUE queue)
+static uint64_t addFenceInfo(VkFence fence, VkQueue queue)
{
// Create fence object
MT_FENCE_INFO* pFenceInfo = new MT_FENCE_INFO;
@@ -133,12 +133,12 @@ static uint64_t addFenceInfo(VK_FENCE fence, VK_QUEUE queue)
memset(pFenceInfo, 0, sizeof(MT_FENCE_INFO));
// If no fence, create an internal fence to track the submissions
if (fence == NULL) {
- VK_FENCE_CREATE_INFO fci;
+ VkFenceCreateInfo fci;
fci.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
fci.pNext = NULL;
- fci.flags = static_cast<VK_FENCE_CREATE_FLAGS>(0);
+ fci.flags = static_cast<VkFenceCreateFlags>(0);
nextTable.CreateFence(globalDevice, &fci, &pFenceInfo->fence);
- addObjectInfo(pFenceInfo->fence, fci.sType, &fci, sizeof(VK_FENCE_CREATE_INFO), "internalFence");
+ addObjectInfo(pFenceInfo->fence, fci.sType, &fci, sizeof(VkFenceCreateInfo), "internalFence");
pFenceInfo->localFence = VK_TRUE;
} else {
pFenceInfo->localFence = VK_FALSE;
@@ -171,11 +171,11 @@ static void deleteFenceInfo(uint64_t fenceId)
}
// Search through list for this fence, deleting all items before it (with lower IDs) and updating lastRetiredId
-static void updateFenceTracking(VK_FENCE fence)
+static void updateFenceTracking(VkFence fence)
{
MT_FENCE_INFO *pCurFenceInfo = NULL;
uint64_t fenceId = 0;
- VK_QUEUE queue = NULL;
+ VkQueue queue = NULL;
for (map<uint64_t, MT_FENCE_INFO*>::iterator ii=fenceMap.begin(); ii!=fenceMap.end(); ++ii) {
if ((*ii).second != NULL) {
@@ -214,9 +214,9 @@ static bool32_t fenceRetired(uint64_t fenceId)
}
// Return the fence associated with a fenceId
-static VK_FENCE getFenceFromId(uint64_t fenceId)
+static VkFence getFenceFromId(uint64_t fenceId)
{
- VK_FENCE fence = NULL;
+ VkFence fence = NULL;
if (fenceId != 0) {
// Search for an item with this fenceId
if (fenceMap.find(fenceId) != fenceMap.end()) {
@@ -233,7 +233,7 @@ static VK_FENCE getFenceFromId(uint64_t fenceId)
}
// Helper routine that updates the fence list for a specific queue to all-retired
-static void retireQueueFences(VK_QUEUE queue)
+static void retireQueueFences(VkQueue queue)
{
MT_QUEUE_INFO *pQueueInfo = queueMap[queue];
pQueueInfo->lastRetiredId = pQueueInfo->lastSubmittedId;
@@ -253,11 +253,11 @@ static void retireQueueFences(VK_QUEUE queue)
}
// Helper routine that updates fence list for all queues to all-retired
-static void retireDeviceFences(VK_DEVICE device)
+static void retireDeviceFences(VkDevice device)
{
// Process each queue for device
// TODO: Add multiple device support
- for (map<VK_QUEUE, MT_QUEUE_INFO*>::iterator ii=queueMap.begin(); ii!=queueMap.end(); ++ii) {
+ for (map<VkQueue, MT_QUEUE_INFO*>::iterator ii=queueMap.begin(); ii!=queueMap.end(); ++ii) {
retireQueueFences((*ii).first);
}
}
@@ -265,11 +265,11 @@ static void retireDeviceFences(VK_DEVICE device)
// Returns True if a memory reference is present in a Queue's memory reference list
// Queue is validated by caller
static bool32_t checkMemRef(
- VK_QUEUE queue,
- VK_GPU_MEMORY mem)
+ VkQueue queue,
+ VkGpuMemory mem)
{
bool32_t result = VK_FALSE;
- list<VK_GPU_MEMORY>::iterator it;
+ list<VkGpuMemory>::iterator it;
MT_QUEUE_INFO *pQueueInfo = queueMap[queue];
for (it = pQueueInfo->pMemRefList.begin(); it != pQueueInfo->pMemRefList.end(); ++it) {
if ((*it) == mem) {
@@ -281,9 +281,9 @@ static bool32_t checkMemRef(
}
static bool32_t validateQueueMemRefs(
- VK_QUEUE queue,
+ VkQueue queue,
uint32_t cmdBufferCount,
- const VK_CMD_BUFFER *pCmdBuffers)
+ const VkCmdBuffer *pCmdBuffers)
{
bool32_t result = VK_TRUE;
@@ -305,7 +305,7 @@ static bool32_t validateQueueMemRefs(
result = VK_FALSE;
} else {
// Validate that all actual references are accounted for in pMemRefs
- for (list<VK_GPU_MEMORY>::iterator it = pCBInfo->pMemObjList.begin(); it != pCBInfo->pMemObjList.end(); ++it) {
+ for (list<VkGpuMemory>::iterator it = pCBInfo->pMemObjList.begin(); it != pCBInfo->pMemObjList.end(); ++it) {
// Search for each memref in queues memreflist.
if (checkMemRef(queue, *it)) {
char str[1024];
@@ -335,7 +335,7 @@ static bool32_t validateQueueMemRefs(
// Return ptr to info in map container containing mem, or NULL if not found
// Calls to this function should be wrapped in mutex
-static MT_MEM_OBJ_INFO* getMemObjInfo(const VK_GPU_MEMORY mem)
+static MT_MEM_OBJ_INFO* getMemObjInfo(const VkGpuMemory mem)
{
MT_MEM_OBJ_INFO* pMemObjInfo = NULL;
@@ -345,7 +345,7 @@ static MT_MEM_OBJ_INFO* getMemObjInfo(const VK_GPU_MEMORY mem)
return pMemObjInfo;
}
-static void addMemObjInfo(const VK_GPU_MEMORY mem, const VkMemoryAllocInfo* pAllocInfo)
+static void addMemObjInfo(const VkGpuMemory mem, const VkMemoryAllocInfo* pAllocInfo)
{
MT_MEM_OBJ_INFO* pInfo = new MT_MEM_OBJ_INFO;
pInfo->refCount = 0;
@@ -362,7 +362,7 @@ static void addMemObjInfo(const VK_GPU_MEMORY mem, const VkMemoryAllocInfo* pAll
// Find CB Info and add mem binding to list container
// Find Mem Obj Info and add CB binding to list container
-static bool32_t updateCBBinding(const VK_CMD_BUFFER cb, const VK_GPU_MEMORY mem)
+static bool32_t updateCBBinding(const VkCmdBuffer cb, const VkGpuMemory mem)
{
bool32_t result = VK_TRUE;
// First update CB binding in MemObj mini CB list
@@ -375,7 +375,7 @@ static bool32_t updateCBBinding(const VK_CMD_BUFFER cb, const VK_GPU_MEMORY mem)
} else {
// Search for cmd buffer object in memory object's binding list
bool32_t found = VK_FALSE;
- for (list<VK_CMD_BUFFER>::iterator it = pMemInfo->pCmdBufferBindings.begin(); it != pMemInfo->pCmdBufferBindings.end(); ++it) {
+ for (list<VkCmdBuffer>::iterator it = pMemInfo->pCmdBufferBindings.begin(); it != pMemInfo->pCmdBufferBindings.end(); ++it) {
if ((*it) == cb) {
found = VK_TRUE;
break;
@@ -397,7 +397,7 @@ static bool32_t updateCBBinding(const VK_CMD_BUFFER cb, const VK_GPU_MEMORY mem)
} else {
// Search for memory object in cmd buffer's binding list
bool32_t found = VK_FALSE;
- for (list<VK_GPU_MEMORY>::iterator it = pCBInfo->pMemObjList.begin(); it != pCBInfo->pMemObjList.end(); ++it) {
+ for (list<VkGpuMemory>::iterator it = pCBInfo->pMemObjList.begin(); it != pCBInfo->pMemObjList.end(); ++it) {
if ((*it) == mem) {
found = VK_TRUE;
break;
@@ -414,7 +414,7 @@ static bool32_t updateCBBinding(const VK_CMD_BUFFER cb, const VK_GPU_MEMORY mem)
// Clear the CB Binding for mem
// Calls to this function should be wrapped in mutex
-static void clearCBBinding(const VK_CMD_BUFFER cb, const VK_GPU_MEMORY mem)
+static void clearCBBinding(const VkCmdBuffer cb, const VkGpuMemory mem)
{
MT_MEM_OBJ_INFO* pInfo = getMemObjInfo(mem);
// TODO : Having this check is not ideal, really if memInfo was deleted,
@@ -427,7 +427,7 @@ static void clearCBBinding(const VK_CMD_BUFFER cb, const VK_GPU_MEMORY mem)
}
// Free bindings related to CB
-static bool32_t freeCBBindings(const VK_CMD_BUFFER cb)
+static bool32_t freeCBBindings(const VkCmdBuffer cb)
{
bool32_t result = VK_TRUE;
MT_CB_INFO* pCBInfo = getCBInfo(cb);
@@ -441,7 +441,7 @@ static bool32_t freeCBBindings(const VK_CMD_BUFFER cb)
deleteFenceInfo(pCBInfo->fenceId);
}
- for (list<VK_GPU_MEMORY>::iterator it=pCBInfo->pMemObjList.begin(); it!=pCBInfo->pMemObjList.end(); ++it) {
+ for (list<VkGpuMemory>::iterator it=pCBInfo->pMemObjList.begin(); it!=pCBInfo->pMemObjList.end(); ++it) {
clearCBBinding(cb, (*it));
}
pCBInfo->pMemObjList.clear();
@@ -452,7 +452,7 @@ static bool32_t freeCBBindings(const VK_CMD_BUFFER cb)
// Delete CBInfo from list along with all of it's mini MemObjInfo
// and also clear mem references to CB
// TODO : When should this be called? There's no Destroy of CBs that I see
-static bool32_t deleteCBInfo(const VK_CMD_BUFFER cb)
+static bool32_t deleteCBInfo(const VkCmdBuffer cb)
{
bool32_t result = VK_TRUE;
result = freeCBBindings(cb);
@@ -471,7 +471,7 @@ static bool32_t deleteCBInfo(const VK_CMD_BUFFER cb)
static bool32_t deleteCBInfoList()
{
bool32_t result = VK_TRUE;
- for (map<VK_CMD_BUFFER, MT_CB_INFO*>::iterator ii=cbMap.begin(); ii!=cbMap.end(); ++ii) {
+ for (map<VkCmdBuffer, MT_CB_INFO*>::iterator ii=cbMap.begin(); ii!=cbMap.end(); ++ii) {
freeCBBindings((*ii).first);
delete (*ii).second;
}
@@ -483,13 +483,13 @@ static void reportMemReferences(const MT_MEM_OBJ_INFO* pMemObjInfo)
{
uint32_t refCount = 0; // Count found references
- for (list<VK_CMD_BUFFER>::const_iterator it = pMemObjInfo->pCmdBufferBindings.begin(); it != pMemObjInfo->pCmdBufferBindings.end(); ++it) {
+ for (list<VkCmdBuffer>::const_iterator it = pMemObjInfo->pCmdBufferBindings.begin(); it != pMemObjInfo->pCmdBufferBindings.end(); ++it) {
refCount++;
char str[1024];
sprintf(str, "Command Buffer %p has reference to mem obj %p", (*it), pMemObjInfo->mem);
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, (*it), 0, MEMTRACK_NONE, "MEM", str);
}
- for (list<VK_OBJECT>::const_iterator it = pMemObjInfo->pObjBindings.begin(); it != pMemObjInfo->pObjBindings.end(); ++it) {
+ for (list<VkObject>::const_iterator it = pMemObjInfo->pObjBindings.begin(); it != pMemObjInfo->pObjBindings.end(); ++it) {
char str[1024];
sprintf(str, "VK Object %p has reference to mem obj %p", (*it), pMemObjInfo->mem);
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, (*it), 0, MEMTRACK_NONE, "MEM", str);
@@ -501,7 +501,7 @@ static void reportMemReferences(const MT_MEM_OBJ_INFO* pMemObjInfo)
}
}
-static void deleteMemObjInfo(VK_GPU_MEMORY mem)
+static void deleteMemObjInfo(VkGpuMemory mem)
{
MT_MEM_OBJ_INFO* pDelInfo = memObjMap[mem];
if (memObjMap.find(mem) != memObjMap.end()) {
@@ -512,7 +512,7 @@ static void deleteMemObjInfo(VK_GPU_MEMORY mem)
}
// Check if fence for given CB is completed
-static bool32_t checkCBCompleted(const VK_CMD_BUFFER cb)
+static bool32_t checkCBCompleted(const VkCmdBuffer cb)
{
bool32_t result = VK_TRUE;
MT_CB_INFO* pCBInfo = getCBInfo(cb);
@@ -532,7 +532,7 @@ static bool32_t checkCBCompleted(const VK_CMD_BUFFER cb)
return result;
}
-static bool32_t freeMemObjInfo(VK_GPU_MEMORY mem, bool internal)
+static bool32_t freeMemObjInfo(VkGpuMemory mem, bool internal)
{
bool32_t result = VK_TRUE;
// Parse global list to find info w/ mem
@@ -552,8 +552,8 @@ static bool32_t freeMemObjInfo(VK_GPU_MEMORY mem, bool internal)
// Clear any CB bindings for completed CBs
// TODO : Is there a better place to do this?
- list<VK_CMD_BUFFER>::iterator it = pInfo->pCmdBufferBindings.begin();
- list<VK_CMD_BUFFER>::iterator temp;
+ list<VkCmdBuffer>::iterator it = pInfo->pCmdBufferBindings.begin();
+ list<VkCmdBuffer>::iterator temp;
while (it != pInfo->pCmdBufferBindings.end()) {
if (VK_TRUE == checkCBCompleted(*it)) {
temp = it;
@@ -585,7 +585,7 @@ static bool32_t freeMemObjInfo(VK_GPU_MEMORY mem, bool internal)
// 1. Remove ObjectInfo from MemObjInfo list container of obj bindings & free it
// 2. Decrement refCount for MemObjInfo
// 3. Clear MemObjInfo ptr from ObjectInfo
-static bool32_t clearObjectBinding(VK_OBJECT object)
+static bool32_t clearObjectBinding(VkObject object)
{
bool32_t result = VK_FALSE;
MT_OBJ_INFO* pObjInfo = getObjectInfo(object);
@@ -599,7 +599,7 @@ static bool32_t clearObjectBinding(VK_OBJECT object)
sprintf(str, "Attempting to clear mem binding on obj %p but it has no binding.", (void*)object);
layerCbMsg(VK_DBG_MSG_WARNING, VK_VALIDATION_LEVEL_0, object, 0, MEMTRACK_MEM_OBJ_CLEAR_EMPTY_BINDINGS, "MEM", str);
} else {
- for (list<VK_OBJECT>::iterator it = pObjInfo->pMemObjInfo->pObjBindings.begin(); it != pObjInfo->pMemObjInfo->pObjBindings.end(); ++it) {
+ for (list<VkObject>::iterator it = pObjInfo->pMemObjInfo->pObjBindings.begin(); it != pObjInfo->pMemObjInfo->pObjBindings.end(); ++it) {
pObjInfo->pMemObjInfo->refCount--;
pObjInfo->pMemObjInfo = NULL;
it = pObjInfo->pMemObjInfo->pObjBindings.erase(it);
@@ -623,7 +623,7 @@ static bool32_t clearObjectBinding(VK_OBJECT object)
// Add reference from objectInfo to memoryInfo
// Add reference off of objInfo
// Return VK_TRUE if addition is successful, VK_FALSE otherwise
-static bool32_t updateObjectBinding(VK_OBJECT object, VK_GPU_MEMORY mem)
+static bool32_t updateObjectBinding(VkObject object, VkGpuMemory mem)
{
bool32_t result = VK_FALSE;
// Handle NULL case separately, just clear previous binding & decrement reference
@@ -646,7 +646,7 @@ static bool32_t updateObjectBinding(VK_OBJECT object, VK_GPU_MEMORY mem)
} else {
// Search for object in memory object's binding list
bool32_t found = VK_FALSE;
- for (list<VK_OBJECT>::iterator it = pInfo->pObjBindings.begin(); it != pInfo->pObjBindings.end(); ++it) {
+ for (list<VkObject>::iterator it = pInfo->pObjBindings.begin(); it != pInfo->pObjBindings.end(); ++it) {
if ((*it) == object) {
found = VK_TRUE;
break;
@@ -683,7 +683,7 @@ static void printObjList()
char str[1024];
sprintf(str, "Details of Object list of size %lu elements", objectMap.size());
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, MEMTRACK_NONE, "MEM", str);
- for (map<VK_OBJECT, MT_OBJ_INFO*>::iterator ii=objectMap.begin(); ii!=objectMap.end(); ++ii) {
+ for (map<VkObject, MT_OBJ_INFO*>::iterator ii=objectMap.begin(); ii!=objectMap.end(); ++ii) {
pInfo = (*ii).second;
sprintf(str, " ObjInfo %p has object %p, pMemObjInfo %p", pInfo, pInfo->object, pInfo->pMemObjInfo);
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, pInfo->object, 0, MEMTRACK_NONE, "MEM", str);
@@ -691,9 +691,9 @@ static void printObjList()
}
// For given Object, get 'mem' obj that it's bound to or NULL if no binding
-static VK_GPU_MEMORY getMemBindingFromObject(const VK_OBJECT object)
+static VkGpuMemory getMemBindingFromObject(const VkObject object)
{
- VK_GPU_MEMORY mem = NULL;
+ VkGpuMemory mem = NULL;
MT_OBJ_INFO* pObjInfo = getObjectInfo(object);
if (pObjInfo) {
if (pObjInfo->pMemObjInfo) {
@@ -724,7 +724,7 @@ static void printMemList()
sprintf(str, "MEM INFO : Details of Memory Object list of size %lu elements", memObjMap.size());
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, MEMTRACK_NONE, "MEM", str);
- for (map<VK_GPU_MEMORY, MT_MEM_OBJ_INFO*>::iterator ii=memObjMap.begin(); ii!=memObjMap.end(); ++ii) {
+ for (map<VkGpuMemory, MT_MEM_OBJ_INFO*>::iterator ii=memObjMap.begin(); ii!=memObjMap.end(); ++ii) {
pInfo = (*ii).second;
sprintf(str, " ===MemObjInfo at %p===", (void*)pInfo);
@@ -744,14 +744,14 @@ static void printMemList()
sprintf(str, " VK OBJECT Binding list of size %lu elements:", pInfo->pObjBindings.size());
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, MEMTRACK_NONE, "MEM", str);
- for (list<VK_OBJECT>::iterator it = pInfo->pObjBindings.begin(); it != pInfo->pObjBindings.end(); ++it) {
+ for (list<VkObject>::iterator it = pInfo->pObjBindings.begin(); it != pInfo->pObjBindings.end(); ++it) {
sprintf(str, " VK OBJECT %p", (*it));
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, MEMTRACK_NONE, "MEM", str);
}
sprintf(str, " VK Command Buffer (CB) binding list of size %lu elements", pInfo->pCmdBufferBindings.size());
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, MEMTRACK_NONE, "MEM", str);
- for (list<VK_CMD_BUFFER>::iterator it = pInfo->pCmdBufferBindings.begin(); it != pInfo->pCmdBufferBindings.end(); ++it) {
+ for (list<VkCmdBuffer>::iterator it = pInfo->pCmdBufferBindings.begin(); it != pInfo->pCmdBufferBindings.end(); ++it) {
sprintf(str, " VK CB %p", (*it));
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, MEMTRACK_NONE, "MEM", str);
}
@@ -765,7 +765,7 @@ static void printCBList()
sprintf(str, "Details of CB list of size %lu elements", cbMap.size());
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, MEMTRACK_NONE, "MEM", str);
- for (map<VK_CMD_BUFFER, MT_CB_INFO*>::iterator ii=cbMap.begin(); ii!=cbMap.end(); ++ii) {
+ for (map<VkCmdBuffer, MT_CB_INFO*>::iterator ii=cbMap.begin(); ii!=cbMap.end(); ++ii) {
pCBInfo = (*ii).second;
sprintf(str, " CB Info (%p) has CB %p, fenceId %" PRIx64", and fence %p",
@@ -773,7 +773,7 @@ static void printCBList()
(void*)getFenceFromId(pCBInfo->fenceId));
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, MEMTRACK_NONE, "MEM", str);
- for (list<VK_GPU_MEMORY>::iterator it = pCBInfo->pMemObjList.begin(); it != pCBInfo->pMemObjList.end(); ++it) {
+ for (list<VkGpuMemory>::iterator it = pCBInfo->pMemObjList.begin(); it != pCBInfo->pMemObjList.end(); ++it) {
sprintf(str, " Mem obj %p", (*it));
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, MEMTRACK_NONE, "MEM", str);
}
@@ -800,13 +800,13 @@ static void initMemTracker(void)
// initialize Layer dispatch table
// TODO handle multiple GPUs
- vkGetProcAddrType fpNextGPA;
+ PFN_vkGetProcAddr fpNextGPA;
fpNextGPA = pCurObj->pGPA;
assert(fpNextGPA);
- layer_initialize_dispatch_table(&nextTable, fpNextGPA, (VK_PHYSICAL_GPU) pCurObj->nextObject);
+ layer_initialize_dispatch_table(&nextTable, fpNextGPA, (VkPhysicalGpu) pCurObj->nextObject);
- vkGetProcAddrType fpGetProcAddr = (vkGetProcAddrType)fpNextGPA((VK_PHYSICAL_GPU) pCurObj->nextObject, (char *) "vkGetProcAddr");
+ PFN_vkGetProcAddr fpGetProcAddr = (PFN_vkGetProcAddr)fpNextGPA((VkPhysicalGpu) pCurObj->nextObject, (char *) "vkGetProcAddr");
nextTable.GetProcAddr = fpGetProcAddr;
if (!globalLockInitialized)
@@ -821,18 +821,18 @@ static void initMemTracker(void)
}
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDevice(VK_PHYSICAL_GPU gpu, const VkDeviceCreateInfo* pCreateInfo, VK_DEVICE* pDevice)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDevice(VkPhysicalGpu gpu, const VkDeviceCreateInfo* pCreateInfo, VkDevice* pDevice)
{
VK_BASE_LAYER_OBJECT* gpuw = (VK_BASE_LAYER_OBJECT *) gpu;
pCurObj = gpuw;
loader_platform_thread_once(&g_initOnce, initMemTracker);
- VK_RESULT result = nextTable.CreateDevice((VK_PHYSICAL_GPU)gpuw->nextObject, pCreateInfo, pDevice);
+ VkResult result = nextTable.CreateDevice((VkPhysicalGpu)gpuw->nextObject, pCreateInfo, pDevice);
// Save off device in case we need it to create Fences
globalDevice = *pDevice;
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkDestroyDevice(VK_DEVICE device)
+VK_LAYER_EXPORT VkResult VKAPI vkDestroyDevice(VkDevice device)
{
char str[1024];
sprintf(str, "Printing List details prior to vkDestroyDevice()");
@@ -847,7 +847,7 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkDestroyDevice(VK_DEVICE device)
}
// Report any memory leaks
MT_MEM_OBJ_INFO* pInfo = NULL;
- for (map<VK_GPU_MEMORY, MT_MEM_OBJ_INFO*>::iterator ii=memObjMap.begin(); ii!=memObjMap.end(); ++ii) {
+ for (map<VkGpuMemory, MT_MEM_OBJ_INFO*>::iterator ii=memObjMap.begin(); ii!=memObjMap.end(); ++ii) {
pInfo = (*ii).second;
if (pInfo->allocInfo.allocationSize != 0) {
@@ -861,21 +861,21 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkDestroyDevice(VK_DEVICE device)
deleteQueueInfoList();
loader_platform_thread_unlock_mutex(&globalLock);
- VK_RESULT result = nextTable.DestroyDevice(device);
+ VkResult result = nextTable.DestroyDevice(device);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkGetExtensionSupport(VK_PHYSICAL_GPU gpu, const char* pExtName)
+VK_LAYER_EXPORT VkResult VKAPI vkGetExtensionSupport(VkPhysicalGpu gpu, const char* pExtName)
{
VK_BASE_LAYER_OBJECT* gpuw = (VK_BASE_LAYER_OBJECT *) gpu;
- VK_RESULT result;
+ VkResult result;
/* This entrypoint is NOT going to init its own dispatch table since loader calls here early */
if (!strcmp(pExtName, "MemTracker"))
{
result = VK_SUCCESS;
} else if (nextTable.GetExtensionSupport != NULL)
{
- result = nextTable.GetExtensionSupport((VK_PHYSICAL_GPU)gpuw->nextObject, pExtName);
+ result = nextTable.GetExtensionSupport((VkPhysicalGpu)gpuw->nextObject, pExtName);
} else
{
result = VK_ERROR_INVALID_EXTENSION;
@@ -883,7 +883,7 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkGetExtensionSupport(VK_PHYSICAL_GPU gpu, const
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkEnumerateLayers(VK_PHYSICAL_GPU gpu, size_t maxLayerCount,
+VK_LAYER_EXPORT VkResult VKAPI vkEnumerateLayers(VkPhysicalGpu gpu, size_t maxLayerCount,
size_t maxStringSize, size_t* pOutLayerCount, char* const* pOutLayers, void* pReserved)
{
if (gpu != NULL)
@@ -891,7 +891,7 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkEnumerateLayers(VK_PHYSICAL_GPU gpu, size_t ma
VK_BASE_LAYER_OBJECT* gpuw = (VK_BASE_LAYER_OBJECT *) gpu;
pCurObj = gpuw;
loader_platform_thread_once(&g_initOnce, initMemTracker);
- VK_RESULT result = nextTable.EnumerateLayers((VK_PHYSICAL_GPU)gpuw->nextObject, maxLayerCount,
+ VkResult result = nextTable.EnumerateLayers((VkPhysicalGpu)gpuw->nextObject, maxLayerCount,
maxStringSize, pOutLayerCount, pOutLayers, pReserved);
return result;
} else
@@ -905,9 +905,9 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkEnumerateLayers(VK_PHYSICAL_GPU gpu, size_t ma
}
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkGetDeviceQueue(VK_DEVICE device, uint32_t queueNodeIndex, uint32_t queueIndex, VK_QUEUE* pQueue)
+VK_LAYER_EXPORT VkResult VKAPI vkGetDeviceQueue(VkDevice device, uint32_t queueNodeIndex, uint32_t queueIndex, VkQueue* pQueue)
{
- VK_RESULT result = nextTable.GetDeviceQueue(device, queueNodeIndex, queueIndex, pQueue);
+ VkResult result = nextTable.GetDeviceQueue(device, queueNodeIndex, queueIndex, pQueue);
if (result == VK_SUCCESS) {
loader_platform_thread_lock_mutex(&globalLock);
addQueueInfo(*pQueue);
@@ -916,9 +916,9 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkGetDeviceQueue(VK_DEVICE device, uint32_t queu
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkQueueAddMemReference(VK_QUEUE queue, VK_GPU_MEMORY mem)
+VK_LAYER_EXPORT VkResult VKAPI vkQueueAddMemReference(VkQueue queue, VkGpuMemory mem)
{
- VK_RESULT result = nextTable.QueueAddMemReference(queue, mem);
+ VkResult result = nextTable.QueueAddMemReference(queue, mem);
if (result == VK_SUCCESS) {
loader_platform_thread_lock_mutex(&globalLock);
@@ -945,10 +945,10 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkQueueAddMemReference(VK_QUEUE queue, VK_GPU_ME
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkQueueRemoveMemReference(VK_QUEUE queue, VK_GPU_MEMORY mem)
+VK_LAYER_EXPORT VkResult VKAPI vkQueueRemoveMemReference(VkQueue queue, VkGpuMemory mem)
{
// TODO : Decrement ref count for this memory reference on this queue. Remove if ref count is zero.
- VK_RESULT result = nextTable.QueueRemoveMemReference(queue, mem);
+ VkResult result = nextTable.QueueRemoveMemReference(queue, mem);
if (result == VK_SUCCESS) {
loader_platform_thread_lock_mutex(&globalLock);
@@ -959,7 +959,7 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkQueueRemoveMemReference(VK_QUEUE queue, VK_GPU
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, queue, 0, MEMTRACK_INVALID_QUEUE, "MEM", str);
}
else {
- for (list<VK_GPU_MEMORY>::iterator it = pQueueInfo->pMemRefList.begin(); it != pQueueInfo->pMemRefList.end(); ++it) {
+ for (list<VkGpuMemory>::iterator it = pQueueInfo->pMemRefList.begin(); it != pQueueInfo->pMemRefList.end(); ++it) {
if ((*it) == mem) {
it = pQueueInfo->pMemRefList.erase(it);
}
@@ -970,11 +970,11 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkQueueRemoveMemReference(VK_QUEUE queue, VK_GPU
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkQueueSubmit(
- VK_QUEUE queue,
+VK_LAYER_EXPORT VkResult VKAPI vkQueueSubmit(
+ VkQueue queue,
uint32_t cmdBufferCount,
- const VK_CMD_BUFFER *pCmdBuffers,
- VK_FENCE fence)
+ const VkCmdBuffer *pCmdBuffers,
+ VkFence fence)
{
loader_platform_thread_lock_mutex(&globalLock);
// TODO : Need to track fence and clear mem references when fence clears
@@ -995,13 +995,13 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkQueueSubmit(
}
loader_platform_thread_unlock_mutex(&globalLock);
- VK_RESULT result = nextTable.QueueSubmit(queue, cmdBufferCount, pCmdBuffers, getFenceFromId(fenceId));
+ VkResult result = nextTable.QueueSubmit(queue, cmdBufferCount, pCmdBuffers, getFenceFromId(fenceId));
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkAllocMemory(VK_DEVICE device, const VkMemoryAllocInfo* pAllocInfo, VK_GPU_MEMORY* pMem)
+VK_LAYER_EXPORT VkResult VKAPI vkAllocMemory(VkDevice device, const VkMemoryAllocInfo* pAllocInfo, VkGpuMemory* pMem)
{
- VK_RESULT result = nextTable.AllocMemory(device, pAllocInfo, pMem);
+ VkResult result = nextTable.AllocMemory(device, pAllocInfo, pMem);
// TODO : Track allocations and overall size here
loader_platform_thread_lock_mutex(&globalLock);
addMemObjInfo(*pMem, pAllocInfo);
@@ -1010,7 +1010,7 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkAllocMemory(VK_DEVICE device, const VkMemoryAl
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkFreeMemory(VK_GPU_MEMORY mem)
+VK_LAYER_EXPORT VkResult VKAPI vkFreeMemory(VkGpuMemory mem)
{
/* From spec : A memory object is freed by calling vkFreeMemory() when it is no longer needed. Before
* freeing a memory object, an application must ensure the memory object is unbound from
@@ -1026,19 +1026,19 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkFreeMemory(VK_GPU_MEMORY mem)
printObjList();
printCBList();
loader_platform_thread_unlock_mutex(&globalLock);
- VK_RESULT result = nextTable.FreeMemory(mem);
+ VkResult result = nextTable.FreeMemory(mem);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkSetMemoryPriority(VK_GPU_MEMORY mem, VK_MEMORY_PRIORITY priority)
+VK_LAYER_EXPORT VkResult VKAPI vkSetMemoryPriority(VkGpuMemory mem, VkMemoryPriority priority)
{
// TODO : Update tracking for this alloc
// Make sure memory is not pinned, which can't have priority set
- VK_RESULT result = nextTable.SetMemoryPriority(mem, priority);
+ VkResult result = nextTable.SetMemoryPriority(mem, priority);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkMapMemory(VK_GPU_MEMORY mem, VK_FLAGS flags, void** ppData)
+VK_LAYER_EXPORT VkResult VKAPI vkMapMemory(VkGpuMemory mem, VkFlags flags, void** ppData)
{
// TODO : Track when memory is mapped
loader_platform_thread_lock_mutex(&globalLock);
@@ -1049,54 +1049,54 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkMapMemory(VK_GPU_MEMORY mem, VK_FLAGS flags, v
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, mem, 0, MEMTRACK_INVALID_STATE, "MEM", str);
}
loader_platform_thread_unlock_mutex(&globalLock);
- VK_RESULT result = nextTable.MapMemory(mem, flags, ppData);
+ VkResult result = nextTable.MapMemory(mem, flags, ppData);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkUnmapMemory(VK_GPU_MEMORY mem)
+VK_LAYER_EXPORT VkResult VKAPI vkUnmapMemory(VkGpuMemory mem)
{
// TODO : Track as memory gets unmapped, do we want to check what changed following map?
// Make sure that memory was ever mapped to begin with
- VK_RESULT result = nextTable.UnmapMemory(mem);
+ VkResult result = nextTable.UnmapMemory(mem);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkPinSystemMemory(VK_DEVICE device, const void* pSysMem, size_t memSize, VK_GPU_MEMORY* pMem)
+VK_LAYER_EXPORT VkResult VKAPI vkPinSystemMemory(VkDevice device, const void* pSysMem, size_t memSize, VkGpuMemory* pMem)
{
// TODO : Track this
// Verify that memory is actually pinnable
- VK_RESULT result = nextTable.PinSystemMemory(device, pSysMem, memSize, pMem);
+ VkResult result = nextTable.PinSystemMemory(device, pSysMem, memSize, pMem);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkOpenSharedMemory(VK_DEVICE device, const VK_MEMORY_OPEN_INFO* pOpenInfo, VK_GPU_MEMORY* pMem)
+VK_LAYER_EXPORT VkResult VKAPI vkOpenSharedMemory(VkDevice device, const VkMemoryOpenInfo* pOpenInfo, VkGpuMemory* pMem)
{
// TODO : Track this
- VK_RESULT result = nextTable.OpenSharedMemory(device, pOpenInfo, pMem);
+ VkResult result = nextTable.OpenSharedMemory(device, pOpenInfo, pMem);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkOpenPeerMemory(VK_DEVICE device, const VK_PEER_MEMORY_OPEN_INFO* pOpenInfo, VK_GPU_MEMORY* pMem)
+VK_LAYER_EXPORT VkResult VKAPI vkOpenPeerMemory(VkDevice device, const VkPeerMemoryOpenInfo* pOpenInfo, VkGpuMemory* pMem)
{
// TODO : Track this
- VK_RESULT result = nextTable.OpenPeerMemory(device, pOpenInfo, pMem);
+ VkResult result = nextTable.OpenPeerMemory(device, pOpenInfo, pMem);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkOpenPeerImage(VK_DEVICE device, const VK_PEER_IMAGE_OPEN_INFO* pOpenInfo, VK_IMAGE* pImage, VK_GPU_MEMORY* pMem)
+VK_LAYER_EXPORT VkResult VKAPI vkOpenPeerImage(VkDevice device, const VkPeerImageOpenInfo* pOpenInfo, VkImage* pImage, VkGpuMemory* pMem)
{
// TODO : Track this
- VK_RESULT result = nextTable.OpenPeerImage(device, pOpenInfo, pImage, pMem);
+ VkResult result = nextTable.OpenPeerImage(device, pOpenInfo, pImage, pMem);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkDestroyObject(VK_OBJECT object)
+VK_LAYER_EXPORT VkResult VKAPI vkDestroyObject(VkObject object)
{
loader_platform_thread_lock_mutex(&globalLock);
// First check if this is a CmdBuffer
- if (NULL != getCBInfo((VK_CMD_BUFFER)object)) {
- deleteCBInfo((VK_CMD_BUFFER)object);
+ if (NULL != getCBInfo((VkCmdBuffer)object)) {
+ deleteCBInfo((VkCmdBuffer)object);
}
if (objectMap.find(object) != objectMap.end()) {
@@ -1104,7 +1104,7 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkDestroyObject(VK_OBJECT object)
if (pDelInfo->pMemObjInfo) {
// Wsi allocated Memory is tied to image object so clear the binding and free that memory automatically
if (0 == pDelInfo->pMemObjInfo->allocInfo.allocationSize) { // Wsi allocated memory has NULL allocInfo w/ 0 size
- VK_GPU_MEMORY memToFree = pDelInfo->pMemObjInfo->mem;
+ VkGpuMemory memToFree = pDelInfo->pMemObjInfo->mem;
clearObjectBinding(object);
freeMemObjInfo(memToFree, true);
}
@@ -1121,22 +1121,22 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkDestroyObject(VK_OBJECT object)
}
loader_platform_thread_unlock_mutex(&globalLock);
- VK_RESULT result = nextTable.DestroyObject(object);
+ VkResult result = nextTable.DestroyObject(object);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkGetObjectInfo(VK_BASE_OBJECT object, VK_OBJECT_INFO_TYPE infoType, size_t* pDataSize, void* pData)
+VK_LAYER_EXPORT VkResult VKAPI vkGetObjectInfo(VkBaseObject object, VkObjectInfoType infoType, size_t* pDataSize, void* pData)
{
// TODO : What to track here?
// Could potentially save returned mem requirements and validate values passed into BindObjectMemory for this object
// From spec : The only objects that are guaranteed to have no external memory requirements are devices, queues, command buffers, shaders and memory objects.
- VK_RESULT result = nextTable.GetObjectInfo(object, infoType, pDataSize, pData);
+ VkResult result = nextTable.GetObjectInfo(object, infoType, pDataSize, pData);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkBindObjectMemory(VK_OBJECT object, uint32_t allocationIdx, VK_GPU_MEMORY mem, VK_GPU_SIZE offset)
+VK_LAYER_EXPORT VkResult VKAPI vkBindObjectMemory(VkObject object, uint32_t allocationIdx, VkGpuMemory mem, VkGpuSize offset)
{
- VK_RESULT result = nextTable.BindObjectMemory(object, allocationIdx, mem, offset);
+ VkResult result = nextTable.BindObjectMemory(object, allocationIdx, mem, offset);
loader_platform_thread_lock_mutex(&globalLock);
// Track objects tied to memory
if (VK_FALSE == updateObjectBinding(object, mem)) {
@@ -1150,20 +1150,20 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkBindObjectMemory(VK_OBJECT object, uint32_t al
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateFence(VK_DEVICE device, const VK_FENCE_CREATE_INFO* pCreateInfo, VK_FENCE* pFence)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateFence(VkDevice device, const VkFenceCreateInfo* pCreateInfo, VkFence* pFence)
{
- VK_RESULT result = nextTable.CreateFence(device, pCreateInfo, pFence);
+ VkResult result = nextTable.CreateFence(device, pCreateInfo, pFence);
if (VK_SUCCESS == result) {
loader_platform_thread_lock_mutex(&globalLock);
- addObjectInfo(*pFence, pCreateInfo->sType, pCreateInfo, sizeof(VK_FENCE_CREATE_INFO), "fence");
+ addObjectInfo(*pFence, pCreateInfo->sType, pCreateInfo, sizeof(VkFenceCreateInfo), "fence");
loader_platform_thread_unlock_mutex(&globalLock);
}
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkResetFences(VK_DEVICE device, uint32_t fenceCount, VK_FENCE* pFences)
+VK_LAYER_EXPORT VkResult VKAPI vkResetFences(VkDevice device, uint32_t fenceCount, VkFence* pFences)
{
- VK_RESULT result = nextTable.ResetFences(device, fenceCount, pFences);
+ VkResult result = nextTable.ResetFences(device, fenceCount, pFences);
if (VK_SUCCESS == result) {
loader_platform_thread_lock_mutex(&globalLock);
// Reset fence state in fenceCreateInfo structure
@@ -1171,7 +1171,7 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkResetFences(VK_DEVICE device, uint32_t fenceCo
MT_OBJ_INFO* pObjectInfo = getObjectInfo(pFences[i]);
if (pObjectInfo != NULL) {
pObjectInfo->create_info.fence_create_info.flags =
- static_cast<VK_FENCE_CREATE_FLAGS>(pObjectInfo->create_info.fence_create_info.flags & ~VK_FENCE_CREATE_SIGNALED_BIT);
+ static_cast<VkFenceCreateFlags>(pObjectInfo->create_info.fence_create_info.flags & ~VK_FENCE_CREATE_SIGNALED_BIT);
}
}
loader_platform_thread_unlock_mutex(&globalLock);
@@ -1179,9 +1179,9 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkResetFences(VK_DEVICE device, uint32_t fenceCo
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkGetFenceStatus(VK_FENCE fence)
+VK_LAYER_EXPORT VkResult VKAPI vkGetFenceStatus(VkFence fence)
{
- VK_RESULT result = nextTable.GetFenceStatus(fence);
+ VkResult result = nextTable.GetFenceStatus(fence);
if (VK_SUCCESS == result) {
loader_platform_thread_lock_mutex(&globalLock);
updateFenceTracking(fence);
@@ -1190,7 +1190,7 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkGetFenceStatus(VK_FENCE fence)
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkWaitForFences(VK_DEVICE device, uint32_t fenceCount, const VK_FENCE* pFences, bool32_t waitAll, uint64_t timeout)
+VK_LAYER_EXPORT VkResult VKAPI vkWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence* pFences, bool32_t waitAll, uint64_t timeout)
{
// Verify fence status of submitted fences
for(uint32_t i = 0; i < fenceCount; i++) {
@@ -1204,7 +1204,7 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkWaitForFences(VK_DEVICE device, uint32_t fence
}
}
- VK_RESULT result = nextTable.WaitForFences(device, fenceCount, pFences, waitAll, timeout);
+ VkResult result = nextTable.WaitForFences(device, fenceCount, pFences, waitAll, timeout);
loader_platform_thread_lock_mutex(&globalLock);
if (VK_SUCCESS == result) {
@@ -1218,9 +1218,9 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkWaitForFences(VK_DEVICE device, uint32_t fence
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkQueueWaitIdle(VK_QUEUE queue)
+VK_LAYER_EXPORT VkResult VKAPI vkQueueWaitIdle(VkQueue queue)
{
- VK_RESULT result = nextTable.QueueWaitIdle(queue);
+ VkResult result = nextTable.QueueWaitIdle(queue);
if (VK_SUCCESS == result) {
loader_platform_thread_lock_mutex(&globalLock);
retireQueueFences(queue);
@@ -1229,9 +1229,9 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkQueueWaitIdle(VK_QUEUE queue)
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkDeviceWaitIdle(VK_DEVICE device)
+VK_LAYER_EXPORT VkResult VKAPI vkDeviceWaitIdle(VkDevice device)
{
- VK_RESULT result = nextTable.DeviceWaitIdle(device);
+ VkResult result = nextTable.DeviceWaitIdle(device);
if (VK_SUCCESS == result) {
loader_platform_thread_lock_mutex(&globalLock);
retireDeviceFences(device);
@@ -1240,31 +1240,31 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkDeviceWaitIdle(VK_DEVICE device)
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateEvent(VK_DEVICE device, const VK_EVENT_CREATE_INFO* pCreateInfo, VK_EVENT* pEvent)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateEvent(VkDevice device, const VkEventCreateInfo* pCreateInfo, VkEvent* pEvent)
{
- VK_RESULT result = nextTable.CreateEvent(device, pCreateInfo, pEvent);
+ VkResult result = nextTable.CreateEvent(device, pCreateInfo, pEvent);
if (VK_SUCCESS == result) {
loader_platform_thread_lock_mutex(&globalLock);
- addObjectInfo(*pEvent, pCreateInfo->sType, pCreateInfo, sizeof(VK_EVENT_CREATE_INFO), "event");
+ addObjectInfo(*pEvent, pCreateInfo->sType, pCreateInfo, sizeof(VkEventCreateInfo), "event");
loader_platform_thread_unlock_mutex(&globalLock);
}
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateQueryPool(VK_DEVICE device, const VK_QUERY_POOL_CREATE_INFO* pCreateInfo, VK_QUERY_POOL* pQueryPool)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo, VkQueryPool* pQueryPool)
{
- VK_RESULT result = nextTable.CreateQueryPool(device, pCreateInfo, pQueryPool);
+ VkResult result = nextTable.CreateQueryPool(device, pCreateInfo, pQueryPool);
if (VK_SUCCESS == result) {
loader_platform_thread_lock_mutex(&globalLock);
- addObjectInfo(*pQueryPool, pCreateInfo->sType, pCreateInfo, sizeof(VK_QUERY_POOL_CREATE_INFO), "query_pool");
+ addObjectInfo(*pQueryPool, pCreateInfo->sType, pCreateInfo, sizeof(VkQueryPoolCreateInfo), "query_pool");
loader_platform_thread_unlock_mutex(&globalLock);
}
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateBuffer(VK_DEVICE device, const VkBufferCreateInfo* pCreateInfo, VK_BUFFER* pBuffer)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateBuffer(VkDevice device, const VkBufferCreateInfo* pCreateInfo, VkBuffer* pBuffer)
{
- VK_RESULT result = nextTable.CreateBuffer(device, pCreateInfo, pBuffer);
+ VkResult result = nextTable.CreateBuffer(device, pCreateInfo, pBuffer);
if (VK_SUCCESS == result) {
loader_platform_thread_lock_mutex(&globalLock);
addObjectInfo(*pBuffer, pCreateInfo->sType, pCreateInfo, sizeof(VkBufferCreateInfo), "buffer");
@@ -1273,9 +1273,9 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateBuffer(VK_DEVICE device, const VkBufferC
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateBufferView(VK_DEVICE device, const VkBufferViewCreateInfo* pCreateInfo, VK_BUFFER_VIEW* pView)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateBufferView(VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, VkBufferView* pView)
{
- VK_RESULT result = nextTable.CreateBufferView(device, pCreateInfo, pView);
+ VkResult result = nextTable.CreateBufferView(device, pCreateInfo, pView);
if (result == VK_SUCCESS) {
loader_platform_thread_lock_mutex(&globalLock);
addObjectInfo(*pView, pCreateInfo->sType, pCreateInfo, sizeof(VkBufferViewCreateInfo), "buffer_view");
@@ -1284,156 +1284,156 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateBufferView(VK_DEVICE device, const VkBuf
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateImage(VK_DEVICE device, const VK_IMAGE_CREATE_INFO* pCreateInfo, VK_IMAGE* pImage)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateImage(VkDevice device, const VkImageCreateInfo* pCreateInfo, VkImage* pImage)
{
- VK_RESULT result = nextTable.CreateImage(device, pCreateInfo, pImage);
+ VkResult result = nextTable.CreateImage(device, pCreateInfo, pImage);
if (VK_SUCCESS == result) {
loader_platform_thread_lock_mutex(&globalLock);
- addObjectInfo(*pImage, pCreateInfo->sType, pCreateInfo, sizeof(VK_IMAGE_CREATE_INFO), "image");
+ addObjectInfo(*pImage, pCreateInfo->sType, pCreateInfo, sizeof(VkImageCreateInfo), "image");
loader_platform_thread_unlock_mutex(&globalLock);
}
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateImageView(VK_DEVICE device, const VK_IMAGE_VIEW_CREATE_INFO* pCreateInfo, VK_IMAGE_VIEW* pView)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateImageView(VkDevice device, const VkImageViewCreateInfo* pCreateInfo, VkImageView* pView)
{
- VK_RESULT result = nextTable.CreateImageView(device, pCreateInfo, pView);
+ VkResult result = nextTable.CreateImageView(device, pCreateInfo, pView);
if (result == VK_SUCCESS) {
loader_platform_thread_lock_mutex(&globalLock);
- addObjectInfo(*pView, pCreateInfo->sType, pCreateInfo, sizeof(VK_IMAGE_VIEW_CREATE_INFO), "image_view");
+ addObjectInfo(*pView, pCreateInfo->sType, pCreateInfo, sizeof(VkImageViewCreateInfo), "image_view");
loader_platform_thread_unlock_mutex(&globalLock);
}
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateColorAttachmentView(VK_DEVICE device, const VK_COLOR_ATTACHMENT_VIEW_CREATE_INFO* pCreateInfo,
- VK_COLOR_ATTACHMENT_VIEW* pView)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateColorAttachmentView(VkDevice device, const VkColorAttachmentViewCreateInfo* pCreateInfo,
+ VkColorAttachmentView* pView)
{
- VK_RESULT result = nextTable.CreateColorAttachmentView(device, pCreateInfo, pView);
+ VkResult result = nextTable.CreateColorAttachmentView(device, pCreateInfo, pView);
if (result == VK_SUCCESS) {
loader_platform_thread_lock_mutex(&globalLock);
- addObjectInfo(*pView, pCreateInfo->sType, pCreateInfo, sizeof(VK_COLOR_ATTACHMENT_VIEW_CREATE_INFO), "color_attachment_view");
+ addObjectInfo(*pView, pCreateInfo->sType, pCreateInfo, sizeof(VkColorAttachmentViewCreateInfo), "color_attachment_view");
loader_platform_thread_unlock_mutex(&globalLock);
}
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDepthStencilView(VK_DEVICE device, const VK_DEPTH_STENCIL_VIEW_CREATE_INFO* pCreateInfo, VK_DEPTH_STENCIL_VIEW* pView)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDepthStencilView(VkDevice device, const VkDepthStencilViewCreateInfo* pCreateInfo, VkDepthStencilView* pView)
{
- VK_RESULT result = nextTable.CreateDepthStencilView(device, pCreateInfo, pView);
+ VkResult result = nextTable.CreateDepthStencilView(device, pCreateInfo, pView);
if (result == VK_SUCCESS) {
loader_platform_thread_lock_mutex(&globalLock);
- addObjectInfo(*pView, pCreateInfo->sType, pCreateInfo, sizeof(VK_DEPTH_STENCIL_VIEW_CREATE_INFO), "ds_view");
+ addObjectInfo(*pView, pCreateInfo->sType, pCreateInfo, sizeof(VkDepthStencilViewCreateInfo), "ds_view");
loader_platform_thread_unlock_mutex(&globalLock);
}
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateShader(VK_DEVICE device, const VK_SHADER_CREATE_INFO* pCreateInfo, VK_SHADER* pShader)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateShader(VkDevice device, const VkShaderCreateInfo* pCreateInfo, VkShader* pShader)
{
- VK_RESULT result = nextTable.CreateShader(device, pCreateInfo, pShader);
+ VkResult result = nextTable.CreateShader(device, pCreateInfo, pShader);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateGraphicsPipeline(VK_DEVICE device, const VK_GRAPHICS_PIPELINE_CREATE_INFO* pCreateInfo, VK_PIPELINE* pPipeline)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateGraphicsPipeline(VkDevice device, const VkGraphicsPipelineCreateInfo* pCreateInfo, VkPipeline* pPipeline)
{
- VK_RESULT result = nextTable.CreateGraphicsPipeline(device, pCreateInfo, pPipeline);
+ VkResult result = nextTable.CreateGraphicsPipeline(device, pCreateInfo, pPipeline);
if (result == VK_SUCCESS) {
loader_platform_thread_lock_mutex(&globalLock);
- addObjectInfo(*pPipeline, pCreateInfo->sType, pCreateInfo, sizeof(VK_GRAPHICS_PIPELINE_CREATE_INFO), "graphics_pipeline");
+ addObjectInfo(*pPipeline, pCreateInfo->sType, pCreateInfo, sizeof(VkGraphicsPipelineCreateInfo), "graphics_pipeline");
loader_platform_thread_unlock_mutex(&globalLock);
}
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateGraphicsPipelineDerivative(
- VK_DEVICE device,
- const VK_GRAPHICS_PIPELINE_CREATE_INFO* pCreateInfo,
- VK_PIPELINE basePipeline,
- VK_PIPELINE* pPipeline)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateGraphicsPipelineDerivative(
+ VkDevice device,
+ const VkGraphicsPipelineCreateInfo* pCreateInfo,
+ VkPipeline basePipeline,
+ VkPipeline* pPipeline)
{
- VK_RESULT result = nextTable.CreateGraphicsPipelineDerivative(device, pCreateInfo, basePipeline, pPipeline);
+ VkResult result = nextTable.CreateGraphicsPipelineDerivative(device, pCreateInfo, basePipeline, pPipeline);
if (result == VK_SUCCESS) {
loader_platform_thread_lock_mutex(&globalLock);
- addObjectInfo(*pPipeline, pCreateInfo->sType, pCreateInfo, sizeof(VK_GRAPHICS_PIPELINE_CREATE_INFO), "graphics_pipeline");
+ addObjectInfo(*pPipeline, pCreateInfo->sType, pCreateInfo, sizeof(VkGraphicsPipelineCreateInfo), "graphics_pipeline");
loader_platform_thread_unlock_mutex(&globalLock);
}
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateComputePipeline(VK_DEVICE device, const VK_COMPUTE_PIPELINE_CREATE_INFO* pCreateInfo, VK_PIPELINE* pPipeline)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateComputePipeline(VkDevice device, const VkComputePipelineCreateInfo* pCreateInfo, VkPipeline* pPipeline)
{
- VK_RESULT result = nextTable.CreateComputePipeline(device, pCreateInfo, pPipeline);
+ VkResult result = nextTable.CreateComputePipeline(device, pCreateInfo, pPipeline);
if (result == VK_SUCCESS) {
loader_platform_thread_lock_mutex(&globalLock);
- addObjectInfo(*pPipeline, pCreateInfo->sType, pCreateInfo, sizeof(VK_COMPUTE_PIPELINE_CREATE_INFO), "compute_pipeline");
+ addObjectInfo(*pPipeline, pCreateInfo->sType, pCreateInfo, sizeof(VkComputePipelineCreateInfo), "compute_pipeline");
loader_platform_thread_unlock_mutex(&globalLock);
}
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateSampler(VK_DEVICE device, const VK_SAMPLER_CREATE_INFO* pCreateInfo, VK_SAMPLER* pSampler)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateSampler(VkDevice device, const VkSamplerCreateInfo* pCreateInfo, VkSampler* pSampler)
{
- VK_RESULT result = nextTable.CreateSampler(device, pCreateInfo, pSampler);
+ VkResult result = nextTable.CreateSampler(device, pCreateInfo, pSampler);
if (result == VK_SUCCESS) {
loader_platform_thread_lock_mutex(&globalLock);
- addObjectInfo(*pSampler, pCreateInfo->sType, pCreateInfo, sizeof(VK_SAMPLER_CREATE_INFO), "sampler");
+ addObjectInfo(*pSampler, pCreateInfo->sType, pCreateInfo, sizeof(VkSamplerCreateInfo), "sampler");
loader_platform_thread_unlock_mutex(&globalLock);
}
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDynamicViewportState(VK_DEVICE device, const VK_DYNAMIC_VP_STATE_CREATE_INFO* pCreateInfo,
- VK_DYNAMIC_VP_STATE_OBJECT* pState)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDynamicViewportState(VkDevice device, const VkDynamicVpStateCreateInfo* pCreateInfo,
+ VkDynamicVpStateObject* pState)
{
- VK_RESULT result = nextTable.CreateDynamicViewportState(device, pCreateInfo, pState);
+ VkResult result = nextTable.CreateDynamicViewportState(device, pCreateInfo, pState);
if (result == VK_SUCCESS) {
loader_platform_thread_lock_mutex(&globalLock);
- addObjectInfo(*pState, pCreateInfo->sType, pCreateInfo, sizeof(VK_DYNAMIC_VP_STATE_CREATE_INFO), "viewport_state");
+ addObjectInfo(*pState, pCreateInfo->sType, pCreateInfo, sizeof(VkDynamicVpStateCreateInfo), "viewport_state");
loader_platform_thread_unlock_mutex(&globalLock);
}
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDynamicRasterState(VK_DEVICE device, const VK_DYNAMIC_RS_STATE_CREATE_INFO* pCreateInfo,
- VK_DYNAMIC_RS_STATE_OBJECT* pState)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDynamicRasterState(VkDevice device, const VkDynamicRsStateCreateInfo* pCreateInfo,
+ VkDynamicRsStateObject* pState)
{
- VK_RESULT result = nextTable.CreateDynamicRasterState(device, pCreateInfo, pState);
+ VkResult result = nextTable.CreateDynamicRasterState(device, pCreateInfo, pState);
if (result == VK_SUCCESS) {
loader_platform_thread_lock_mutex(&globalLock);
- addObjectInfo(*pState, pCreateInfo->sType, pCreateInfo, sizeof(VK_DYNAMIC_RS_STATE_CREATE_INFO), "raster_state");
+ addObjectInfo(*pState, pCreateInfo->sType, pCreateInfo, sizeof(VkDynamicRsStateCreateInfo), "raster_state");
loader_platform_thread_unlock_mutex(&globalLock);
}
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDynamicColorBlendState(VK_DEVICE device, const VK_DYNAMIC_CB_STATE_CREATE_INFO* pCreateInfo,
- VK_DYNAMIC_CB_STATE_OBJECT* pState)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDynamicColorBlendState(VkDevice device, const VkDynamicCbStateCreateInfo* pCreateInfo,
+ VkDynamicCbStateObject* pState)
{
- VK_RESULT result = nextTable.CreateDynamicColorBlendState(device, pCreateInfo, pState);
+ VkResult result = nextTable.CreateDynamicColorBlendState(device, pCreateInfo, pState);
if (result == VK_SUCCESS) {
loader_platform_thread_lock_mutex(&globalLock);
- addObjectInfo(*pState, pCreateInfo->sType, pCreateInfo, sizeof(VK_DYNAMIC_CB_STATE_CREATE_INFO), "cb_state");
+ addObjectInfo(*pState, pCreateInfo->sType, pCreateInfo, sizeof(VkDynamicCbStateCreateInfo), "cb_state");
loader_platform_thread_unlock_mutex(&globalLock);
}
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDynamicDepthStencilState(VK_DEVICE device, const VK_DYNAMIC_DS_STATE_CREATE_INFO* pCreateInfo,
- VK_DYNAMIC_DS_STATE_OBJECT* pState)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDynamicDepthStencilState(VkDevice device, const VkDynamicDsStateCreateInfo* pCreateInfo,
+ VkDynamicDsStateObject* pState)
{
- VK_RESULT result = nextTable.CreateDynamicDepthStencilState(device, pCreateInfo, pState);
+ VkResult result = nextTable.CreateDynamicDepthStencilState(device, pCreateInfo, pState);
if (result == VK_SUCCESS) {
loader_platform_thread_lock_mutex(&globalLock);
- addObjectInfo(*pState, pCreateInfo->sType, pCreateInfo, sizeof(VK_DYNAMIC_DS_STATE_CREATE_INFO), "ds_state");
+ addObjectInfo(*pState, pCreateInfo->sType, pCreateInfo, sizeof(VkDynamicDsStateCreateInfo), "ds_state");
loader_platform_thread_unlock_mutex(&globalLock);
}
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateCommandBuffer(VK_DEVICE device, const VK_CMD_BUFFER_CREATE_INFO* pCreateInfo, VK_CMD_BUFFER* pCmdBuffer)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateCommandBuffer(VkDevice device, const VkCmdBufferCreateInfo* pCreateInfo, VkCmdBuffer* pCmdBuffer)
{
- VK_RESULT result = nextTable.CreateCommandBuffer(device, pCreateInfo, pCmdBuffer);
+ VkResult result = nextTable.CreateCommandBuffer(device, pCreateInfo, pCmdBuffer);
// At time of cmd buffer creation, create global cmd buffer info for the returned cmd buffer
loader_platform_thread_lock_mutex(&globalLock);
if (*pCmdBuffer)
@@ -1443,7 +1443,7 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateCommandBuffer(VK_DEVICE device, const VK
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkBeginCommandBuffer(VK_CMD_BUFFER cmdBuffer, const VK_CMD_BUFFER_BEGIN_INFO* pBeginInfo)
+VK_LAYER_EXPORT VkResult VKAPI vkBeginCommandBuffer(VkCmdBuffer cmdBuffer, const VkCmdBufferBeginInfo* pBeginInfo)
{
// This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
MT_CB_INFO* pCBInfo = getCBInfo(cmdBuffer);
@@ -1455,21 +1455,21 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkBeginCommandBuffer(VK_CMD_BUFFER cmdBuffer, co
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, cmdBuffer, 0, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM", str);
}
}
- VK_RESULT result = nextTable.BeginCommandBuffer(cmdBuffer, pBeginInfo);
+ VkResult result = nextTable.BeginCommandBuffer(cmdBuffer, pBeginInfo);
loader_platform_thread_lock_mutex(&globalLock);
freeCBBindings(cmdBuffer);
loader_platform_thread_unlock_mutex(&globalLock);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkEndCommandBuffer(VK_CMD_BUFFER cmdBuffer)
+VK_LAYER_EXPORT VkResult VKAPI vkEndCommandBuffer(VkCmdBuffer cmdBuffer)
{
// TODO : Anything to do here?
- VK_RESULT result = nextTable.EndCommandBuffer(cmdBuffer);
+ VkResult result = nextTable.EndCommandBuffer(cmdBuffer);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkResetCommandBuffer(VK_CMD_BUFFER cmdBuffer)
+VK_LAYER_EXPORT VkResult VKAPI vkResetCommandBuffer(VkCmdBuffer cmdBuffer)
{
// Verify that CB is complete (not in-flight)
MT_CB_INFO* pCBInfo = getCBInfo(cmdBuffer);
@@ -1485,12 +1485,12 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkResetCommandBuffer(VK_CMD_BUFFER cmdBuffer)
loader_platform_thread_lock_mutex(&globalLock);
freeCBBindings(cmdBuffer);
loader_platform_thread_unlock_mutex(&globalLock);
- VK_RESULT result = nextTable.ResetCommandBuffer(cmdBuffer);
+ VkResult result = nextTable.ResetCommandBuffer(cmdBuffer);
return result;
}
// TODO : For any vkCmdBind* calls that include an object which has mem bound to it,
// need to account for that mem now having binding to given cmdBuffer
-VK_LAYER_EXPORT void VKAPI vkCmdBindPipeline(VK_CMD_BUFFER cmdBuffer, VK_PIPELINE_BIND_POINT pipelineBindPoint, VK_PIPELINE pipeline)
+VK_LAYER_EXPORT void VKAPI vkCmdBindPipeline(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline)
{
#if 0
// TODO : If memory bound to pipeline, then need to tie that mem to cmdBuffer
@@ -1513,7 +1513,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdBindPipeline(VK_CMD_BUFFER cmdBuffer, VK_PIPELIN
nextTable.CmdBindPipeline(cmdBuffer, pipelineBindPoint, pipeline);
}
-VK_LAYER_EXPORT void VKAPI vkCmdBindDynamicStateObject(VK_CMD_BUFFER cmdBuffer, VK_STATE_BIND_POINT stateBindPoint, VK_DYNAMIC_STATE_OBJECT state)
+VK_LAYER_EXPORT void VKAPI vkCmdBindDynamicStateObject(VkCmdBuffer cmdBuffer, VkStateBindPoint stateBindPoint, VkDynamicStateObject state)
{
MT_OBJ_INFO *pObjInfo;
loader_platform_thread_lock_mutex(&globalLock);
@@ -1535,32 +1535,32 @@ VK_LAYER_EXPORT void VKAPI vkCmdBindDynamicStateObject(VK_CMD_BUFFER cmdBuffer,
}
VK_LAYER_EXPORT void VKAPI vkCmdBindDescriptorSets(
- VK_CMD_BUFFER cmdBuffer,
- VK_PIPELINE_BIND_POINT pipelineBindPoint,
- VK_DESCRIPTOR_SET_LAYOUT_CHAIN layoutChain,
+ VkCmdBuffer cmdBuffer,
+ VkPipelineBindPoint pipelineBindPoint,
+ VkDescriptorSetLayoutChain layoutChain,
uint32_t layoutChainSlot,
uint32_t count,
- const VK_DESCRIPTOR_SET* pDescriptorSets,
+ const VkDescriptorSet* pDescriptorSets,
const uint32_t* pUserData)
{
// TODO : Somewhere need to verify that all textures referenced by shaders in DS are in some type of *SHADER_READ* state
nextTable.CmdBindDescriptorSets(cmdBuffer, pipelineBindPoint, layoutChain, layoutChainSlot, count, pDescriptorSets, pUserData);
}
-VK_LAYER_EXPORT void VKAPI vkCmdBindVertexBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUFFER buffer, VK_GPU_SIZE offset, uint32_t binding)
+VK_LAYER_EXPORT void VKAPI vkCmdBindVertexBuffer(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset, uint32_t binding)
{
nextTable.CmdBindVertexBuffer(cmdBuffer, buffer, offset, binding);
}
-VK_LAYER_EXPORT void VKAPI vkCmdBindIndexBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUFFER buffer, VK_GPU_SIZE offset, VK_INDEX_TYPE indexType)
+VK_LAYER_EXPORT void VKAPI vkCmdBindIndexBuffer(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset, VkIndexType indexType)
{
nextTable.CmdBindIndexBuffer(cmdBuffer, buffer, offset, indexType);
}
-VK_LAYER_EXPORT void VKAPI vkCmdDrawIndirect(VK_CMD_BUFFER cmdBuffer, VK_BUFFER buffer, VK_GPU_SIZE offset, uint32_t count, uint32_t stride)
+VK_LAYER_EXPORT void VKAPI vkCmdDrawIndirect(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset, uint32_t count, uint32_t stride)
{
loader_platform_thread_lock_mutex(&globalLock);
- VK_GPU_MEMORY mem = getMemBindingFromObject(buffer);
+ VkGpuMemory mem = getMemBindingFromObject(buffer);
if (VK_FALSE == updateCBBinding(cmdBuffer, mem)) {
char str[1024];
sprintf(str, "In vkCmdDrawIndirect() call unable to update binding of buffer %p to cmdBuffer %p", buffer, cmdBuffer);
@@ -1570,10 +1570,10 @@ VK_LAYER_EXPORT void VKAPI vkCmdDrawIndirect(VK_CMD_BUFFER cmdBuffer, VK_BUFFER
nextTable.CmdDrawIndirect(cmdBuffer, buffer, offset, count, stride);
}
-VK_LAYER_EXPORT void VKAPI vkCmdDrawIndexedIndirect(VK_CMD_BUFFER cmdBuffer, VK_BUFFER buffer, VK_GPU_SIZE offset, uint32_t count, uint32_t stride)
+VK_LAYER_EXPORT void VKAPI vkCmdDrawIndexedIndirect(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset, uint32_t count, uint32_t stride)
{
loader_platform_thread_lock_mutex(&globalLock);
- VK_GPU_MEMORY mem = getMemBindingFromObject(buffer);
+ VkGpuMemory mem = getMemBindingFromObject(buffer);
if (VK_FALSE == updateCBBinding(cmdBuffer, mem)) {
char str[1024];
sprintf(str, "In vkCmdDrawIndexedIndirect() call unable to update binding of buffer %p to cmdBuffer %p", buffer, cmdBuffer);
@@ -1583,10 +1583,10 @@ VK_LAYER_EXPORT void VKAPI vkCmdDrawIndexedIndirect(VK_CMD_BUFFER cmdBuffer, VK_
nextTable.CmdDrawIndexedIndirect(cmdBuffer, buffer, offset, count, stride);
}
-VK_LAYER_EXPORT void VKAPI vkCmdDispatchIndirect(VK_CMD_BUFFER cmdBuffer, VK_BUFFER buffer, VK_GPU_SIZE offset)
+VK_LAYER_EXPORT void VKAPI vkCmdDispatchIndirect(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset)
{
loader_platform_thread_lock_mutex(&globalLock);
- VK_GPU_MEMORY mem = getMemBindingFromObject(buffer);
+ VkGpuMemory mem = getMemBindingFromObject(buffer);
if (VK_FALSE == updateCBBinding(cmdBuffer, mem)) {
char str[1024];
sprintf(str, "In vkCmdDispatchIndirect() call unable to update binding of buffer %p to cmdBuffer %p", buffer, cmdBuffer);
@@ -1596,11 +1596,11 @@ VK_LAYER_EXPORT void VKAPI vkCmdDispatchIndirect(VK_CMD_BUFFER cmdBuffer, VK_BUF
nextTable.CmdDispatchIndirect(cmdBuffer, buffer, offset);
}
-VK_LAYER_EXPORT void VKAPI vkCmdCopyBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUFFER srcBuffer, VK_BUFFER destBuffer,
- uint32_t regionCount, const VK_BUFFER_COPY* pRegions)
+VK_LAYER_EXPORT void VKAPI vkCmdCopyBuffer(VkCmdBuffer cmdBuffer, VkBuffer srcBuffer, VkBuffer destBuffer,
+ uint32_t regionCount, const VkBufferCopy* pRegions)
{
loader_platform_thread_lock_mutex(&globalLock);
- VK_GPU_MEMORY mem = getMemBindingFromObject(srcBuffer);
+ VkGpuMemory mem = getMemBindingFromObject(srcBuffer);
if (VK_FALSE == updateCBBinding(cmdBuffer, mem)) {
char str[1024];
sprintf(str, "In vkCmdCopyBuffer() call unable to update binding of srcBuffer %p to cmdBuffer %p", srcBuffer, cmdBuffer);
@@ -1616,32 +1616,32 @@ VK_LAYER_EXPORT void VKAPI vkCmdCopyBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUFFER sr
nextTable.CmdCopyBuffer(cmdBuffer, srcBuffer, destBuffer, regionCount, pRegions);
}
-VK_LAYER_EXPORT void VKAPI vkCmdCopyImage(VK_CMD_BUFFER cmdBuffer,
- VK_IMAGE srcImage, VK_IMAGE_LAYOUT srcImageLayout,
- VK_IMAGE destImage, VK_IMAGE_LAYOUT destImageLayout,
- uint32_t regionCount, const VK_IMAGE_COPY* pRegions)
+VK_LAYER_EXPORT void VKAPI vkCmdCopyImage(VkCmdBuffer cmdBuffer,
+ VkImage srcImage, VkImageLayout srcImageLayout,
+ VkImage destImage, VkImageLayout destImageLayout,
+ uint32_t regionCount, const VkImageCopy* pRegions)
{
// TODO : Each image will have mem mapping so track them
nextTable.CmdCopyImage(cmdBuffer, srcImage, srcImageLayout, destImage, destImageLayout, regionCount, pRegions);
}
-VK_LAYER_EXPORT void VKAPI vkCmdBlitImage(VK_CMD_BUFFER cmdBuffer,
- VK_IMAGE srcImage, VK_IMAGE_LAYOUT srcImageLayout,
- VK_IMAGE destImage, VK_IMAGE_LAYOUT destImageLayout,
- uint32_t regionCount, const VK_IMAGE_BLIT* pRegions)
+VK_LAYER_EXPORT void VKAPI vkCmdBlitImage(VkCmdBuffer cmdBuffer,
+ VkImage srcImage, VkImageLayout srcImageLayout,
+ VkImage destImage, VkImageLayout destImageLayout,
+ uint32_t regionCount, const VkImageBlit* pRegions)
{
// TODO : Each image will have mem mapping so track them
nextTable.CmdBlitImage(cmdBuffer, srcImage, srcImageLayout, destImage, destImageLayout, regionCount, pRegions);
}
-VK_LAYER_EXPORT void VKAPI vkCmdCopyBufferToImage(VK_CMD_BUFFER cmdBuffer,
- VK_BUFFER srcBuffer,
- VK_IMAGE destImage, VK_IMAGE_LAYOUT destImageLayout,
- uint32_t regionCount, const VK_BUFFER_IMAGE_COPY* pRegions)
+VK_LAYER_EXPORT void VKAPI vkCmdCopyBufferToImage(VkCmdBuffer cmdBuffer,
+ VkBuffer srcBuffer,
+ VkImage destImage, VkImageLayout destImageLayout,
+ uint32_t regionCount, const VkBufferImageCopy* pRegions)
{
// TODO : Track this
loader_platform_thread_lock_mutex(&globalLock);
- VK_GPU_MEMORY mem = getMemBindingFromObject(destImage);
+ VkGpuMemory mem = getMemBindingFromObject(destImage);
if (VK_FALSE == updateCBBinding(cmdBuffer, mem)) {
char str[1024];
sprintf(str, "In vkCmdCopyMemoryToImage() call unable to update binding of destImage buffer %p to cmdBuffer %p", destImage, cmdBuffer);
@@ -1658,14 +1658,14 @@ VK_LAYER_EXPORT void VKAPI vkCmdCopyBufferToImage(VK_CMD_BUFFER cmdBuffer,
nextTable.CmdCopyBufferToImage(cmdBuffer, srcBuffer, destImage, destImageLayout, regionCount, pRegions);
}
-VK_LAYER_EXPORT void VKAPI vkCmdCopyImageToBuffer(VK_CMD_BUFFER cmdBuffer,
- VK_IMAGE srcImage, VK_IMAGE_LAYOUT srcImageLayout,
- VK_BUFFER destBuffer,
- uint32_t regionCount, const VK_BUFFER_IMAGE_COPY* pRegions)
+VK_LAYER_EXPORT void VKAPI vkCmdCopyImageToBuffer(VkCmdBuffer cmdBuffer,
+ VkImage srcImage, VkImageLayout srcImageLayout,
+ VkBuffer destBuffer,
+ uint32_t regionCount, const VkBufferImageCopy* pRegions)
{
// TODO : Track this
loader_platform_thread_lock_mutex(&globalLock);
- VK_GPU_MEMORY mem = getMemBindingFromObject(srcImage);
+ VkGpuMemory mem = getMemBindingFromObject(srcImage);
if (VK_FALSE == updateCBBinding(cmdBuffer, mem)) {
char str[1024];
sprintf(str, "In vkCmdCopyImageToMemory() call unable to update binding of srcImage buffer %p to cmdBuffer %p", srcImage, cmdBuffer);
@@ -1681,12 +1681,12 @@ VK_LAYER_EXPORT void VKAPI vkCmdCopyImageToBuffer(VK_CMD_BUFFER cmdBuffer,
nextTable.CmdCopyImageToBuffer(cmdBuffer, srcImage, srcImageLayout, destBuffer, regionCount, pRegions);
}
-VK_LAYER_EXPORT void VKAPI vkCmdCloneImageData(VK_CMD_BUFFER cmdBuffer, VK_IMAGE srcImage, VK_IMAGE_LAYOUT srcImageLayout,
- VK_IMAGE destImage, VK_IMAGE_LAYOUT destImageLayout)
+VK_LAYER_EXPORT void VKAPI vkCmdCloneImageData(VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
+ VkImage destImage, VkImageLayout destImageLayout)
{
// TODO : Each image will have mem mapping so track them
loader_platform_thread_lock_mutex(&globalLock);
- VK_GPU_MEMORY mem = getMemBindingFromObject(srcImage);
+ VkGpuMemory mem = getMemBindingFromObject(srcImage);
if (VK_FALSE == updateCBBinding(cmdBuffer, mem)) {
char str[1024];
sprintf(str, "In vkCmdCloneImageData() call unable to update binding of srcImage buffer %p to cmdBuffer %p", srcImage, cmdBuffer);
@@ -1702,10 +1702,10 @@ VK_LAYER_EXPORT void VKAPI vkCmdCloneImageData(VK_CMD_BUFFER cmdBuffer, VK_IMAGE
nextTable.CmdCloneImageData(cmdBuffer, srcImage, srcImageLayout, destImage, destImageLayout);
}
-VK_LAYER_EXPORT void VKAPI vkCmdUpdateBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUFFER destBuffer, VK_GPU_SIZE destOffset, VK_GPU_SIZE dataSize, const uint32_t* pData)
+VK_LAYER_EXPORT void VKAPI vkCmdUpdateBuffer(VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkGpuSize destOffset, VkGpuSize dataSize, const uint32_t* pData)
{
loader_platform_thread_lock_mutex(&globalLock);
- VK_GPU_MEMORY mem = getMemBindingFromObject(destBuffer);
+ VkGpuMemory mem = getMemBindingFromObject(destBuffer);
if (VK_FALSE == updateCBBinding(cmdBuffer, mem)) {
char str[1024];
sprintf(str, "In vkCmdUpdateMemory() call unable to update binding of destBuffer %p to cmdBuffer %p", destBuffer, cmdBuffer);
@@ -1715,10 +1715,10 @@ VK_LAYER_EXPORT void VKAPI vkCmdUpdateBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUFFER
nextTable.CmdUpdateBuffer(cmdBuffer, destBuffer, destOffset, dataSize, pData);
}
-VK_LAYER_EXPORT void VKAPI vkCmdFillBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUFFER destBuffer, VK_GPU_SIZE destOffset, VK_GPU_SIZE fillSize, uint32_t data)
+VK_LAYER_EXPORT void VKAPI vkCmdFillBuffer(VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkGpuSize destOffset, VkGpuSize fillSize, uint32_t data)
{
loader_platform_thread_lock_mutex(&globalLock);
- VK_GPU_MEMORY mem = getMemBindingFromObject(destBuffer);
+ VkGpuMemory mem = getMemBindingFromObject(destBuffer);
if (VK_FALSE == updateCBBinding(cmdBuffer, mem)) {
char str[1024];
sprintf(str, "In vkCmdFillMemory() call unable to update binding of destBuffer %p to cmdBuffer %p", destBuffer, cmdBuffer);
@@ -1728,14 +1728,14 @@ VK_LAYER_EXPORT void VKAPI vkCmdFillBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUFFER de
nextTable.CmdFillBuffer(cmdBuffer, destBuffer, destOffset, fillSize, data);
}
-VK_LAYER_EXPORT void VKAPI vkCmdClearColorImage(VK_CMD_BUFFER cmdBuffer,
- VK_IMAGE image, VK_IMAGE_LAYOUT imageLayout,
- VK_CLEAR_COLOR color,
- uint32_t rangeCount, const VK_IMAGE_SUBRESOURCE_RANGE* pRanges)
+VK_LAYER_EXPORT void VKAPI vkCmdClearColorImage(VkCmdBuffer cmdBuffer,
+ VkImage image, VkImageLayout imageLayout,
+ VkClearColor color,
+ uint32_t rangeCount, const VkImageSubresourceRange* pRanges)
{
// TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
loader_platform_thread_lock_mutex(&globalLock);
- VK_GPU_MEMORY mem = getMemBindingFromObject(image);
+ VkGpuMemory mem = getMemBindingFromObject(image);
if (VK_FALSE == updateCBBinding(cmdBuffer, mem)) {
char str[1024];
sprintf(str, "In vkCmdClearColorImage() call unable to update binding of image buffer %p to cmdBuffer %p", image, cmdBuffer);
@@ -1745,14 +1745,14 @@ VK_LAYER_EXPORT void VKAPI vkCmdClearColorImage(VK_CMD_BUFFER cmdBuffer,
nextTable.CmdClearColorImage(cmdBuffer, image, imageLayout, color, rangeCount, pRanges);
}
-VK_LAYER_EXPORT void VKAPI vkCmdClearDepthStencil(VK_CMD_BUFFER cmdBuffer,
- VK_IMAGE image, VK_IMAGE_LAYOUT imageLayout,
+VK_LAYER_EXPORT void VKAPI vkCmdClearDepthStencil(VkCmdBuffer cmdBuffer,
+ VkImage image, VkImageLayout imageLayout,
float depth, uint32_t stencil,
- uint32_t rangeCount, const VK_IMAGE_SUBRESOURCE_RANGE* pRanges)
+ uint32_t rangeCount, const VkImageSubresourceRange* pRanges)
{
// TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
loader_platform_thread_lock_mutex(&globalLock);
- VK_GPU_MEMORY mem = getMemBindingFromObject(image);
+ VkGpuMemory mem = getMemBindingFromObject(image);
if (VK_FALSE == updateCBBinding(cmdBuffer, mem)) {
char str[1024];
sprintf(str, "In vkCmdClearDepthStencil() call unable to update binding of image buffer %p to cmdBuffer %p", image, cmdBuffer);
@@ -1762,13 +1762,13 @@ VK_LAYER_EXPORT void VKAPI vkCmdClearDepthStencil(VK_CMD_BUFFER cmdBuffer,
nextTable.CmdClearDepthStencil(cmdBuffer, image, imageLayout, depth, stencil, rangeCount, pRanges);
}
-VK_LAYER_EXPORT void VKAPI vkCmdResolveImage(VK_CMD_BUFFER cmdBuffer,
- VK_IMAGE srcImage, VK_IMAGE_LAYOUT srcImageLayout,
- VK_IMAGE destImage, VK_IMAGE_LAYOUT destImageLayout,
- uint32_t rectCount, const VK_IMAGE_RESOLVE* pRects)
+VK_LAYER_EXPORT void VKAPI vkCmdResolveImage(VkCmdBuffer cmdBuffer,
+ VkImage srcImage, VkImageLayout srcImageLayout,
+ VkImage destImage, VkImageLayout destImageLayout,
+ uint32_t rectCount, const VkImageResolve* pRects)
{
loader_platform_thread_lock_mutex(&globalLock);
- VK_GPU_MEMORY mem = getMemBindingFromObject(srcImage);
+ VkGpuMemory mem = getMemBindingFromObject(srcImage);
if (VK_FALSE == updateCBBinding(cmdBuffer, mem)) {
char str[1024];
sprintf(str, "In vkCmdResolveImage() call unable to update binding of srcImage buffer %p to cmdBuffer %p", srcImage, cmdBuffer);
@@ -1784,10 +1784,10 @@ VK_LAYER_EXPORT void VKAPI vkCmdResolveImage(VK_CMD_BUFFER cmdBuffer,
nextTable.CmdResolveImage(cmdBuffer, srcImage, srcImageLayout, destImage, destImageLayout, rectCount, pRects);
}
-VK_LAYER_EXPORT void VKAPI vkCmdBeginQuery(VK_CMD_BUFFER cmdBuffer, VK_QUERY_POOL queryPool, uint32_t slot, VK_FLAGS flags)
+VK_LAYER_EXPORT void VKAPI vkCmdBeginQuery(VkCmdBuffer cmdBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags)
{
loader_platform_thread_lock_mutex(&globalLock);
- VK_GPU_MEMORY mem = getMemBindingFromObject(queryPool);
+ VkGpuMemory mem = getMemBindingFromObject(queryPool);
if (VK_FALSE == updateCBBinding(cmdBuffer, mem)) {
char str[1024];
sprintf(str, "In vkCmdBeginQuery() call unable to update binding of queryPool buffer %p to cmdBuffer %p", queryPool, cmdBuffer);
@@ -1797,10 +1797,10 @@ VK_LAYER_EXPORT void VKAPI vkCmdBeginQuery(VK_CMD_BUFFER cmdBuffer, VK_QUERY_POO
nextTable.CmdBeginQuery(cmdBuffer, queryPool, slot, flags);
}
-VK_LAYER_EXPORT void VKAPI vkCmdEndQuery(VK_CMD_BUFFER cmdBuffer, VK_QUERY_POOL queryPool, uint32_t slot)
+VK_LAYER_EXPORT void VKAPI vkCmdEndQuery(VkCmdBuffer cmdBuffer, VkQueryPool queryPool, uint32_t slot)
{
loader_platform_thread_lock_mutex(&globalLock);
- VK_GPU_MEMORY mem = getMemBindingFromObject(queryPool);
+ VkGpuMemory mem = getMemBindingFromObject(queryPool);
if (VK_FALSE == updateCBBinding(cmdBuffer, mem)) {
char str[1024];
sprintf(str, "In vkCmdEndQuery() call unable to update binding of queryPool buffer %p to cmdBuffer %p", queryPool, cmdBuffer);
@@ -1810,10 +1810,10 @@ VK_LAYER_EXPORT void VKAPI vkCmdEndQuery(VK_CMD_BUFFER cmdBuffer, VK_QUERY_POOL
nextTable.CmdEndQuery(cmdBuffer, queryPool, slot);
}
-VK_LAYER_EXPORT void VKAPI vkCmdResetQueryPool(VK_CMD_BUFFER cmdBuffer, VK_QUERY_POOL queryPool, uint32_t startQuery, uint32_t queryCount)
+VK_LAYER_EXPORT void VKAPI vkCmdResetQueryPool(VkCmdBuffer cmdBuffer, VkQueryPool queryPool, uint32_t startQuery, uint32_t queryCount)
{
loader_platform_thread_lock_mutex(&globalLock);
- VK_GPU_MEMORY mem = getMemBindingFromObject(queryPool);
+ VkGpuMemory mem = getMemBindingFromObject(queryPool);
if (VK_FALSE == updateCBBinding(cmdBuffer, mem)) {
char str[1024];
sprintf(str, "In vkCmdResetQueryPool() call unable to update binding of queryPool buffer %p to cmdBuffer %p", queryPool, cmdBuffer);
@@ -1823,7 +1823,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdResetQueryPool(VK_CMD_BUFFER cmdBuffer, VK_QUERY
nextTable.CmdResetQueryPool(cmdBuffer, queryPool, startQuery, queryCount);
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkDbgRegisterMsgCallback(VK_INSTANCE instance, VK_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback, void* pUserData)
+VK_LAYER_EXPORT VkResult VKAPI vkDbgRegisterMsgCallback(VkInstance instance, VK_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback, void* pUserData)
{
// This layer intercepts callbacks
VK_LAYER_DBG_FUNCTION_NODE *pNewDbgFuncNode = (VK_LAYER_DBG_FUNCTION_NODE*)malloc(sizeof(VK_LAYER_DBG_FUNCTION_NODE));
@@ -1837,11 +1837,11 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkDbgRegisterMsgCallback(VK_INSTANCE instance, V
if (g_actionIsDefault) {
g_debugAction = VK_DBG_LAYER_ACTION_CALLBACK;
}
- VK_RESULT result = nextTable.DbgRegisterMsgCallback(instance, pfnMsgCallback, pUserData);
+ VkResult result = nextTable.DbgRegisterMsgCallback(instance, pfnMsgCallback, pUserData);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkDbgUnregisterMsgCallback(VK_INSTANCE instance, VK_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback)
+VK_LAYER_EXPORT VkResult VKAPI vkDbgUnregisterMsgCallback(VkInstance instance, VK_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback)
{
VK_LAYER_DBG_FUNCTION_NODE *pInfo = g_pDbgFunctionHead;
VK_LAYER_DBG_FUNCTION_NODE *pPrev = pInfo;
@@ -1864,19 +1864,19 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkDbgUnregisterMsgCallback(VK_INSTANCE instance,
g_debugAction = (VK_LAYER_DBG_ACTION)(g_debugAction & ~((uint32_t)VK_DBG_LAYER_ACTION_CALLBACK));
}
}
- VK_RESULT result = nextTable.DbgUnregisterMsgCallback(instance, pfnMsgCallback);
+ VkResult result = nextTable.DbgUnregisterMsgCallback(instance, pfnMsgCallback);
return result;
}
#if !defined(WIN32)
-VK_LAYER_EXPORT VK_RESULT VKAPI vkWsiX11CreatePresentableImage(VK_DEVICE device, const VK_WSI_X11_PRESENTABLE_IMAGE_CREATE_INFO* pCreateInfo,
- VK_IMAGE* pImage, VK_GPU_MEMORY* pMem)
+VK_LAYER_EXPORT VkResult VKAPI vkWsiX11CreatePresentableImage(VkDevice device, const VK_WSI_X11_PRESENTABLE_IMAGE_CREATE_INFO* pCreateInfo,
+ VkImage* pImage, VkGpuMemory* pMem)
{
- VK_RESULT result = nextTable.WsiX11CreatePresentableImage(device, pCreateInfo, pImage, pMem);
+ VkResult result = nextTable.WsiX11CreatePresentableImage(device, pCreateInfo, pImage, pMem);
loader_platform_thread_lock_mutex(&globalLock);
if (VK_SUCCESS == result) {
// Add image object, then insert the new Mem Object and then bind it to created image
- addObjectInfo(*pImage, _VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, sizeof(VK_WSI_X11_PRESENTABLE_IMAGE_CREATE_INFO), "wsi_x11_image");
+ addObjectInfo(*pImage, VkStructureType__MAX_ENUM, pCreateInfo, sizeof(VK_WSI_X11_PRESENTABLE_IMAGE_CREATE_INFO), "wsi_x11_image");
addMemObjInfo(*pMem, NULL);
if (VK_FALSE == updateObjectBinding(*pImage, *pMem)) {
char str[1024];
@@ -1890,7 +1890,7 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkWsiX11CreatePresentableImage(VK_DEVICE device,
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkWsiX11QueuePresent(VK_QUEUE queue, const VK_WSI_X11_PRESENT_INFO* pPresentInfo, VK_FENCE fence)
+VK_LAYER_EXPORT VkResult VKAPI vkWsiX11QueuePresent(VkQueue queue, const VK_WSI_X11_PRESENT_INFO* pPresentInfo, VkFence fence)
{
loader_platform_thread_lock_mutex(&globalLock);
addFenceInfo(fence, queue);
@@ -1898,12 +1898,12 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkWsiX11QueuePresent(VK_QUEUE queue, const VK_WS
sprintf(str, "In vkWsiX11QueuePresent(), checking queue %p for fence %p", queue, fence);
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, queue, 0, MEMTRACK_NONE, "MEM", str);
loader_platform_thread_unlock_mutex(&globalLock);
- VK_RESULT result = nextTable.WsiX11QueuePresent(queue, pPresentInfo, fence);
+ VkResult result = nextTable.WsiX11QueuePresent(queue, pPresentInfo, fence);
return result;
}
#endif // WIN32
-VK_LAYER_EXPORT void* VKAPI vkGetProcAddr(VK_PHYSICAL_GPU gpu, const char* funcName)
+VK_LAYER_EXPORT void* VKAPI vkGetProcAddr(VkPhysicalGpu gpu, const char* funcName)
{
VK_BASE_LAYER_OBJECT* gpuw = (VK_BASE_LAYER_OBJECT *) gpu;
@@ -2063,6 +2063,6 @@ VK_LAYER_EXPORT void* VKAPI vkGetProcAddr(VK_PHYSICAL_GPU gpu, const char* funcN
else {
if (gpuw->pGPA == NULL)
return NULL;
- return gpuw->pGPA((VK_PHYSICAL_GPU)gpuw->nextObject, funcName);
+ return gpuw->pGPA((VkPhysicalGpu)gpuw->nextObject, funcName);
}
}
diff --git a/layers/mem_tracker.h b/layers/mem_tracker.h
index d3a8608f..b0b9d6b8 100644
--- a/layers/mem_tracker.h
+++ b/layers/mem_tracker.h
@@ -85,29 +85,29 @@ typedef enum _MEM_TRACK_ERROR
// Data struct for tracking memory object
struct MT_MEM_OBJ_INFO {
uint32_t refCount; // Count of references (obj bindings or CB use)
- VK_GPU_MEMORY mem;
+ VkGpuMemory mem;
VkMemoryAllocInfo allocInfo;
- list<VK_OBJECT> pObjBindings; // list container of objects bound to this memory
- list<VK_CMD_BUFFER> pCmdBufferBindings; // list container of cmd buffers that reference this mem object
+ list<VkObject> pObjBindings; // list container of objects bound to this memory
+ list<VkCmdBuffer> pCmdBufferBindings; // list container of cmd buffers that reference this mem object
};
struct MT_OBJ_INFO {
MT_MEM_OBJ_INFO* pMemObjInfo;
- VK_OBJECT object;
- VK_STRUCTURE_TYPE sType;
+ VkObject object;
+ VkStructureType sType;
uint32_t ref_count;
// Capture all object types that may have memory bound. From prog guide:
// The only objects that are guaranteed to have no external memory
// requirements are devices, queues, command buffers, shaders and memory objects.
union {
- VK_COLOR_ATTACHMENT_VIEW_CREATE_INFO color_attachment_view_create_info;
- VK_DEPTH_STENCIL_VIEW_CREATE_INFO ds_view_create_info;
- VK_IMAGE_VIEW_CREATE_INFO image_view_create_info;
- VK_IMAGE_CREATE_INFO image_create_info;
- VK_GRAPHICS_PIPELINE_CREATE_INFO graphics_pipeline_create_info;
- VK_COMPUTE_PIPELINE_CREATE_INFO compute_pipeline_create_info;
- VK_SAMPLER_CREATE_INFO sampler_create_info;
- VK_FENCE_CREATE_INFO fence_create_info;
+ VkColorAttachmentViewCreateInfo color_attachment_view_create_info;
+ VkDepthStencilViewCreateInfo ds_view_create_info;
+ VkImageViewCreateInfo image_view_create_info;
+ VkImageCreateInfo image_create_info;
+ VkGraphicsPipelineCreateInfo graphics_pipeline_create_info;
+ VkComputePipelineCreateInfo compute_pipeline_create_info;
+ VkSamplerCreateInfo sampler_create_info;
+ VkFenceCreateInfo fence_create_info;
#ifndef _WIN32
VK_WSI_X11_PRESENTABLE_IMAGE_CREATE_INFO wsi_x11_presentable_image_create_info;
#endif // _WIN32
@@ -117,21 +117,21 @@ struct MT_OBJ_INFO {
// Track all command buffers
struct MT_CB_INFO {
- VK_CMD_BUFFER_CREATE_INFO createInfo;
+ VkCmdBufferCreateInfo createInfo;
MT_OBJ_INFO* pDynamicState[VK_NUM_STATE_BIND_POINT];
- VK_PIPELINE pipelines[VK_NUM_PIPELINE_BIND_POINT];
+ VkPipeline pipelines[VK_NUM_PIPELINE_BIND_POINT];
uint32_t colorAttachmentCount;
- VK_DEPTH_STENCIL_BIND_INFO dsBindInfo;
- VK_CMD_BUFFER cmdBuffer;
+ VkDepthStencilBindInfo dsBindInfo;
+ VkCmdBuffer cmdBuffer;
uint64_t fenceId;
// Order dependent, stl containers must be at end of struct
- list<VK_GPU_MEMORY> pMemObjList; // List container of Mem objs referenced by this CB
+ list<VkGpuMemory> pMemObjList; // List container of Mem objs referenced by this CB
};
// Associate fenceId with a fence object
struct MT_FENCE_INFO {
- VK_FENCE fence; // Handle to fence object
- VK_QUEUE queue; // Queue that this fence is submitted against
+ VkFence fence; // Handle to fence object
+ VkQueue queue; // Queue that this fence is submitted against
bool32_t localFence; // Is fence created by layer?
};
@@ -139,8 +139,8 @@ struct MT_FENCE_INFO {
struct MT_QUEUE_INFO {
uint64_t lastRetiredId;
uint64_t lastSubmittedId;
- list<VK_CMD_BUFFER> pQueueCmdBuffers;
- list<VK_GPU_MEMORY> pMemRefList;
+ list<VkCmdBuffer> pQueueCmdBuffers;
+ list<VkGpuMemory> pMemRefList;
};
#ifdef __cplusplus
diff --git a/layers/multi.cpp b/layers/multi.cpp
index 3f967d6a..1c9dc178 100644
--- a/layers/multi.cpp
+++ b/layers/multi.cpp
@@ -62,44 +62,44 @@ extern "C" {
#endif
-VK_LAYER_EXPORT VK_RESULT VKAPI multi1CreateDevice(VK_PHYSICAL_GPU gpu, const VkDeviceCreateInfo* pCreateInfo,
- VK_DEVICE* pDevice)
+VK_LAYER_EXPORT VkResult VKAPI multi1CreateDevice(VkPhysicalGpu gpu, const VkDeviceCreateInfo* pCreateInfo,
+ VkDevice* pDevice)
{
VK_BASE_LAYER_OBJECT* gpuw = (VK_BASE_LAYER_OBJECT *) gpu;
VK_LAYER_DISPATCH_TABLE* pTable = getLayer1Table(gpuw);
printf("At start of multi1 layer vkCreateDevice()\n");
- VK_RESULT result = pTable->CreateDevice((VK_PHYSICAL_GPU)gpuw->nextObject, pCreateInfo, pDevice);
+ VkResult result = pTable->CreateDevice((VkPhysicalGpu)gpuw->nextObject, pCreateInfo, pDevice);
// create a mapping for the device object into the dispatch table
tableMap1.emplace(*pDevice, pTable);
printf("Completed multi1 layer vkCreateDevice()\n");
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI multi1CreateGraphicsPipeline(VK_DEVICE device, const VK_GRAPHICS_PIPELINE_CREATE_INFO* pCreateInfo,
- VK_PIPELINE* pPipeline)
+VK_LAYER_EXPORT VkResult VKAPI multi1CreateGraphicsPipeline(VkDevice device, const VkGraphicsPipelineCreateInfo* pCreateInfo,
+ VkPipeline* pPipeline)
{
VK_LAYER_DISPATCH_TABLE* pTable = tableMap1[device];
printf("At start of multi1 layer vkCreateGraphicsPipeline()\n");
- VK_RESULT result = pTable->CreateGraphicsPipeline(device, pCreateInfo, pPipeline);
+ VkResult result = pTable->CreateGraphicsPipeline(device, pCreateInfo, pPipeline);
// create a mapping for the pipeline object into the dispatch table
tableMap1.emplace(*pPipeline, pTable);
printf("Completed multi1 layer vkCreateGraphicsPipeline()\n");
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI multi1StorePipeline(VK_PIPELINE pipeline, size_t* pDataSize, void* pData)
+VK_LAYER_EXPORT VkResult VKAPI multi1StorePipeline(VkPipeline pipeline, size_t* pDataSize, void* pData)
{
VK_LAYER_DISPATCH_TABLE* pTable = tableMap1[pipeline];
printf("At start of multi1 layer vkStorePipeline()\n");
- VK_RESULT result = pTable->StorePipeline(pipeline, pDataSize, pData);
+ VkResult result = pTable->StorePipeline(pipeline, pDataSize, pData);
printf("Completed multi1 layer vkStorePipeline()\n");
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI multi1EnumerateLayers(VK_PHYSICAL_GPU gpu, size_t maxLayerCount, size_t maxStringSize,
+VK_LAYER_EXPORT VkResult VKAPI multi1EnumerateLayers(VkPhysicalGpu gpu, size_t maxLayerCount, size_t maxStringSize,
size_t* pOutLayerCount, char* const* pOutLayers,
void* pReserved)
{
@@ -110,12 +110,12 @@ VK_LAYER_EXPORT VK_RESULT VKAPI multi1EnumerateLayers(VK_PHYSICAL_GPU gpu, size_
VK_LAYER_DISPATCH_TABLE* pTable = getLayer1Table(gpuw);
printf("At start of multi1 layer vkEnumerateLayers()\n");
- VK_RESULT result = pTable->EnumerateLayers((VK_PHYSICAL_GPU)gpuw->nextObject, maxLayerCount, maxStringSize, pOutLayerCount, pOutLayers, pReserved);
+ VkResult result = pTable->EnumerateLayers((VkPhysicalGpu)gpuw->nextObject, maxLayerCount, maxStringSize, pOutLayerCount, pOutLayers, pReserved);
printf("Completed multi1 layer vkEnumerateLayers()\n");
return result;
}
-VK_LAYER_EXPORT void * VKAPI multi1GetProcAddr(VK_PHYSICAL_GPU gpu, const char* pName)
+VK_LAYER_EXPORT void * VKAPI multi1GetProcAddr(VkPhysicalGpu gpu, const char* pName)
{
VK_BASE_LAYER_OBJECT* gpuw = (VK_BASE_LAYER_OBJECT *) gpu;
@@ -137,7 +137,7 @@ VK_LAYER_EXPORT void * VKAPI multi1GetProcAddr(VK_PHYSICAL_GPU gpu, const char*
else {
if (gpuw->pGPA == NULL)
return NULL;
- return gpuw->pGPA((VK_PHYSICAL_GPU) gpuw->nextObject, pName);
+ return gpuw->pGPA((VkPhysicalGpu) gpuw->nextObject, pName);
}
}
@@ -163,45 +163,45 @@ static VK_LAYER_DISPATCH_TABLE * getLayer2Table(const VK_BASE_LAYER_OBJECT *gpuw
}
}
-VK_LAYER_EXPORT VK_RESULT VKAPI multi2CreateDevice(VK_PHYSICAL_GPU gpu, const VkDeviceCreateInfo* pCreateInfo,
- VK_DEVICE* pDevice)
+VK_LAYER_EXPORT VkResult VKAPI multi2CreateDevice(VkPhysicalGpu gpu, const VkDeviceCreateInfo* pCreateInfo,
+ VkDevice* pDevice)
{
VK_BASE_LAYER_OBJECT* gpuw = (VK_BASE_LAYER_OBJECT *) gpu;
VK_LAYER_DISPATCH_TABLE* pTable = getLayer2Table(gpuw);
printf("At start of multi2 vkCreateDevice()\n");
- VK_RESULT result = pTable->CreateDevice((VK_PHYSICAL_GPU)gpuw->nextObject, pCreateInfo, pDevice);
+ VkResult result = pTable->CreateDevice((VkPhysicalGpu)gpuw->nextObject, pCreateInfo, pDevice);
// create a mapping for the device object into the dispatch table for layer2
tableMap2.emplace(*pDevice, pTable);
printf("Completed multi2 layer vkCreateDevice()\n");
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI multi2CreateCommandBuffer(VK_DEVICE device, const VK_CMD_BUFFER_CREATE_INFO* pCreateInfo,
- VK_CMD_BUFFER* pCmdBuffer)
+VK_LAYER_EXPORT VkResult VKAPI multi2CreateCommandBuffer(VkDevice device, const VkCmdBufferCreateInfo* pCreateInfo,
+ VkCmdBuffer* pCmdBuffer)
{
VK_LAYER_DISPATCH_TABLE* pTable = tableMap2[device];
printf("At start of multi2 layer vkCreateCommandBuffer()\n");
- VK_RESULT result = pTable->CreateCommandBuffer(device, pCreateInfo, pCmdBuffer);
+ VkResult result = pTable->CreateCommandBuffer(device, pCreateInfo, pCmdBuffer);
// create a mapping for CmdBuffer object into the dispatch table for layer 2
tableMap2.emplace(*pCmdBuffer, pTable);
printf("Completed multi2 layer vkCreateCommandBuffer()\n");
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI multi2BeginCommandBuffer(VK_CMD_BUFFER cmdBuffer, const VK_CMD_BUFFER_BEGIN_INFO* pBeginInfo)
+VK_LAYER_EXPORT VkResult VKAPI multi2BeginCommandBuffer(VkCmdBuffer cmdBuffer, const VkCmdBufferBeginInfo* pBeginInfo)
{
VK_LAYER_DISPATCH_TABLE* pTable = tableMap2[cmdBuffer];
printf("At start of multi2 layer vkBeginCommandBuffer()\n");
- VK_RESULT result = pTable->BeginCommandBuffer(cmdBuffer, pBeginInfo);
+ VkResult result = pTable->BeginCommandBuffer(cmdBuffer, pBeginInfo);
printf("Completed multi2 layer vkBeginCommandBuffer()\n");
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI multi2EnumerateLayers(VK_PHYSICAL_GPU gpu, size_t maxLayerCount, size_t maxStringSize,
+VK_LAYER_EXPORT VkResult VKAPI multi2EnumerateLayers(VkPhysicalGpu gpu, size_t maxLayerCount, size_t maxStringSize,
size_t* pOutLayerCount, char* const* pOutLayers,
void* pReserved)
{
@@ -212,12 +212,12 @@ VK_LAYER_EXPORT VK_RESULT VKAPI multi2EnumerateLayers(VK_PHYSICAL_GPU gpu, size_
VK_LAYER_DISPATCH_TABLE* pTable = getLayer2Table(gpuw);
printf("At start of multi2 layer vkEnumerateLayers()\n");
- VK_RESULT result = pTable->EnumerateLayers((VK_PHYSICAL_GPU)gpuw->nextObject, maxLayerCount, maxStringSize, pOutLayerCount, pOutLayers, pReserved);
+ VkResult result = pTable->EnumerateLayers((VkPhysicalGpu)gpuw->nextObject, maxLayerCount, maxStringSize, pOutLayerCount, pOutLayers, pReserved);
printf("Completed multi2 layer vkEnumerateLayers()\n");
return result;
}
-VK_LAYER_EXPORT void * VKAPI multi2GetProcAddr(VK_PHYSICAL_GPU gpu, const char* pName)
+VK_LAYER_EXPORT void * VKAPI multi2GetProcAddr(VkPhysicalGpu gpu, const char* pName)
{
VK_BASE_LAYER_OBJECT* gpuw = (VK_BASE_LAYER_OBJECT *) gpu;
@@ -239,12 +239,12 @@ VK_LAYER_EXPORT void * VKAPI multi2GetProcAddr(VK_PHYSICAL_GPU gpu, const char*
else {
if (gpuw->pGPA == NULL)
return NULL;
- return gpuw->pGPA((VK_PHYSICAL_GPU) gpuw->nextObject, pName);
+ return gpuw->pGPA((VkPhysicalGpu) gpuw->nextObject, pName);
}
}
/********************************* Common functions ********************************/
-VK_LAYER_EXPORT VK_RESULT VKAPI vkEnumerateLayers(VK_PHYSICAL_GPU gpu, size_t maxLayerCount, size_t maxStringSize,
+VK_LAYER_EXPORT VkResult VKAPI vkEnumerateLayers(VkPhysicalGpu gpu, size_t maxLayerCount, size_t maxStringSize,
size_t* pOutLayerCount, char* const* pOutLayers,
void* pReserved)
{
@@ -259,9 +259,9 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkEnumerateLayers(VK_PHYSICAL_GPU gpu, size_t ma
return VK_SUCCESS;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkGetExtensionSupport(VK_PHYSICAL_GPU gpu, const char* pExtName)
+VK_LAYER_EXPORT VkResult VKAPI vkGetExtensionSupport(VkPhysicalGpu gpu, const char* pExtName)
{
- VK_RESULT result;
+ VkResult result;
VK_BASE_LAYER_OBJECT* gpuw = (VK_BASE_LAYER_OBJECT *) gpu;
/* This entrypoint is NOT going to init it's own dispatch table since loader calls here early */
@@ -274,11 +274,11 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkGetExtensionSupport(VK_PHYSICAL_GPU gpu, const
} else if (!tableMap1.empty() && (tableMap1.find(gpuw) != tableMap1.end()))
{
VK_LAYER_DISPATCH_TABLE* pTable = tableMap1[gpuw];
- result = pTable->GetExtensionSupport((VK_PHYSICAL_GPU)gpuw->nextObject, pExtName);
+ result = pTable->GetExtensionSupport((VkPhysicalGpu)gpuw->nextObject, pExtName);
} else if (!tableMap2.empty() && (tableMap2.find(gpuw) != tableMap2.end()))
{
VK_LAYER_DISPATCH_TABLE* pTable = tableMap2[gpuw];
- result = pTable->GetExtensionSupport((VK_PHYSICAL_GPU)gpuw->nextObject, pExtName);
+ result = pTable->GetExtensionSupport((VkPhysicalGpu)gpuw->nextObject, pExtName);
} else
{
result = VK_ERROR_INVALID_EXTENSION;
@@ -286,7 +286,7 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkGetExtensionSupport(VK_PHYSICAL_GPU gpu, const
return result;
}
-VK_LAYER_EXPORT void * VKAPI vkGetProcAddr(VK_PHYSICAL_GPU gpu, const char* pName)
+VK_LAYER_EXPORT void * VKAPI vkGetProcAddr(VkPhysicalGpu gpu, const char* pName)
{
// to find each layers GPA routine Loader will search via "<layerName>GetProcAddr"
if (!strncmp("multi1GetProcAddr", pName, sizeof("multi1GetProcAddr")))
@@ -317,5 +317,5 @@ static void initLayerTable(const VK_BASE_LAYER_OBJECT *gpuw, VK_LAYER_DISPATCH_T
if (layerNum == 1 && layer2_first_activated == false)
layer1_first_activated = true;
- layer_initialize_dispatch_table(pTable, gpuw->pGPA, (VK_PHYSICAL_GPU) gpuw->nextObject);
+ layer_initialize_dispatch_table(pTable, gpuw->pGPA, (VkPhysicalGpu) gpuw->nextObject);
}
diff --git a/layers/object_track.h b/layers/object_track.h
index a0050c86..cc45d7e3 100644
--- a/layers/object_track.h
+++ b/layers/object_track.h
@@ -174,8 +174,8 @@ typedef struct _OBJTRACK_NODE {
// prototype for extension functions
uint64_t objTrackGetObjectCount(VK_OBJECT_TYPE type);
-VK_RESULT objTrackGetObjects(VK_OBJECT_TYPE type, uint64_t objCount, OBJTRACK_NODE* pObjNodeArray);
+VkResult objTrackGetObjects(VK_OBJECT_TYPE type, uint64_t objCount, OBJTRACK_NODE* pObjNodeArray);
// Func ptr typedefs
typedef uint64_t (*OBJ_TRACK_GET_OBJECT_COUNT)(VK_OBJECT_TYPE);
-typedef VK_RESULT (*OBJ_TRACK_GET_OBJECTS)(VK_OBJECT_TYPE, uint64_t, OBJTRACK_NODE*);
+typedef VkResult (*OBJ_TRACK_GET_OBJECTS)(VK_OBJECT_TYPE, uint64_t, OBJTRACK_NODE*);
diff --git a/layers/param_checker.cpp b/layers/param_checker.cpp
index 9be48ade..bd9646e1 100644
--- a/layers/param_checker.cpp
+++ b/layers/param_checker.cpp
@@ -64,18 +64,18 @@ static void initParamChecker(void)
g_logFile = stdout;
}
- vkGetProcAddrType fpNextGPA;
+ PFN_vkGetProcAddr fpNextGPA;
fpNextGPA = pCurObj->pGPA;
assert(fpNextGPA);
- layer_initialize_dispatch_table(&nextTable, fpNextGPA, (VK_PHYSICAL_GPU) pCurObj->nextObject);
+ layer_initialize_dispatch_table(&nextTable, fpNextGPA, (VkPhysicalGpu) pCurObj->nextObject);
}
-void PreCreateInstance(const VK_APPLICATION_INFO* pAppInfo, const VK_ALLOC_CALLBACKS* pAllocCb)
+void PreCreateInstance(const VkApplicationInfo* pAppInfo, const VkAllocCallbacks* pAllocCb)
{
if(pAppInfo == nullptr)
{
- char const str[] = "vkCreateInstance parameter, VK_APPLICATION_INFO* pAppInfo, is "\
+ char const str[] = "vkCreateInstance parameter, VkApplicationInfo* pAppInfo, is "\
"nullptr (precondition).";
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
@@ -97,7 +97,7 @@ void PreCreateInstance(const VK_APPLICATION_INFO* pAppInfo, const VK_ALLOC_CALLB
{
if(!vk_validate_vk_alloc_callbacks(pAllocCb))
{
- char const str[] = "vkCreateInstance parameter, VK_ALLOC_CALLBACKS* pAllocCb, "\
+ char const str[] = "vkCreateInstance parameter, VkAllocCallbacks* pAllocCb, "\
"contains an invalid value (precondition).";
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
@@ -105,11 +105,11 @@ void PreCreateInstance(const VK_APPLICATION_INFO* pAppInfo, const VK_ALLOC_CALLB
}
}
-void PostCreateInstance(VK_RESULT result, VK_INSTANCE* pInstance)
+void PostCreateInstance(VkResult result, VkInstance* pInstance)
{
if(result != VK_SUCCESS)
{
- // TODO: Spit out VK_RESULT value.
+ // TODO: Spit out VkResult value.
char const str[] = "vkCreateInstance failed (postcondition).";
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
@@ -117,36 +117,36 @@ void PostCreateInstance(VK_RESULT result, VK_INSTANCE* pInstance)
if(pInstance == nullptr)
{
- char const str[] = "vkCreateInstance parameter, VK_INSTANCE* pInstance, is nullptr "\
+ char const str[] = "vkCreateInstance parameter, VkInstance* pInstance, is nullptr "\
"(postcondition).";
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
}
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateInstance(const VkInstanceCreateInfo* pCreateInfo, VK_INSTANCE* pInstance)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateInstance(const VkInstanceCreateInfo* pCreateInfo, VkInstance* pInstance)
{
PreCreateInstance(pCreateInfo->pAppInfo, pCreateInfo->pAllocCb);
- VK_RESULT result = nextTable.CreateInstance(pCreateInfo, pInstance);
+ VkResult result = nextTable.CreateInstance(pCreateInfo, pInstance);
PostCreateInstance(result, pInstance);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkDestroyInstance(VK_INSTANCE instance)
+VK_LAYER_EXPORT VkResult VKAPI vkDestroyInstance(VkInstance instance)
{
- VK_RESULT result = nextTable.DestroyInstance(instance);
+ VkResult result = nextTable.DestroyInstance(instance);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkEnumerateGpus(VK_INSTANCE instance, uint32_t maxGpus, uint32_t* pGpuCount, VK_PHYSICAL_GPU* pGpus)
+VK_LAYER_EXPORT VkResult VKAPI vkEnumerateGpus(VkInstance instance, uint32_t maxGpus, uint32_t* pGpuCount, VkPhysicalGpu* pGpus)
{
- VK_RESULT result = nextTable.EnumerateGpus(instance, maxGpus, pGpuCount, pGpus);
+ VkResult result = nextTable.EnumerateGpus(instance, maxGpus, pGpuCount, pGpus);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkGetGpuInfo(VK_PHYSICAL_GPU gpu, VK_PHYSICAL_GPU_INFO_TYPE infoType, size_t* pDataSize, void* pData)
+VK_LAYER_EXPORT VkResult VKAPI vkGetGpuInfo(VkPhysicalGpu gpu, VkPhysicalGpuInfoType infoType, size_t* pDataSize, void* pData)
{
VK_BASE_LAYER_OBJECT* gpuw = (VK_BASE_LAYER_OBJECT *) gpu;
pCurObj = gpuw;
@@ -156,15 +156,15 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkGetGpuInfo(VK_PHYSICAL_GPU gpu, VK_PHYSICAL_GP
sprintf(str, "Parameter infoType to function GetGpuInfo has invalid value of %i.", (int)infoType);
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.GetGpuInfo((VK_PHYSICAL_GPU)gpuw->nextObject, infoType, pDataSize, pData);
+ VkResult result = nextTable.GetGpuInfo((VkPhysicalGpu)gpuw->nextObject, infoType, pDataSize, pData);
return result;
}
-void PreCreateDevice(VK_PHYSICAL_GPU gpu, const VkDeviceCreateInfo* pCreateInfo)
+void PreCreateDevice(VkPhysicalGpu gpu, const VkDeviceCreateInfo* pCreateInfo)
{
if(gpu == nullptr)
{
- char const str[] = "vkCreateDevice parameter, VK_PHYSICAL_GPU gpu, is nullptr "\
+ char const str[] = "vkCreateDevice parameter, VkPhysicalGpu gpu, is nullptr "\
"(precondition).";
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
@@ -216,18 +216,18 @@ void PreCreateDevice(VK_PHYSICAL_GPU gpu, const VkDeviceCreateInfo* pCreateInfo)
if(!validate_VK_VALIDATION_LEVEL(pCreateInfo->maxValidationLevel))
{
- char const str[] = "vkCreateDevice parameter, VK_VALIDATION_LEVEL pCreateInfo->maxValidationLevel, is "\
+ char const str[] = "vkCreateDevice parameter, VkValidationLevel pCreateInfo->maxValidationLevel, is "\
"unrecognized (precondition).";
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
}
}
-void PostCreateDevice(VK_RESULT result, VK_DEVICE* pDevice)
+void PostCreateDevice(VkResult result, VkDevice* pDevice)
{
if(result != VK_SUCCESS)
{
- // TODO: Spit out VK_RESULT value.
+ // TODO: Spit out VkResult value.
char const str[] = "vkCreateDevice failed (postcondition).";
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
@@ -235,41 +235,41 @@ void PostCreateDevice(VK_RESULT result, VK_DEVICE* pDevice)
if(pDevice == nullptr)
{
- char const str[] = "vkCreateDevice parameter, VK_DEVICE* pDevice, is nullptr (postcondition).";
+ char const str[] = "vkCreateDevice parameter, VkDevice* pDevice, is nullptr (postcondition).";
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
}
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDevice(VK_PHYSICAL_GPU gpu, const VkDeviceCreateInfo* pCreateInfo, VK_DEVICE* pDevice)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDevice(VkPhysicalGpu gpu, const VkDeviceCreateInfo* pCreateInfo, VkDevice* pDevice)
{
VK_BASE_LAYER_OBJECT* gpuw = (VK_BASE_LAYER_OBJECT *) gpu;
pCurObj = gpuw;
loader_platform_thread_once(&tabOnce, initParamChecker);
PreCreateDevice(gpu, pCreateInfo);
- VK_RESULT result = nextTable.CreateDevice((VK_PHYSICAL_GPU)gpuw->nextObject, pCreateInfo, pDevice);
+ VkResult result = nextTable.CreateDevice((VkPhysicalGpu)gpuw->nextObject, pCreateInfo, pDevice);
PostCreateDevice(result, pDevice);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkDestroyDevice(VK_DEVICE device)
+VK_LAYER_EXPORT VkResult VKAPI vkDestroyDevice(VkDevice device)
{
- VK_RESULT result = nextTable.DestroyDevice(device);
+ VkResult result = nextTable.DestroyDevice(device);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkGetExtensionSupport(VK_PHYSICAL_GPU gpu, const char* pExtName)
+VK_LAYER_EXPORT VkResult VKAPI vkGetExtensionSupport(VkPhysicalGpu gpu, const char* pExtName)
{
VK_BASE_LAYER_OBJECT* gpuw = (VK_BASE_LAYER_OBJECT *) gpu;
pCurObj = gpuw;
loader_platform_thread_once(&tabOnce, initParamChecker);
- VK_RESULT result = nextTable.GetExtensionSupport((VK_PHYSICAL_GPU)gpuw->nextObject, pExtName);
+ VkResult result = nextTable.GetExtensionSupport((VkPhysicalGpu)gpuw->nextObject, pExtName);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkEnumerateLayers(VK_PHYSICAL_GPU gpu, size_t maxLayerCount, size_t maxStringSize, size_t* pOutLayerCount, char* const* pOutLayers, void* pReserved)
+VK_LAYER_EXPORT VkResult VKAPI vkEnumerateLayers(VkPhysicalGpu gpu, size_t maxLayerCount, size_t maxStringSize, size_t* pOutLayerCount, char* const* pOutLayers, void* pReserved)
{
char str[1024];
if (gpu != NULL) {
@@ -278,7 +278,7 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkEnumerateLayers(VK_PHYSICAL_GPU gpu, size_t ma
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, nullptr, 0, 0, "PARAMCHECK", str);
pCurObj = gpuw;
loader_platform_thread_once(&tabOnce, initParamChecker);
- VK_RESULT result = nextTable.EnumerateLayers((VK_PHYSICAL_GPU)gpuw->nextObject, maxLayerCount, maxStringSize, pOutLayerCount, pOutLayers, pReserved);
+ VkResult result = nextTable.EnumerateLayers((VkPhysicalGpu)gpuw->nextObject, maxLayerCount, maxStringSize, pOutLayerCount, pOutLayers, pReserved);
sprintf(str, "Completed layered EnumerateLayers\n");
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, nullptr, 0, 0, "PARAMCHECK", str);
fflush(stdout);
@@ -293,45 +293,45 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkEnumerateLayers(VK_PHYSICAL_GPU gpu, size_t ma
}
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkGetDeviceQueue(VK_DEVICE device, uint32_t queueNodeIndex, uint32_t queueIndex, VK_QUEUE* pQueue)
+VK_LAYER_EXPORT VkResult VKAPI vkGetDeviceQueue(VkDevice device, uint32_t queueNodeIndex, uint32_t queueIndex, VkQueue* pQueue)
{
- VK_RESULT result = nextTable.GetDeviceQueue(device, queueNodeIndex, queueIndex, pQueue);
+ VkResult result = nextTable.GetDeviceQueue(device, queueNodeIndex, queueIndex, pQueue);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkQueueSubmit(VK_QUEUE queue, uint32_t cmdBufferCount, const VK_CMD_BUFFER* pCmdBuffers, VK_FENCE fence)
+VK_LAYER_EXPORT VkResult VKAPI vkQueueSubmit(VkQueue queue, uint32_t cmdBufferCount, const VkCmdBuffer* pCmdBuffers, VkFence fence)
{
- VK_RESULT result = nextTable.QueueSubmit(queue, cmdBufferCount, pCmdBuffers, fence);
+ VkResult result = nextTable.QueueSubmit(queue, cmdBufferCount, pCmdBuffers, fence);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkQueueAddMemReference(VK_QUEUE queue, VK_GPU_MEMORY mem)
+VK_LAYER_EXPORT VkResult VKAPI vkQueueAddMemReference(VkQueue queue, VkGpuMemory mem)
{
- VK_RESULT result = nextTable.QueueAddMemReference(queue, mem);
+ VkResult result = nextTable.QueueAddMemReference(queue, mem);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkQueueRemoveMemReference(VK_QUEUE queue, VK_GPU_MEMORY mem)
+VK_LAYER_EXPORT VkResult VKAPI vkQueueRemoveMemReference(VkQueue queue, VkGpuMemory mem)
{
- VK_RESULT result = nextTable.QueueRemoveMemReference(queue, mem);
+ VkResult result = nextTable.QueueRemoveMemReference(queue, mem);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkQueueWaitIdle(VK_QUEUE queue)
+VK_LAYER_EXPORT VkResult VKAPI vkQueueWaitIdle(VkQueue queue)
{
- VK_RESULT result = nextTable.QueueWaitIdle(queue);
+ VkResult result = nextTable.QueueWaitIdle(queue);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkDeviceWaitIdle(VK_DEVICE device)
+VK_LAYER_EXPORT VkResult VKAPI vkDeviceWaitIdle(VkDevice device)
{
- VK_RESULT result = nextTable.DeviceWaitIdle(device);
+ VkResult result = nextTable.DeviceWaitIdle(device);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkAllocMemory(VK_DEVICE device, const VkMemoryAllocInfo* pAllocInfo, VK_GPU_MEMORY* pMem)
+VK_LAYER_EXPORT VkResult VKAPI vkAllocMemory(VkDevice device, const VkMemoryAllocInfo* pAllocInfo, VkGpuMemory* pMem)
{
char str[1024];
if (!pAllocInfo) {
@@ -341,60 +341,60 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkAllocMemory(VK_DEVICE device, const VkMemoryAl
sprintf(str, "Parameter pAllocInfo to function AllocMemory contains an invalid value.");
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.AllocMemory(device, pAllocInfo, pMem);
+ VkResult result = nextTable.AllocMemory(device, pAllocInfo, pMem);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkFreeMemory(VK_GPU_MEMORY mem)
+VK_LAYER_EXPORT VkResult VKAPI vkFreeMemory(VkGpuMemory mem)
{
- VK_RESULT result = nextTable.FreeMemory(mem);
+ VkResult result = nextTable.FreeMemory(mem);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkSetMemoryPriority(VK_GPU_MEMORY mem, VK_MEMORY_PRIORITY priority)
+VK_LAYER_EXPORT VkResult VKAPI vkSetMemoryPriority(VkGpuMemory mem, VkMemoryPriority priority)
{
char str[1024];
if (!validate_VK_MEMORY_PRIORITY(priority)) {
sprintf(str, "Parameter priority to function SetMemoryPriority has invalid value of %i.", (int)priority);
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.SetMemoryPriority(mem, priority);
+ VkResult result = nextTable.SetMemoryPriority(mem, priority);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkMapMemory(VK_GPU_MEMORY mem, VK_FLAGS flags, void** ppData)
+VK_LAYER_EXPORT VkResult VKAPI vkMapMemory(VkGpuMemory mem, VkFlags flags, void** ppData)
{
- VK_RESULT result = nextTable.MapMemory(mem, flags, ppData);
+ VkResult result = nextTable.MapMemory(mem, flags, ppData);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkUnmapMemory(VK_GPU_MEMORY mem)
+VK_LAYER_EXPORT VkResult VKAPI vkUnmapMemory(VkGpuMemory mem)
{
- VK_RESULT result = nextTable.UnmapMemory(mem);
+ VkResult result = nextTable.UnmapMemory(mem);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkPinSystemMemory(VK_DEVICE device, const void* pSysMem, size_t memSize, VK_GPU_MEMORY* pMem)
+VK_LAYER_EXPORT VkResult VKAPI vkPinSystemMemory(VkDevice device, const void* pSysMem, size_t memSize, VkGpuMemory* pMem)
{
- VK_RESULT result = nextTable.PinSystemMemory(device, pSysMem, memSize, pMem);
+ VkResult result = nextTable.PinSystemMemory(device, pSysMem, memSize, pMem);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkGetMultiGpuCompatibility(VK_PHYSICAL_GPU gpu0, VK_PHYSICAL_GPU gpu1, VK_GPU_COMPATIBILITY_INFO* pInfo)
+VK_LAYER_EXPORT VkResult VKAPI vkGetMultiGpuCompatibility(VkPhysicalGpu gpu0, VkPhysicalGpu gpu1, VkGpuCompatibilityInfo* pInfo)
{
VK_BASE_LAYER_OBJECT* gpuw = (VK_BASE_LAYER_OBJECT *) gpu0;
pCurObj = gpuw;
loader_platform_thread_once(&tabOnce, initParamChecker);
- VK_RESULT result = nextTable.GetMultiGpuCompatibility((VK_PHYSICAL_GPU)gpuw->nextObject, gpu1, pInfo);
+ VkResult result = nextTable.GetMultiGpuCompatibility((VkPhysicalGpu)gpuw->nextObject, gpu1, pInfo);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkOpenSharedMemory(VK_DEVICE device, const VK_MEMORY_OPEN_INFO* pOpenInfo, VK_GPU_MEMORY* pMem)
+VK_LAYER_EXPORT VkResult VKAPI vkOpenSharedMemory(VkDevice device, const VkMemoryOpenInfo* pOpenInfo, VkGpuMemory* pMem)
{
char str[1024];
if (!pOpenInfo) {
@@ -405,11 +405,11 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkOpenSharedMemory(VK_DEVICE device, const VK_ME
sprintf(str, "Parameter pOpenInfo to function OpenSharedMemory contains an invalid value.");
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.OpenSharedMemory(device, pOpenInfo, pMem);
+ VkResult result = nextTable.OpenSharedMemory(device, pOpenInfo, pMem);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkOpenSharedSemaphore(VK_DEVICE device, const VK_SEMAPHORE_OPEN_INFO* pOpenInfo, VK_SEMAPHORE* pSemaphore)
+VK_LAYER_EXPORT VkResult VKAPI vkOpenSharedSemaphore(VkDevice device, const VkSemaphoreOpenInfo* pOpenInfo, VkSemaphore* pSemaphore)
{
char str[1024];
if (!pOpenInfo) {
@@ -420,11 +420,11 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkOpenSharedSemaphore(VK_DEVICE device, const VK
sprintf(str, "Parameter pOpenInfo to function OpenSharedSemaphore contains an invalid value.");
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.OpenSharedSemaphore(device, pOpenInfo, pSemaphore);
+ VkResult result = nextTable.OpenSharedSemaphore(device, pOpenInfo, pSemaphore);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkOpenPeerMemory(VK_DEVICE device, const VK_PEER_MEMORY_OPEN_INFO* pOpenInfo, VK_GPU_MEMORY* pMem)
+VK_LAYER_EXPORT VkResult VKAPI vkOpenPeerMemory(VkDevice device, const VkPeerMemoryOpenInfo* pOpenInfo, VkGpuMemory* pMem)
{
char str[1024];
if (!pOpenInfo) {
@@ -435,11 +435,11 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkOpenPeerMemory(VK_DEVICE device, const VK_PEER
sprintf(str, "Parameter pOpenInfo to function OpenPeerMemory contains an invalid value.");
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.OpenPeerMemory(device, pOpenInfo, pMem);
+ VkResult result = nextTable.OpenPeerMemory(device, pOpenInfo, pMem);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkOpenPeerImage(VK_DEVICE device, const VK_PEER_IMAGE_OPEN_INFO* pOpenInfo, VK_IMAGE* pImage, VK_GPU_MEMORY* pMem)
+VK_LAYER_EXPORT VkResult VKAPI vkOpenPeerImage(VkDevice device, const VkPeerImageOpenInfo* pOpenInfo, VkImage* pImage, VkGpuMemory* pMem)
{
char str[1024];
if (!pOpenInfo) {
@@ -450,43 +450,43 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkOpenPeerImage(VK_DEVICE device, const VK_PEER_
sprintf(str, "Parameter pOpenInfo to function OpenPeerImage contains an invalid value.");
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.OpenPeerImage(device, pOpenInfo, pImage, pMem);
+ VkResult result = nextTable.OpenPeerImage(device, pOpenInfo, pImage, pMem);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkDestroyObject(VK_OBJECT object)
+VK_LAYER_EXPORT VkResult VKAPI vkDestroyObject(VkObject object)
{
- VK_RESULT result = nextTable.DestroyObject(object);
+ VkResult result = nextTable.DestroyObject(object);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkGetObjectInfo(VK_BASE_OBJECT object, VK_OBJECT_INFO_TYPE infoType, size_t* pDataSize, void* pData)
+VK_LAYER_EXPORT VkResult VKAPI vkGetObjectInfo(VkBaseObject object, VkObjectInfoType infoType, size_t* pDataSize, void* pData)
{
char str[1024];
if (!validate_VK_OBJECT_INFO_TYPE(infoType)) {
sprintf(str, "Parameter infoType to function GetObjectInfo has invalid value of %i.", (int)infoType);
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.GetObjectInfo(object, infoType, pDataSize, pData);
+ VkResult result = nextTable.GetObjectInfo(object, infoType, pDataSize, pData);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkBindObjectMemory(VK_OBJECT object, uint32_t allocationIdx, VK_GPU_MEMORY mem, VK_GPU_SIZE offset)
+VK_LAYER_EXPORT VkResult VKAPI vkBindObjectMemory(VkObject object, uint32_t allocationIdx, VkGpuMemory mem, VkGpuSize offset)
{
- VK_RESULT result = nextTable.BindObjectMemory(object, allocationIdx, mem, offset);
+ VkResult result = nextTable.BindObjectMemory(object, allocationIdx, mem, offset);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkBindObjectMemoryRange(VK_OBJECT object, uint32_t allocationIdx, VK_GPU_SIZE rangeOffset, VK_GPU_SIZE rangeSize, VK_GPU_MEMORY mem, VK_GPU_SIZE memOffset)
+VK_LAYER_EXPORT VkResult VKAPI vkBindObjectMemoryRange(VkObject object, uint32_t allocationIdx, VkGpuSize rangeOffset, VkGpuSize rangeSize, VkGpuMemory mem, VkGpuSize memOffset)
{
- VK_RESULT result = nextTable.BindObjectMemoryRange(object, allocationIdx, rangeOffset, rangeSize, mem, memOffset);
+ VkResult result = nextTable.BindObjectMemoryRange(object, allocationIdx, rangeOffset, rangeSize, mem, memOffset);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkBindImageMemoryRange(VK_IMAGE image, uint32_t allocationIdx, const VK_IMAGE_MEMORY_BIND_INFO* bindInfo, VK_GPU_MEMORY mem, VK_GPU_SIZE memOffset)
+VK_LAYER_EXPORT VkResult VKAPI vkBindImageMemoryRange(VkImage image, uint32_t allocationIdx, const VkImageMemoryBindInfo* bindInfo, VkGpuMemory mem, VkGpuSize memOffset)
{
char str[1024];
if (!bindInfo) {
@@ -497,11 +497,11 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkBindImageMemoryRange(VK_IMAGE image, uint32_t
sprintf(str, "Parameter bindInfo to function BindImageMemoryRange contains an invalid value.");
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.BindImageMemoryRange(image, allocationIdx, bindInfo, mem, memOffset);
+ VkResult result = nextTable.BindImageMemoryRange(image, allocationIdx, bindInfo, mem, memOffset);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateFence(VK_DEVICE device, const VK_FENCE_CREATE_INFO* pCreateInfo, VK_FENCE* pFence)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateFence(VkDevice device, const VkFenceCreateInfo* pCreateInfo, VkFence* pFence)
{
char str[1024];
if (!pCreateInfo) {
@@ -512,32 +512,32 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateFence(VK_DEVICE device, const VK_FENCE_C
sprintf(str, "Parameter pCreateInfo to function CreateFence contains an invalid value.");
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.CreateFence(device, pCreateInfo, pFence);
+ VkResult result = nextTable.CreateFence(device, pCreateInfo, pFence);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkGetFenceStatus(VK_FENCE fence)
+VK_LAYER_EXPORT VkResult VKAPI vkGetFenceStatus(VkFence fence)
{
- VK_RESULT result = nextTable.GetFenceStatus(fence);
+ VkResult result = nextTable.GetFenceStatus(fence);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkWaitForFences(VK_DEVICE device, uint32_t fenceCount, const VK_FENCE* pFences, bool32_t waitAll, uint64_t timeout)
+VK_LAYER_EXPORT VkResult VKAPI vkWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence* pFences, bool32_t waitAll, uint64_t timeout)
{
- VK_RESULT result = nextTable.WaitForFences(device, fenceCount, pFences, waitAll, timeout);
+ VkResult result = nextTable.WaitForFences(device, fenceCount, pFences, waitAll, timeout);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkResetFences(VK_DEVICE device, uint32_t fenceCount, VK_FENCE* pFences)
+VK_LAYER_EXPORT VkResult VKAPI vkResetFences(VkDevice device, uint32_t fenceCount, VkFence* pFences)
{
- VK_RESULT result = nextTable.ResetFences(device, fenceCount, pFences);
+ VkResult result = nextTable.ResetFences(device, fenceCount, pFences);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateSemaphore(VK_DEVICE device, const VK_SEMAPHORE_CREATE_INFO* pCreateInfo, VK_SEMAPHORE* pSemaphore)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo, VkSemaphore* pSemaphore)
{
char str[1024];
if (!pCreateInfo) {
@@ -548,25 +548,25 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateSemaphore(VK_DEVICE device, const VK_SEM
sprintf(str, "Parameter pCreateInfo to function CreateSemaphore contains an invalid value.");
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.CreateSemaphore(device, pCreateInfo, pSemaphore);
+ VkResult result = nextTable.CreateSemaphore(device, pCreateInfo, pSemaphore);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkQueueSignalSemaphore(VK_QUEUE queue, VK_SEMAPHORE semaphore)
+VK_LAYER_EXPORT VkResult VKAPI vkQueueSignalSemaphore(VkQueue queue, VkSemaphore semaphore)
{
- VK_RESULT result = nextTable.QueueSignalSemaphore(queue, semaphore);
+ VkResult result = nextTable.QueueSignalSemaphore(queue, semaphore);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkQueueWaitSemaphore(VK_QUEUE queue, VK_SEMAPHORE semaphore)
+VK_LAYER_EXPORT VkResult VKAPI vkQueueWaitSemaphore(VkQueue queue, VkSemaphore semaphore)
{
- VK_RESULT result = nextTable.QueueWaitSemaphore(queue, semaphore);
+ VkResult result = nextTable.QueueWaitSemaphore(queue, semaphore);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateEvent(VK_DEVICE device, const VK_EVENT_CREATE_INFO* pCreateInfo, VK_EVENT* pEvent)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateEvent(VkDevice device, const VkEventCreateInfo* pCreateInfo, VkEvent* pEvent)
{
char str[1024];
if (!pCreateInfo) {
@@ -577,32 +577,32 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateEvent(VK_DEVICE device, const VK_EVENT_C
sprintf(str, "Parameter pCreateInfo to function CreateEvent contains an invalid value.");
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.CreateEvent(device, pCreateInfo, pEvent);
+ VkResult result = nextTable.CreateEvent(device, pCreateInfo, pEvent);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkGetEventStatus(VK_EVENT event)
+VK_LAYER_EXPORT VkResult VKAPI vkGetEventStatus(VkEvent event)
{
- VK_RESULT result = nextTable.GetEventStatus(event);
+ VkResult result = nextTable.GetEventStatus(event);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkSetEvent(VK_EVENT event)
+VK_LAYER_EXPORT VkResult VKAPI vkSetEvent(VkEvent event)
{
- VK_RESULT result = nextTable.SetEvent(event);
+ VkResult result = nextTable.SetEvent(event);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkResetEvent(VK_EVENT event)
+VK_LAYER_EXPORT VkResult VKAPI vkResetEvent(VkEvent event)
{
- VK_RESULT result = nextTable.ResetEvent(event);
+ VkResult result = nextTable.ResetEvent(event);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateQueryPool(VK_DEVICE device, const VK_QUERY_POOL_CREATE_INFO* pCreateInfo, VK_QUERY_POOL* pQueryPool)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo, VkQueryPool* pQueryPool)
{
char str[1024];
if (!pCreateInfo) {
@@ -613,18 +613,18 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateQueryPool(VK_DEVICE device, const VK_QUE
sprintf(str, "Parameter pCreateInfo to function CreateQueryPool contains an invalid value.");
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.CreateQueryPool(device, pCreateInfo, pQueryPool);
+ VkResult result = nextTable.CreateQueryPool(device, pCreateInfo, pQueryPool);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkGetQueryPoolResults(VK_QUERY_POOL queryPool, uint32_t startQuery, uint32_t queryCount, size_t* pDataSize, void* pData)
+VK_LAYER_EXPORT VkResult VKAPI vkGetQueryPoolResults(VkQueryPool queryPool, uint32_t startQuery, uint32_t queryCount, size_t* pDataSize, void* pData)
{
- VK_RESULT result = nextTable.GetQueryPoolResults(queryPool, startQuery, queryCount, pDataSize, pData);
+ VkResult result = nextTable.GetQueryPoolResults(queryPool, startQuery, queryCount, pDataSize, pData);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkGetFormatInfo(VK_DEVICE device, VK_FORMAT format, VK_FORMAT_INFO_TYPE infoType, size_t* pDataSize, void* pData)
+VK_LAYER_EXPORT VkResult VKAPI vkGetFormatInfo(VkDevice device, VkFormat format, VkFormatInfoType infoType, size_t* pDataSize, void* pData)
{
char str[1024];
if (!validate_VK_FORMAT(format)) {
@@ -635,11 +635,11 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkGetFormatInfo(VK_DEVICE device, VK_FORMAT form
sprintf(str, "Parameter infoType to function GetFormatInfo has invalid value of %i.", (int)infoType);
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.GetFormatInfo(device, format, infoType, pDataSize, pData);
+ VkResult result = nextTable.GetFormatInfo(device, format, infoType, pDataSize, pData);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateBuffer(VK_DEVICE device, const VkBufferCreateInfo* pCreateInfo, VK_BUFFER* pBuffer)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateBuffer(VkDevice device, const VkBufferCreateInfo* pCreateInfo, VkBuffer* pBuffer)
{
char str[1024];
if (!pCreateInfo) {
@@ -650,11 +650,11 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateBuffer(VK_DEVICE device, const VkBufferC
sprintf(str, "Parameter pCreateInfo to function CreateBuffer contains an invalid value.");
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.CreateBuffer(device, pCreateInfo, pBuffer);
+ VkResult result = nextTable.CreateBuffer(device, pCreateInfo, pBuffer);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateBufferView(VK_DEVICE device, const VkBufferViewCreateInfo* pCreateInfo, VK_BUFFER_VIEW* pView)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateBufferView(VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, VkBufferView* pView)
{
char str[1024];
if (!pCreateInfo) {
@@ -665,15 +665,15 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateBufferView(VK_DEVICE device, const VkBuf
sprintf(str, "Parameter pCreateInfo to function CreateBufferView contains an invalid value.");
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.CreateBufferView(device, pCreateInfo, pView);
+ VkResult result = nextTable.CreateBufferView(device, pCreateInfo, pView);
return result;
}
-void PreCreateImage(VK_DEVICE device, const VK_IMAGE_CREATE_INFO* pCreateInfo)
+void PreCreateImage(VkDevice device, const VkImageCreateInfo* pCreateInfo)
{
if(pCreateInfo == nullptr)
{
- char const str[] = "vkCreateImage parameter, VK_IMAGE_CREATE_INFO* pCreateInfo, is "\
+ char const str[] = "vkCreateImage parameter, VkImageCreateInfo* pCreateInfo, is "\
"nullptr (precondition).";
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
@@ -689,7 +689,7 @@ void PreCreateImage(VK_DEVICE device, const VK_IMAGE_CREATE_INFO* pCreateInfo)
if (!validate_VK_IMAGE_TYPE(pCreateInfo->imageType))
{
- char const str[] = "vkCreateImage parameter, VK_IMAGE_TYPE pCreateInfo->imageType, is "\
+ char const str[] = "vkCreateImage parameter, VkImageType pCreateInfo->imageType, is "\
"unrecognized (precondition).";
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
@@ -697,19 +697,19 @@ void PreCreateImage(VK_DEVICE device, const VK_IMAGE_CREATE_INFO* pCreateInfo)
if (!validate_VK_FORMAT(pCreateInfo->format))
{
- char const str[] = "vkCreateImage parameter, VK_FORMAT pCreateInfo->format, is "\
+ char const str[] = "vkCreateImage parameter, VkFormat pCreateInfo->format, is "\
"unrecognized (precondition).";
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
}
- VK_FORMAT_PROPERTIES properties;
+ VkFormatProperties properties;
size_t size = sizeof(properties);
- VK_RESULT result = nextTable.GetFormatInfo(device, pCreateInfo->format,
+ VkResult result = nextTable.GetFormatInfo(device, pCreateInfo->format,
VK_INFO_TYPE_FORMAT_PROPERTIES, &size, &properties);
if(result != VK_SUCCESS)
{
- char const str[] = "vkCreateImage parameter, VK_FORMAT pCreateInfo->format, cannot be "\
+ char const str[] = "vkCreateImage parameter, VkFormat pCreateInfo->format, cannot be "\
"validated (precondition).";
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
@@ -717,7 +717,7 @@ void PreCreateImage(VK_DEVICE device, const VK_IMAGE_CREATE_INFO* pCreateInfo)
if((properties.linearTilingFeatures) == 0 && (properties.optimalTilingFeatures == 0))
{
- char const str[] = "vkCreateImage parameter, VK_FORMAT pCreateInfo->format, contains "\
+ char const str[] = "vkCreateImage parameter, VkFormat pCreateInfo->format, contains "\
"unsupported format (precondition).";
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
@@ -726,7 +726,7 @@ void PreCreateImage(VK_DEVICE device, const VK_IMAGE_CREATE_INFO* pCreateInfo)
// TODO: Can we check device-specific limits?
if (!vk_validate_vk_extent3d(&pCreateInfo->extent))
{
- char const str[] = "vkCreateImage parameter, VK_EXTENT3D pCreateInfo->extent, is invalid "\
+ char const str[] = "vkCreateImage parameter, VkExtent3D pCreateInfo->extent, is invalid "\
"(precondition).";
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
@@ -734,18 +734,18 @@ void PreCreateImage(VK_DEVICE device, const VK_IMAGE_CREATE_INFO* pCreateInfo)
if (!validate_VK_IMAGE_TILING(pCreateInfo->tiling))
{
-    char const str[] = "vkCreateImage parameter, VK_IMAGE_TILING pCreateInfo->tiling, is "\
-        "unrecoginized (precondition).";
+    char const str[] = "vkCreateImage parameter, VkImageTiling pCreateInfo->tiling, is "\
+        "unrecognized (precondition).";
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
}
}
-void PostCreateImage(VK_RESULT result, VK_IMAGE* pImage)
+void PostCreateImage(VkResult result, VkImage* pImage)
{
if(result != VK_SUCCESS)
{
- // TODO: Spit out VK_RESULT value.
+ // TODO: Spit out VkResult value.
char const str[] = "vkCreateImage failed (postcondition).";
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
@@ -753,21 +753,21 @@ void PostCreateImage(VK_RESULT result, VK_IMAGE* pImage)
if(pImage == nullptr)
{
- char const str[] = "vkCreateImage parameter, VK_IMAGE* pImage, is nullptr (postcondition).";
+ char const str[] = "vkCreateImage parameter, VkImage* pImage, is nullptr (postcondition).";
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
}
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateImage(VK_DEVICE device, const VK_IMAGE_CREATE_INFO* pCreateInfo, VK_IMAGE* pImage)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateImage(VkDevice device, const VkImageCreateInfo* pCreateInfo, VkImage* pImage)
{
PreCreateImage(device, pCreateInfo);
- VK_RESULT result = nextTable.CreateImage(device, pCreateInfo, pImage);
+ VkResult result = nextTable.CreateImage(device, pCreateInfo, pImage);
PostCreateImage(result, pImage);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkGetImageSubresourceInfo(VK_IMAGE image, const VK_IMAGE_SUBRESOURCE* pSubresource, VK_SUBRESOURCE_INFO_TYPE infoType, size_t* pDataSize, void* pData)
+VK_LAYER_EXPORT VkResult VKAPI vkGetImageSubresourceInfo(VkImage image, const VkImageSubresource* pSubresource, VkSubresourceInfoType infoType, size_t* pDataSize, void* pData)
{
char str[1024];
if (!pSubresource) {
@@ -782,11 +782,11 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkGetImageSubresourceInfo(VK_IMAGE image, const
sprintf(str, "Parameter infoType to function GetImageSubresourceInfo has invalid value of %i.", (int)infoType);
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.GetImageSubresourceInfo(image, pSubresource, infoType, pDataSize, pData);
+ VkResult result = nextTable.GetImageSubresourceInfo(image, pSubresource, infoType, pDataSize, pData);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateImageView(VK_DEVICE device, const VK_IMAGE_VIEW_CREATE_INFO* pCreateInfo, VK_IMAGE_VIEW* pView)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateImageView(VkDevice device, const VkImageViewCreateInfo* pCreateInfo, VkImageView* pView)
{
char str[1024];
if (!pCreateInfo) {
@@ -797,11 +797,11 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateImageView(VK_DEVICE device, const VK_IMA
sprintf(str, "Parameter pCreateInfo to function CreateImageView contains an invalid value.");
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.CreateImageView(device, pCreateInfo, pView);
+ VkResult result = nextTable.CreateImageView(device, pCreateInfo, pView);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateColorAttachmentView(VK_DEVICE device, const VK_COLOR_ATTACHMENT_VIEW_CREATE_INFO* pCreateInfo, VK_COLOR_ATTACHMENT_VIEW* pView)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateColorAttachmentView(VkDevice device, const VkColorAttachmentViewCreateInfo* pCreateInfo, VkColorAttachmentView* pView)
{
char str[1024];
if (!pCreateInfo) {
@@ -812,11 +812,11 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateColorAttachmentView(VK_DEVICE device, co
sprintf(str, "Parameter pCreateInfo to function CreateColorAttachmentView contains an invalid value.");
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.CreateColorAttachmentView(device, pCreateInfo, pView);
+ VkResult result = nextTable.CreateColorAttachmentView(device, pCreateInfo, pView);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDepthStencilView(VK_DEVICE device, const VK_DEPTH_STENCIL_VIEW_CREATE_INFO* pCreateInfo, VK_DEPTH_STENCIL_VIEW* pView)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDepthStencilView(VkDevice device, const VkDepthStencilViewCreateInfo* pCreateInfo, VkDepthStencilView* pView)
{
char str[1024];
if (!pCreateInfo) {
@@ -827,11 +827,11 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDepthStencilView(VK_DEVICE device, const
sprintf(str, "Parameter pCreateInfo to function CreateDepthStencilView contains an invalid value.");
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.CreateDepthStencilView(device, pCreateInfo, pView);
+ VkResult result = nextTable.CreateDepthStencilView(device, pCreateInfo, pView);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateShader(VK_DEVICE device, const VK_SHADER_CREATE_INFO* pCreateInfo, VK_SHADER* pShader)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateShader(VkDevice device, const VkShaderCreateInfo* pCreateInfo, VkShader* pShader)
{
char str[1024];
if (!pCreateInfo) {
@@ -842,11 +842,11 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateShader(VK_DEVICE device, const VK_SHADER
sprintf(str, "Parameter pCreateInfo to function CreateShader contains an invalid value.");
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.CreateShader(device, pCreateInfo, pShader);
+ VkResult result = nextTable.CreateShader(device, pCreateInfo, pShader);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateGraphicsPipeline(VK_DEVICE device, const VK_GRAPHICS_PIPELINE_CREATE_INFO* pCreateInfo, VK_PIPELINE* pPipeline)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateGraphicsPipeline(VkDevice device, const VkGraphicsPipelineCreateInfo* pCreateInfo, VkPipeline* pPipeline)
{
char str[1024];
if (!pCreateInfo) {
@@ -857,11 +857,11 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateGraphicsPipeline(VK_DEVICE device, const
sprintf(str, "Parameter pCreateInfo to function CreateGraphicsPipeline contains an invalid value.");
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.CreateGraphicsPipeline(device, pCreateInfo, pPipeline);
+ VkResult result = nextTable.CreateGraphicsPipeline(device, pCreateInfo, pPipeline);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateGraphicsPipelineDerivative(VK_DEVICE device, const VK_GRAPHICS_PIPELINE_CREATE_INFO* pCreateInfo, VK_PIPELINE basePipeline, VK_PIPELINE* pPipeline)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateGraphicsPipelineDerivative(VkDevice device, const VkGraphicsPipelineCreateInfo* pCreateInfo, VkPipeline basePipeline, VkPipeline* pPipeline)
{
char str[1024];
if (!pCreateInfo) {
@@ -872,11 +872,11 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateGraphicsPipelineDerivative(VK_DEVICE dev
sprintf(str, "Parameter pCreateInfo to function CreateGraphicsPipelineDerivative contains an invalid value.");
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.CreateGraphicsPipelineDerivative(device, pCreateInfo, basePipeline, pPipeline);
+ VkResult result = nextTable.CreateGraphicsPipelineDerivative(device, pCreateInfo, basePipeline, pPipeline);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateComputePipeline(VK_DEVICE device, const VK_COMPUTE_PIPELINE_CREATE_INFO* pCreateInfo, VK_PIPELINE* pPipeline)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateComputePipeline(VkDevice device, const VkComputePipelineCreateInfo* pCreateInfo, VkPipeline* pPipeline)
{
char str[1024];
if (!pCreateInfo) {
@@ -887,32 +887,32 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateComputePipeline(VK_DEVICE device, const
sprintf(str, "Parameter pCreateInfo to function CreateComputePipeline contains an invalid value.");
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.CreateComputePipeline(device, pCreateInfo, pPipeline);
+ VkResult result = nextTable.CreateComputePipeline(device, pCreateInfo, pPipeline);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkStorePipeline(VK_PIPELINE pipeline, size_t* pDataSize, void* pData)
+VK_LAYER_EXPORT VkResult VKAPI vkStorePipeline(VkPipeline pipeline, size_t* pDataSize, void* pData)
{
- VK_RESULT result = nextTable.StorePipeline(pipeline, pDataSize, pData);
+ VkResult result = nextTable.StorePipeline(pipeline, pDataSize, pData);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkLoadPipeline(VK_DEVICE device, size_t dataSize, const void* pData, VK_PIPELINE* pPipeline)
+VK_LAYER_EXPORT VkResult VKAPI vkLoadPipeline(VkDevice device, size_t dataSize, const void* pData, VkPipeline* pPipeline)
{
- VK_RESULT result = nextTable.LoadPipeline(device, dataSize, pData, pPipeline);
+ VkResult result = nextTable.LoadPipeline(device, dataSize, pData, pPipeline);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkLoadPipelineDerivative(VK_DEVICE device, size_t dataSize, const void* pData, VK_PIPELINE basePipeline, VK_PIPELINE* pPipeline)
+VK_LAYER_EXPORT VkResult VKAPI vkLoadPipelineDerivative(VkDevice device, size_t dataSize, const void* pData, VkPipeline basePipeline, VkPipeline* pPipeline)
{
- VK_RESULT result = nextTable.LoadPipelineDerivative(device, dataSize, pData, basePipeline, pPipeline);
+ VkResult result = nextTable.LoadPipelineDerivative(device, dataSize, pData, basePipeline, pPipeline);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateSampler(VK_DEVICE device, const VK_SAMPLER_CREATE_INFO* pCreateInfo, VK_SAMPLER* pSampler)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateSampler(VkDevice device, const VkSamplerCreateInfo* pCreateInfo, VkSampler* pSampler)
{
char str[1024];
if (!pCreateInfo) {
@@ -923,11 +923,11 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateSampler(VK_DEVICE device, const VK_SAMPL
sprintf(str, "Parameter pCreateInfo to function CreateSampler contains an invalid value.");
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.CreateSampler(device, pCreateInfo, pSampler);
+ VkResult result = nextTable.CreateSampler(device, pCreateInfo, pSampler);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDescriptorSetLayout(VK_DEVICE device, const VK_DESCRIPTOR_SET_LAYOUT_CREATE_INFO* pCreateInfo, VK_DESCRIPTOR_SET_LAYOUT* pSetLayout)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayout* pSetLayout)
{
char str[1024];
if (!pCreateInfo) {
@@ -938,36 +938,36 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDescriptorSetLayout(VK_DEVICE device, co
sprintf(str, "Parameter pCreateInfo to function CreateDescriptorSetLayout contains an invalid value.");
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.CreateDescriptorSetLayout(device, pCreateInfo, pSetLayout);
+ VkResult result = nextTable.CreateDescriptorSetLayout(device, pCreateInfo, pSetLayout);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDescriptorSetLayoutChain(VK_DEVICE device, uint32_t setLayoutArrayCount, const VK_DESCRIPTOR_SET_LAYOUT* pSetLayoutArray, VK_DESCRIPTOR_SET_LAYOUT_CHAIN* pLayoutChain)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDescriptorSetLayoutChain(VkDevice device, uint32_t setLayoutArrayCount, const VkDescriptorSetLayout* pSetLayoutArray, VkDescriptorSetLayoutChain* pLayoutChain)
{
- VK_RESULT result = nextTable.CreateDescriptorSetLayoutChain(device, setLayoutArrayCount, pSetLayoutArray, pLayoutChain);
+ VkResult result = nextTable.CreateDescriptorSetLayoutChain(device, setLayoutArrayCount, pSetLayoutArray, pLayoutChain);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkBeginDescriptorPoolUpdate(VK_DEVICE device, VK_DESCRIPTOR_UPDATE_MODE updateMode)
+VK_LAYER_EXPORT VkResult VKAPI vkBeginDescriptorPoolUpdate(VkDevice device, VkDescriptorUpdateMode updateMode)
{
char str[1024];
if (!validate_VK_DESCRIPTOR_UPDATE_MODE(updateMode)) {
sprintf(str, "Parameter updateMode to function BeginDescriptorPoolUpdate has invalid value of %i.", (int)updateMode);
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.BeginDescriptorPoolUpdate(device, updateMode);
+ VkResult result = nextTable.BeginDescriptorPoolUpdate(device, updateMode);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkEndDescriptorPoolUpdate(VK_DEVICE device, VK_CMD_BUFFER cmd)
+VK_LAYER_EXPORT VkResult VKAPI vkEndDescriptorPoolUpdate(VkDevice device, VkCmdBuffer cmd)
{
- VK_RESULT result = nextTable.EndDescriptorPoolUpdate(device, cmd);
+ VkResult result = nextTable.EndDescriptorPoolUpdate(device, cmd);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDescriptorPool(VK_DEVICE device, VK_DESCRIPTOR_POOL_USAGE poolUsage, uint32_t maxSets, const VK_DESCRIPTOR_POOL_CREATE_INFO* pCreateInfo, VK_DESCRIPTOR_POOL* pDescriptorPool)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDescriptorPool(VkDevice device, VkDescriptorPoolUsage poolUsage, uint32_t maxSets, const VkDescriptorPoolCreateInfo* pCreateInfo, VkDescriptorPool* pDescriptorPool)
{
char str[1024];
if (!validate_VK_DESCRIPTOR_POOL_USAGE(poolUsage)) {
@@ -982,41 +982,41 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDescriptorPool(VK_DEVICE device, VK_DESC
sprintf(str, "Parameter pCreateInfo to function CreateDescriptorPool contains an invalid value.");
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.CreateDescriptorPool(device, poolUsage, maxSets, pCreateInfo, pDescriptorPool);
+ VkResult result = nextTable.CreateDescriptorPool(device, poolUsage, maxSets, pCreateInfo, pDescriptorPool);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkResetDescriptorPool(VK_DESCRIPTOR_POOL descriptorPool)
+VK_LAYER_EXPORT VkResult VKAPI vkResetDescriptorPool(VkDescriptorPool descriptorPool)
{
- VK_RESULT result = nextTable.ResetDescriptorPool(descriptorPool);
+ VkResult result = nextTable.ResetDescriptorPool(descriptorPool);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkAllocDescriptorSets(VK_DESCRIPTOR_POOL descriptorPool, VK_DESCRIPTOR_SET_USAGE setUsage, uint32_t count, const VK_DESCRIPTOR_SET_LAYOUT* pSetLayouts, VK_DESCRIPTOR_SET* pDescriptorSets, uint32_t* pCount)
+VK_LAYER_EXPORT VkResult VKAPI vkAllocDescriptorSets(VkDescriptorPool descriptorPool, VkDescriptorSetUsage setUsage, uint32_t count, const VkDescriptorSetLayout* pSetLayouts, VkDescriptorSet* pDescriptorSets, uint32_t* pCount)
{
char str[1024];
if (!validate_VK_DESCRIPTOR_SET_USAGE(setUsage)) {
sprintf(str, "Parameter setUsage to function AllocDescriptorSets has invalid value of %i.", (int)setUsage);
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.AllocDescriptorSets(descriptorPool, setUsage, count, pSetLayouts, pDescriptorSets, pCount);
+ VkResult result = nextTable.AllocDescriptorSets(descriptorPool, setUsage, count, pSetLayouts, pDescriptorSets, pCount);
return result;
}
-VK_LAYER_EXPORT void VKAPI vkClearDescriptorSets(VK_DESCRIPTOR_POOL descriptorPool, uint32_t count, const VK_DESCRIPTOR_SET* pDescriptorSets)
+VK_LAYER_EXPORT void VKAPI vkClearDescriptorSets(VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet* pDescriptorSets)
{
nextTable.ClearDescriptorSets(descriptorPool, count, pDescriptorSets);
}
-VK_LAYER_EXPORT void VKAPI vkUpdateDescriptors(VK_DESCRIPTOR_SET descriptorSet, uint32_t updateCount, const void** ppUpdateArray)
+VK_LAYER_EXPORT void VKAPI vkUpdateDescriptors(VkDescriptorSet descriptorSet, uint32_t updateCount, const void** ppUpdateArray)
{
nextTable.UpdateDescriptors(descriptorSet, updateCount, ppUpdateArray);
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDynamicViewportState(VK_DEVICE device, const VK_DYNAMIC_VP_STATE_CREATE_INFO* pCreateInfo, VK_DYNAMIC_VP_STATE_OBJECT* pState)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDynamicViewportState(VkDevice device, const VkDynamicVpStateCreateInfo* pCreateInfo, VkDynamicVpStateObject* pState)
{
char str[1024];
if (!pCreateInfo) {
@@ -1027,11 +1027,11 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDynamicViewportState(VK_DEVICE device, c
sprintf(str, "Parameter pCreateInfo to function CreateDynamicViewportState contains an invalid value.");
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.CreateDynamicViewportState(device, pCreateInfo, pState);
+ VkResult result = nextTable.CreateDynamicViewportState(device, pCreateInfo, pState);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDynamicRasterState(VK_DEVICE device, const VK_DYNAMIC_RS_STATE_CREATE_INFO* pCreateInfo, VK_DYNAMIC_RS_STATE_OBJECT* pState)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDynamicRasterState(VkDevice device, const VkDynamicRsStateCreateInfo* pCreateInfo, VkDynamicRsStateObject* pState)
{
char str[1024];
if (!pCreateInfo) {
@@ -1042,11 +1042,11 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDynamicRasterState(VK_DEVICE device, con
sprintf(str, "Parameter pCreateInfo to function CreateDynamicRasterState contains an invalid value.");
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.CreateDynamicRasterState(device, pCreateInfo, pState);
+ VkResult result = nextTable.CreateDynamicRasterState(device, pCreateInfo, pState);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDynamicColorBlendState(VK_DEVICE device, const VK_DYNAMIC_CB_STATE_CREATE_INFO* pCreateInfo, VK_DYNAMIC_CB_STATE_OBJECT* pState)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDynamicColorBlendState(VkDevice device, const VkDynamicCbStateCreateInfo* pCreateInfo, VkDynamicCbStateObject* pState)
{
char str[1024];
if (!pCreateInfo) {
@@ -1057,11 +1057,11 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDynamicColorBlendState(VK_DEVICE device,
sprintf(str, "Parameter pCreateInfo to function CreateDynamicColorBlendState contains an invalid value.");
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.CreateDynamicColorBlendState(device, pCreateInfo, pState);
+ VkResult result = nextTable.CreateDynamicColorBlendState(device, pCreateInfo, pState);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDynamicDepthStencilState(VK_DEVICE device, const VK_DYNAMIC_DS_STATE_CREATE_INFO* pCreateInfo, VK_DYNAMIC_DS_STATE_OBJECT* pState)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDynamicDepthStencilState(VkDevice device, const VkDynamicDsStateCreateInfo* pCreateInfo, VkDynamicDsStateObject* pState)
{
char str[1024];
if (!pCreateInfo) {
@@ -1072,15 +1072,15 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateDynamicDepthStencilState(VK_DEVICE devic
sprintf(str, "Parameter pCreateInfo to function CreateDynamicDepthStencilState contains an invalid value.");
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.CreateDynamicDepthStencilState(device, pCreateInfo, pState);
+ VkResult result = nextTable.CreateDynamicDepthStencilState(device, pCreateInfo, pState);
return result;
}
-void PreCreateCommandBuffer(VK_DEVICE device, const VK_CMD_BUFFER_CREATE_INFO* pCreateInfo)
+void PreCreateCommandBuffer(VkDevice device, const VkCmdBufferCreateInfo* pCreateInfo)
{
if(device == nullptr)
{
- char const str[] = "vkCreateCommandBuffer parameter, VK_DEVICE device, is "\
+ char const str[] = "vkCreateCommandBuffer parameter, VkDevice device, is "\
"nullptr (precondition).";
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
@@ -1088,7 +1088,7 @@ void PreCreateCommandBuffer(VK_DEVICE device, const VK_CMD_BUFFER_CREATE_INFO* p
if(pCreateInfo == nullptr)
{
- char const str[] = "vkCreateCommandBuffer parameter, VK_CMD_BUFFER_CREATE_INFO* pCreateInfo, is "\
+ char const str[] = "vkCreateCommandBuffer parameter, VkCmdBufferCreateInfo* pCreateInfo, is "\
"nullptr (precondition).";
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
@@ -1103,11 +1103,11 @@ void PreCreateCommandBuffer(VK_DEVICE device, const VK_CMD_BUFFER_CREATE_INFO* p
}
}
-void PostCreateCommandBuffer(VK_RESULT result, VK_CMD_BUFFER* pCmdBuffer)
+void PostCreateCommandBuffer(VkResult result, VkCmdBuffer* pCmdBuffer)
{
if(result != VK_SUCCESS)
{
- // TODO: Spit out VK_RESULT value.
+ // TODO: Spit out VkResult value.
char const str[] = "vkCreateCommandBuffer failed (postcondition).";
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
@@ -1115,22 +1115,22 @@ void PostCreateCommandBuffer(VK_RESULT result, VK_CMD_BUFFER* pCmdBuffer)
if(pCmdBuffer == nullptr)
{
- char const str[] = "vkCreateCommandBuffer parameter, VK_CMD_BUFFER* pCmdBuffer, is nullptr (postcondition).";
+ char const str[] = "vkCreateCommandBuffer parameter, VkCmdBuffer* pCmdBuffer, is nullptr (postcondition).";
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
}
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateCommandBuffer(VK_DEVICE device,
- const VK_CMD_BUFFER_CREATE_INFO* pCreateInfo, VK_CMD_BUFFER* pCmdBuffer)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateCommandBuffer(VkDevice device,
+ const VkCmdBufferCreateInfo* pCreateInfo, VkCmdBuffer* pCmdBuffer)
{
PreCreateCommandBuffer(device, pCreateInfo);
- VK_RESULT result = nextTable.CreateCommandBuffer(device, pCreateInfo, pCmdBuffer);
+ VkResult result = nextTable.CreateCommandBuffer(device, pCreateInfo, pCmdBuffer);
PostCreateCommandBuffer(result, pCmdBuffer);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkBeginCommandBuffer(VK_CMD_BUFFER cmdBuffer, const VK_CMD_BUFFER_BEGIN_INFO* pBeginInfo)
+VK_LAYER_EXPORT VkResult VKAPI vkBeginCommandBuffer(VkCmdBuffer cmdBuffer, const VkCmdBufferBeginInfo* pBeginInfo)
{
char str[1024];
if (!pBeginInfo) {
@@ -1141,25 +1141,25 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkBeginCommandBuffer(VK_CMD_BUFFER cmdBuffer, co
sprintf(str, "Parameter pBeginInfo to function BeginCommandBuffer contains an invalid value.");
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.BeginCommandBuffer(cmdBuffer, pBeginInfo);
+ VkResult result = nextTable.BeginCommandBuffer(cmdBuffer, pBeginInfo);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkEndCommandBuffer(VK_CMD_BUFFER cmdBuffer)
+VK_LAYER_EXPORT VkResult VKAPI vkEndCommandBuffer(VkCmdBuffer cmdBuffer)
{
- VK_RESULT result = nextTable.EndCommandBuffer(cmdBuffer);
+ VkResult result = nextTable.EndCommandBuffer(cmdBuffer);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkResetCommandBuffer(VK_CMD_BUFFER cmdBuffer)
+VK_LAYER_EXPORT VkResult VKAPI vkResetCommandBuffer(VkCmdBuffer cmdBuffer)
{
- VK_RESULT result = nextTable.ResetCommandBuffer(cmdBuffer);
+ VkResult result = nextTable.ResetCommandBuffer(cmdBuffer);
return result;
}
-VK_LAYER_EXPORT void VKAPI vkCmdBindPipeline(VK_CMD_BUFFER cmdBuffer, VK_PIPELINE_BIND_POINT pipelineBindPoint, VK_PIPELINE pipeline)
+VK_LAYER_EXPORT void VKAPI vkCmdBindPipeline(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline)
{
char str[1024];
if (!validate_VK_PIPELINE_BIND_POINT(pipelineBindPoint)) {
@@ -1169,7 +1169,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdBindPipeline(VK_CMD_BUFFER cmdBuffer, VK_PIPELIN
nextTable.CmdBindPipeline(cmdBuffer, pipelineBindPoint, pipeline);
}
-VK_LAYER_EXPORT void VKAPI vkCmdBindDynamicStateObject(VK_CMD_BUFFER cmdBuffer, VK_STATE_BIND_POINT stateBindPoint, VK_DYNAMIC_STATE_OBJECT state)
+VK_LAYER_EXPORT void VKAPI vkCmdBindDynamicStateObject(VkCmdBuffer cmdBuffer, VkStateBindPoint stateBindPoint, VkDynamicStateObject state)
{
char str[1024];
if (!validate_VK_STATE_BIND_POINT(stateBindPoint)) {
@@ -1179,7 +1179,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdBindDynamicStateObject(VK_CMD_BUFFER cmdBuffer,
nextTable.CmdBindDynamicStateObject(cmdBuffer, stateBindPoint, state);
}
-VK_LAYER_EXPORT void VKAPI vkCmdBindDescriptorSets(VK_CMD_BUFFER cmdBuffer, VK_PIPELINE_BIND_POINT pipelineBindPoint, VK_DESCRIPTOR_SET_LAYOUT_CHAIN layoutChain, uint32_t layoutChainSlot, uint32_t count, const VK_DESCRIPTOR_SET* pDescriptorSets, const uint32_t* pUserData)
+VK_LAYER_EXPORT void VKAPI vkCmdBindDescriptorSets(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, VkDescriptorSetLayoutChain layoutChain, uint32_t layoutChainSlot, uint32_t count, const VkDescriptorSet* pDescriptorSets, const uint32_t* pUserData)
{
char str[1024];
if (!validate_VK_PIPELINE_BIND_POINT(pipelineBindPoint)) {
@@ -1189,13 +1189,13 @@ VK_LAYER_EXPORT void VKAPI vkCmdBindDescriptorSets(VK_CMD_BUFFER cmdBuffer, VK_P
nextTable.CmdBindDescriptorSets(cmdBuffer, pipelineBindPoint, layoutChain, layoutChainSlot, count, pDescriptorSets, pUserData);
}
-VK_LAYER_EXPORT void VKAPI vkCmdBindVertexBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUFFER buffer, VK_GPU_SIZE offset, uint32_t binding)
+VK_LAYER_EXPORT void VKAPI vkCmdBindVertexBuffer(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset, uint32_t binding)
{
nextTable.CmdBindVertexBuffer(cmdBuffer, buffer, offset, binding);
}
-VK_LAYER_EXPORT void VKAPI vkCmdBindIndexBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUFFER buffer, VK_GPU_SIZE offset, VK_INDEX_TYPE indexType)
+VK_LAYER_EXPORT void VKAPI vkCmdBindIndexBuffer(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset, VkIndexType indexType)
{
char str[1024];
if (!validate_VK_INDEX_TYPE(indexType)) {
@@ -1205,43 +1205,43 @@ VK_LAYER_EXPORT void VKAPI vkCmdBindIndexBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUFF
nextTable.CmdBindIndexBuffer(cmdBuffer, buffer, offset, indexType);
}
-VK_LAYER_EXPORT void VKAPI vkCmdDraw(VK_CMD_BUFFER cmdBuffer, uint32_t firstVertex, uint32_t vertexCount, uint32_t firstInstance, uint32_t instanceCount)
+VK_LAYER_EXPORT void VKAPI vkCmdDraw(VkCmdBuffer cmdBuffer, uint32_t firstVertex, uint32_t vertexCount, uint32_t firstInstance, uint32_t instanceCount)
{
nextTable.CmdDraw(cmdBuffer, firstVertex, vertexCount, firstInstance, instanceCount);
}
-VK_LAYER_EXPORT void VKAPI vkCmdDrawIndexed(VK_CMD_BUFFER cmdBuffer, uint32_t firstIndex, uint32_t indexCount, int32_t vertexOffset, uint32_t firstInstance, uint32_t instanceCount)
+VK_LAYER_EXPORT void VKAPI vkCmdDrawIndexed(VkCmdBuffer cmdBuffer, uint32_t firstIndex, uint32_t indexCount, int32_t vertexOffset, uint32_t firstInstance, uint32_t instanceCount)
{
nextTable.CmdDrawIndexed(cmdBuffer, firstIndex, indexCount, vertexOffset, firstInstance, instanceCount);
}
-VK_LAYER_EXPORT void VKAPI vkCmdDrawIndirect(VK_CMD_BUFFER cmdBuffer, VK_BUFFER buffer, VK_GPU_SIZE offset, uint32_t count, uint32_t stride)
+VK_LAYER_EXPORT void VKAPI vkCmdDrawIndirect(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset, uint32_t count, uint32_t stride)
{
nextTable.CmdDrawIndirect(cmdBuffer, buffer, offset, count, stride);
}
-VK_LAYER_EXPORT void VKAPI vkCmdDrawIndexedIndirect(VK_CMD_BUFFER cmdBuffer, VK_BUFFER buffer, VK_GPU_SIZE offset, uint32_t count, uint32_t stride)
+VK_LAYER_EXPORT void VKAPI vkCmdDrawIndexedIndirect(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset, uint32_t count, uint32_t stride)
{
nextTable.CmdDrawIndexedIndirect(cmdBuffer, buffer, offset, count, stride);
}
-VK_LAYER_EXPORT void VKAPI vkCmdDispatch(VK_CMD_BUFFER cmdBuffer, uint32_t x, uint32_t y, uint32_t z)
+VK_LAYER_EXPORT void VKAPI vkCmdDispatch(VkCmdBuffer cmdBuffer, uint32_t x, uint32_t y, uint32_t z)
{
nextTable.CmdDispatch(cmdBuffer, x, y, z);
}
-VK_LAYER_EXPORT void VKAPI vkCmdDispatchIndirect(VK_CMD_BUFFER cmdBuffer, VK_BUFFER buffer, VK_GPU_SIZE offset)
+VK_LAYER_EXPORT void VKAPI vkCmdDispatchIndirect(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset)
{
nextTable.CmdDispatchIndirect(cmdBuffer, buffer, offset);
}
-VK_LAYER_EXPORT void VKAPI vkCmdCopyBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUFFER srcBuffer, VK_BUFFER destBuffer, uint32_t regionCount, const VK_BUFFER_COPY* pRegions)
+VK_LAYER_EXPORT void VKAPI vkCmdCopyBuffer(VkCmdBuffer cmdBuffer, VkBuffer srcBuffer, VkBuffer destBuffer, uint32_t regionCount, const VkBufferCopy* pRegions)
{
char str[1024];
uint32_t i;
@@ -1254,7 +1254,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdCopyBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUFFER sr
nextTable.CmdCopyBuffer(cmdBuffer, srcBuffer, destBuffer, regionCount, pRegions);
}
-VK_LAYER_EXPORT void VKAPI vkCmdCopyImage(VK_CMD_BUFFER cmdBuffer, VK_IMAGE srcImage, VK_IMAGE_LAYOUT srcImageLayout, VK_IMAGE destImage, VK_IMAGE_LAYOUT destImageLayout, uint32_t regionCount, const VK_IMAGE_COPY* pRegions)
+VK_LAYER_EXPORT void VKAPI vkCmdCopyImage(VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage destImage, VkImageLayout destImageLayout, uint32_t regionCount, const VkImageCopy* pRegions)
{
char str[1024];
if (!validate_VK_IMAGE_LAYOUT(srcImageLayout)) {
@@ -1275,7 +1275,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdCopyImage(VK_CMD_BUFFER cmdBuffer, VK_IMAGE srcI
nextTable.CmdCopyImage(cmdBuffer, srcImage, srcImageLayout, destImage, destImageLayout, regionCount, pRegions);
}
-VK_LAYER_EXPORT void VKAPI vkCmdBlitImage(VK_CMD_BUFFER cmdBuffer, VK_IMAGE srcImage, VK_IMAGE_LAYOUT srcImageLayout, VK_IMAGE destImage, VK_IMAGE_LAYOUT destImageLayout, uint32_t regionCount, const VK_IMAGE_BLIT* pRegions)
+VK_LAYER_EXPORT void VKAPI vkCmdBlitImage(VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage destImage, VkImageLayout destImageLayout, uint32_t regionCount, const VkImageBlit* pRegions)
{
char str[1024];
if (!validate_VK_IMAGE_LAYOUT(srcImageLayout)) {
@@ -1296,7 +1296,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdBlitImage(VK_CMD_BUFFER cmdBuffer, VK_IMAGE srcI
nextTable.CmdBlitImage(cmdBuffer, srcImage, srcImageLayout, destImage, destImageLayout, regionCount, pRegions);
}
-VK_LAYER_EXPORT void VKAPI vkCmdCopyBufferToImage(VK_CMD_BUFFER cmdBuffer, VK_BUFFER srcBuffer, VK_IMAGE destImage, VK_IMAGE_LAYOUT destImageLayout, uint32_t regionCount, const VK_BUFFER_IMAGE_COPY* pRegions)
+VK_LAYER_EXPORT void VKAPI vkCmdCopyBufferToImage(VkCmdBuffer cmdBuffer, VkBuffer srcBuffer, VkImage destImage, VkImageLayout destImageLayout, uint32_t regionCount, const VkBufferImageCopy* pRegions)
{
char str[1024];
if (!validate_VK_IMAGE_LAYOUT(destImageLayout)) {
@@ -1313,7 +1313,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdCopyBufferToImage(VK_CMD_BUFFER cmdBuffer, VK_BU
nextTable.CmdCopyBufferToImage(cmdBuffer, srcBuffer, destImage, destImageLayout, regionCount, pRegions);
}
-VK_LAYER_EXPORT void VKAPI vkCmdCopyImageToBuffer(VK_CMD_BUFFER cmdBuffer, VK_IMAGE srcImage, VK_IMAGE_LAYOUT srcImageLayout, VK_BUFFER destBuffer, uint32_t regionCount, const VK_BUFFER_IMAGE_COPY* pRegions)
+VK_LAYER_EXPORT void VKAPI vkCmdCopyImageToBuffer(VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer destBuffer, uint32_t regionCount, const VkBufferImageCopy* pRegions)
{
char str[1024];
if (!validate_VK_IMAGE_LAYOUT(srcImageLayout)) {
@@ -1330,7 +1330,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdCopyImageToBuffer(VK_CMD_BUFFER cmdBuffer, VK_IM
nextTable.CmdCopyImageToBuffer(cmdBuffer, srcImage, srcImageLayout, destBuffer, regionCount, pRegions);
}
-VK_LAYER_EXPORT void VKAPI vkCmdCloneImageData(VK_CMD_BUFFER cmdBuffer, VK_IMAGE srcImage, VK_IMAGE_LAYOUT srcImageLayout, VK_IMAGE destImage, VK_IMAGE_LAYOUT destImageLayout)
+VK_LAYER_EXPORT void VKAPI vkCmdCloneImageData(VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage destImage, VkImageLayout destImageLayout)
{
char str[1024];
if (!validate_VK_IMAGE_LAYOUT(srcImageLayout)) {
@@ -1344,19 +1344,19 @@ VK_LAYER_EXPORT void VKAPI vkCmdCloneImageData(VK_CMD_BUFFER cmdBuffer, VK_IMAGE
nextTable.CmdCloneImageData(cmdBuffer, srcImage, srcImageLayout, destImage, destImageLayout);
}
-VK_LAYER_EXPORT void VKAPI vkCmdUpdateBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUFFER destBuffer, VK_GPU_SIZE destOffset, VK_GPU_SIZE dataSize, const uint32_t* pData)
+VK_LAYER_EXPORT void VKAPI vkCmdUpdateBuffer(VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkGpuSize destOffset, VkGpuSize dataSize, const uint32_t* pData)
{
nextTable.CmdUpdateBuffer(cmdBuffer, destBuffer, destOffset, dataSize, pData);
}
-VK_LAYER_EXPORT void VKAPI vkCmdFillBuffer(VK_CMD_BUFFER cmdBuffer, VK_BUFFER destBuffer, VK_GPU_SIZE destOffset, VK_GPU_SIZE fillSize, uint32_t data)
+VK_LAYER_EXPORT void VKAPI vkCmdFillBuffer(VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkGpuSize destOffset, VkGpuSize fillSize, uint32_t data)
{
nextTable.CmdFillBuffer(cmdBuffer, destBuffer, destOffset, fillSize, data);
}
-VK_LAYER_EXPORT void VKAPI vkCmdClearColorImage(VK_CMD_BUFFER cmdBuffer, VK_IMAGE image, VK_IMAGE_LAYOUT imageLayout, VK_CLEAR_COLOR color, uint32_t rangeCount, const VK_IMAGE_SUBRESOURCE_RANGE* pRanges)
+VK_LAYER_EXPORT void VKAPI vkCmdClearColorImage(VkCmdBuffer cmdBuffer, VkImage image, VkImageLayout imageLayout, VkClearColor color, uint32_t rangeCount, const VkImageSubresourceRange* pRanges)
{
char str[1024];
if (!validate_VK_IMAGE_LAYOUT(imageLayout)) {
@@ -1373,7 +1373,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdClearColorImage(VK_CMD_BUFFER cmdBuffer, VK_IMAG
nextTable.CmdClearColorImage(cmdBuffer, image, imageLayout, color, rangeCount, pRanges);
}
-VK_LAYER_EXPORT void VKAPI vkCmdClearDepthStencil(VK_CMD_BUFFER cmdBuffer, VK_IMAGE image, VK_IMAGE_LAYOUT imageLayout, float depth, uint32_t stencil, uint32_t rangeCount, const VK_IMAGE_SUBRESOURCE_RANGE* pRanges)
+VK_LAYER_EXPORT void VKAPI vkCmdClearDepthStencil(VkCmdBuffer cmdBuffer, VkImage image, VkImageLayout imageLayout, float depth, uint32_t stencil, uint32_t rangeCount, const VkImageSubresourceRange* pRanges)
{
char str[1024];
if (!validate_VK_IMAGE_LAYOUT(imageLayout)) {
@@ -1390,7 +1390,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdClearDepthStencil(VK_CMD_BUFFER cmdBuffer, VK_IM
nextTable.CmdClearDepthStencil(cmdBuffer, image, imageLayout, depth, stencil, rangeCount, pRanges);
}
-VK_LAYER_EXPORT void VKAPI vkCmdResolveImage(VK_CMD_BUFFER cmdBuffer, VK_IMAGE srcImage, VK_IMAGE_LAYOUT srcImageLayout, VK_IMAGE destImage, VK_IMAGE_LAYOUT destImageLayout, uint32_t rectCount, const VK_IMAGE_RESOLVE* pRects)
+VK_LAYER_EXPORT void VKAPI vkCmdResolveImage(VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage destImage, VkImageLayout destImageLayout, uint32_t rectCount, const VkImageResolve* pRects)
{
char str[1024];
if (!validate_VK_IMAGE_LAYOUT(srcImageLayout)) {
@@ -1411,7 +1411,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdResolveImage(VK_CMD_BUFFER cmdBuffer, VK_IMAGE s
nextTable.CmdResolveImage(cmdBuffer, srcImage, srcImageLayout, destImage, destImageLayout, rectCount, pRects);
}
-VK_LAYER_EXPORT void VKAPI vkCmdSetEvent(VK_CMD_BUFFER cmdBuffer, VK_EVENT event, VK_PIPE_EVENT pipeEvent)
+VK_LAYER_EXPORT void VKAPI vkCmdSetEvent(VkCmdBuffer cmdBuffer, VkEvent event, VkPipeEvent pipeEvent)
{
char str[1024];
if (!validate_VK_PIPE_EVENT(pipeEvent)) {
@@ -1421,7 +1421,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdSetEvent(VK_CMD_BUFFER cmdBuffer, VK_EVENT event
nextTable.CmdSetEvent(cmdBuffer, event, pipeEvent);
}
-VK_LAYER_EXPORT void VKAPI vkCmdResetEvent(VK_CMD_BUFFER cmdBuffer, VK_EVENT event, VK_PIPE_EVENT pipeEvent)
+VK_LAYER_EXPORT void VKAPI vkCmdResetEvent(VkCmdBuffer cmdBuffer, VkEvent event, VkPipeEvent pipeEvent)
{
char str[1024];
if (!validate_VK_PIPE_EVENT(pipeEvent)) {
@@ -1431,7 +1431,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdResetEvent(VK_CMD_BUFFER cmdBuffer, VK_EVENT eve
nextTable.CmdResetEvent(cmdBuffer, event, pipeEvent);
}
-VK_LAYER_EXPORT void VKAPI vkCmdWaitEvents(VK_CMD_BUFFER cmdBuffer, const VK_EVENT_WAIT_INFO* pWaitInfo)
+VK_LAYER_EXPORT void VKAPI vkCmdWaitEvents(VkCmdBuffer cmdBuffer, const VkEventWaitInfo* pWaitInfo)
{
char str[1024];
if (!pWaitInfo) {
@@ -1445,7 +1445,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdWaitEvents(VK_CMD_BUFFER cmdBuffer, const VK_EVE
nextTable.CmdWaitEvents(cmdBuffer, pWaitInfo);
}
-VK_LAYER_EXPORT void VKAPI vkCmdPipelineBarrier(VK_CMD_BUFFER cmdBuffer, const VK_PIPELINE_BARRIER* pBarrier)
+VK_LAYER_EXPORT void VKAPI vkCmdPipelineBarrier(VkCmdBuffer cmdBuffer, const VkPipelineBarrier* pBarrier)
{
char str[1024];
if (!pBarrier) {
@@ -1459,25 +1459,25 @@ VK_LAYER_EXPORT void VKAPI vkCmdPipelineBarrier(VK_CMD_BUFFER cmdBuffer, const V
nextTable.CmdPipelineBarrier(cmdBuffer, pBarrier);
}
-VK_LAYER_EXPORT void VKAPI vkCmdBeginQuery(VK_CMD_BUFFER cmdBuffer, VK_QUERY_POOL queryPool, uint32_t slot, VK_FLAGS flags)
+VK_LAYER_EXPORT void VKAPI vkCmdBeginQuery(VkCmdBuffer cmdBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags)
{
nextTable.CmdBeginQuery(cmdBuffer, queryPool, slot, flags);
}
-VK_LAYER_EXPORT void VKAPI vkCmdEndQuery(VK_CMD_BUFFER cmdBuffer, VK_QUERY_POOL queryPool, uint32_t slot)
+VK_LAYER_EXPORT void VKAPI vkCmdEndQuery(VkCmdBuffer cmdBuffer, VkQueryPool queryPool, uint32_t slot)
{
nextTable.CmdEndQuery(cmdBuffer, queryPool, slot);
}
-VK_LAYER_EXPORT void VKAPI vkCmdResetQueryPool(VK_CMD_BUFFER cmdBuffer, VK_QUERY_POOL queryPool, uint32_t startQuery, uint32_t queryCount)
+VK_LAYER_EXPORT void VKAPI vkCmdResetQueryPool(VkCmdBuffer cmdBuffer, VkQueryPool queryPool, uint32_t startQuery, uint32_t queryCount)
{
nextTable.CmdResetQueryPool(cmdBuffer, queryPool, startQuery, queryCount);
}
-VK_LAYER_EXPORT void VKAPI vkCmdWriteTimestamp(VK_CMD_BUFFER cmdBuffer, VK_TIMESTAMP_TYPE timestampType, VK_BUFFER destBuffer, VK_GPU_SIZE destOffset)
+VK_LAYER_EXPORT void VKAPI vkCmdWriteTimestamp(VkCmdBuffer cmdBuffer, VkTimestampType timestampType, VkBuffer destBuffer, VkGpuSize destOffset)
{
char str[1024];
if (!validate_VK_TIMESTAMP_TYPE(timestampType)) {
@@ -1487,7 +1487,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdWriteTimestamp(VK_CMD_BUFFER cmdBuffer, VK_TIMES
nextTable.CmdWriteTimestamp(cmdBuffer, timestampType, destBuffer, destOffset);
}
-VK_LAYER_EXPORT void VKAPI vkCmdInitAtomicCounters(VK_CMD_BUFFER cmdBuffer, VK_PIPELINE_BIND_POINT pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, const uint32_t* pData)
+VK_LAYER_EXPORT void VKAPI vkCmdInitAtomicCounters(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, const uint32_t* pData)
{
char str[1024];
if (!validate_VK_PIPELINE_BIND_POINT(pipelineBindPoint)) {
@@ -1497,7 +1497,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdInitAtomicCounters(VK_CMD_BUFFER cmdBuffer, VK_P
nextTable.CmdInitAtomicCounters(cmdBuffer, pipelineBindPoint, startCounter, counterCount, pData);
}
-VK_LAYER_EXPORT void VKAPI vkCmdLoadAtomicCounters(VK_CMD_BUFFER cmdBuffer, VK_PIPELINE_BIND_POINT pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, VK_BUFFER srcBuffer, VK_GPU_SIZE srcOffset)
+VK_LAYER_EXPORT void VKAPI vkCmdLoadAtomicCounters(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, VkBuffer srcBuffer, VkGpuSize srcOffset)
{
char str[1024];
if (!validate_VK_PIPELINE_BIND_POINT(pipelineBindPoint)) {
@@ -1507,7 +1507,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdLoadAtomicCounters(VK_CMD_BUFFER cmdBuffer, VK_P
nextTable.CmdLoadAtomicCounters(cmdBuffer, pipelineBindPoint, startCounter, counterCount, srcBuffer, srcOffset);
}
-VK_LAYER_EXPORT void VKAPI vkCmdSaveAtomicCounters(VK_CMD_BUFFER cmdBuffer, VK_PIPELINE_BIND_POINT pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, VK_BUFFER destBuffer, VK_GPU_SIZE destOffset)
+VK_LAYER_EXPORT void VKAPI vkCmdSaveAtomicCounters(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, VkBuffer destBuffer, VkGpuSize destOffset)
{
char str[1024];
if (!validate_VK_PIPELINE_BIND_POINT(pipelineBindPoint)) {
@@ -1517,7 +1517,7 @@ VK_LAYER_EXPORT void VKAPI vkCmdSaveAtomicCounters(VK_CMD_BUFFER cmdBuffer, VK_P
nextTable.CmdSaveAtomicCounters(cmdBuffer, pipelineBindPoint, startCounter, counterCount, destBuffer, destOffset);
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateFramebuffer(VK_DEVICE device, const VK_FRAMEBUFFER_CREATE_INFO* pCreateInfo, VK_FRAMEBUFFER* pFramebuffer)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo* pCreateInfo, VkFramebuffer* pFramebuffer)
{
char str[1024];
if (!pCreateInfo) {
@@ -1528,16 +1528,16 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateFramebuffer(VK_DEVICE device, const VK_F
sprintf(str, "Parameter pCreateInfo to function CreateFramebuffer contains an invalid value.");
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.CreateFramebuffer(device, pCreateInfo, pFramebuffer);
+ VkResult result = nextTable.CreateFramebuffer(device, pCreateInfo, pFramebuffer);
return result;
}
-void PreCreateRenderPass(VK_DEVICE device, const VK_RENDER_PASS_CREATE_INFO* pCreateInfo)
+void PreCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo* pCreateInfo)
{
if(pCreateInfo == nullptr)
{
- char const str[] = "vkCreateRenderPass parameter, VK_RENDER_PASS_CREATE_INFO* pCreateInfo, is "\
+ char const str[] = "vkCreateRenderPass parameter, VkRenderPassCreateInfo* pCreateInfo, is "\
"nullptr (precondition).";
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
@@ -1553,7 +1553,7 @@ void PreCreateRenderPass(VK_DEVICE device, const VK_RENDER_PASS_CREATE_INFO* pCr
if(!vk_validate_vk_rect(&pCreateInfo->renderArea))
{
- char const str[] = "vkCreateRenderPass parameter, VK_RECT pCreateInfo->renderArea, is invalid "\
+ char const str[] = "vkCreateRenderPass parameter, VkRect pCreateInfo->renderArea, is invalid "\
"(precondition).";
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
@@ -1561,7 +1561,7 @@ void PreCreateRenderPass(VK_DEVICE device, const VK_RENDER_PASS_CREATE_INFO* pCr
if(!vk_validate_vk_extent2d(&pCreateInfo->extent))
{
- char const str[] = "vkCreateRenderPass parameter, VK_EXTENT2D pCreateInfo->extent, is invalid "\
+ char const str[] = "vkCreateRenderPass parameter, VkExtent2D pCreateInfo->extent, is invalid "\
"(precondition).";
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
@@ -1569,7 +1569,7 @@ void PreCreateRenderPass(VK_DEVICE device, const VK_RENDER_PASS_CREATE_INFO* pCr
if(pCreateInfo->pColorFormats == nullptr)
{
- char const str[] = "vkCreateRenderPass parameter, VK_FORMAT* pCreateInfo->pColorFormats, "\
+ char const str[] = "vkCreateRenderPass parameter, VkFormat* pCreateInfo->pColorFormats, "\
"is nullptr (precondition).";
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
@@ -1580,20 +1580,20 @@ void PreCreateRenderPass(VK_DEVICE device, const VK_RENDER_PASS_CREATE_INFO* pCr
if(!validate_VK_FORMAT(pCreateInfo->pColorFormats[i]))
{
std::stringstream ss;
- ss << "vkCreateRenderPass parameter, VK_FORMAT pCreateInfo->pColorFormats[" << i <<
+ ss << "vkCreateRenderPass parameter, VkFormat pCreateInfo->pColorFormats[" << i <<
"], is unrecognized (precondition).";
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", ss.str().c_str());
continue;
}
- VK_FORMAT_PROPERTIES properties;
+ VkFormatProperties properties;
size_t size = sizeof(properties);
- VK_RESULT result = nextTable.GetFormatInfo(device, pCreateInfo->pColorFormats[i],
+ VkResult result = nextTable.GetFormatInfo(device, pCreateInfo->pColorFormats[i],
VK_INFO_TYPE_FORMAT_PROPERTIES, &size, &properties);
if(result != VK_SUCCESS)
{
std::stringstream ss;
- ss << "vkCreateRenderPass parameter, VK_FORMAT pCreateInfo->pColorFormats[" << i <<
+ ss << "vkCreateRenderPass parameter, VkFormat pCreateInfo->pColorFormats[" << i <<
"], cannot be validated (precondition).";
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", ss.str().c_str());
continue;
@@ -1602,7 +1602,7 @@ void PreCreateRenderPass(VK_DEVICE device, const VK_RENDER_PASS_CREATE_INFO* pCr
if((properties.linearTilingFeatures) == 0 && (properties.optimalTilingFeatures == 0))
{
std::stringstream ss;
- ss << "vkCreateRenderPass parameter, VK_FORMAT pCreateInfo->pColorFormats[" << i <<
+ ss << "vkCreateRenderPass parameter, VkFormat pCreateInfo->pColorFormats[" << i <<
"], contains unsupported format (precondition).";
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", ss.str().c_str());
continue;
@@ -1612,7 +1612,7 @@ void PreCreateRenderPass(VK_DEVICE device, const VK_RENDER_PASS_CREATE_INFO* pCr
if(pCreateInfo->pColorLayouts == nullptr)
{
- char const str[] = "vkCreateRenderPass parameter, VK_IMAGE_LAYOUT* pCreateInfo->pColorLayouts, "\
+ char const str[] = "vkCreateRenderPass parameter, VkImageLayout* pCreateInfo->pColorLayouts, "\
"is nullptr (precondition).";
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
@@ -1623,7 +1623,7 @@ void PreCreateRenderPass(VK_DEVICE device, const VK_RENDER_PASS_CREATE_INFO* pCr
if(!validate_VK_IMAGE_LAYOUT(pCreateInfo->pColorLayouts[i]))
{
std::stringstream ss;
- ss << "vkCreateRenderPass parameter, VK_IMAGE_LAYOUT pCreateInfo->pColorLayouts[" << i <<
+ ss << "vkCreateRenderPass parameter, VkImageLayout pCreateInfo->pColorLayouts[" << i <<
"], is unrecognized (precondition).";
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", ss.str().c_str());
continue;
@@ -1632,7 +1632,7 @@ void PreCreateRenderPass(VK_DEVICE device, const VK_RENDER_PASS_CREATE_INFO* pCr
if(pCreateInfo->pColorLoadOps == nullptr)
{
- char const str[] = "vkCreateRenderPass parameter, VK_ATTACHMENT_LOAD_OP* pCreateInfo->pColorLoadOps, "\
+ char const str[] = "vkCreateRenderPass parameter, VkAttachmentLoadOp* pCreateInfo->pColorLoadOps, "\
"is nullptr (precondition).";
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
@@ -1643,7 +1643,7 @@ void PreCreateRenderPass(VK_DEVICE device, const VK_RENDER_PASS_CREATE_INFO* pCr
if(!validate_VK_ATTACHMENT_LOAD_OP(pCreateInfo->pColorLoadOps[i]))
{
std::stringstream ss;
- ss << "vkCreateRenderPass parameter, VK_ATTACHMENT_LOAD_OP pCreateInfo->pColorLoadOps[" << i <<
+ ss << "vkCreateRenderPass parameter, VkAttachmentLoadOp pCreateInfo->pColorLoadOps[" << i <<
"], is unrecognized (precondition).";
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", ss.str().c_str());
continue;
@@ -1652,7 +1652,7 @@ void PreCreateRenderPass(VK_DEVICE device, const VK_RENDER_PASS_CREATE_INFO* pCr
if(pCreateInfo->pColorStoreOps == nullptr)
{
- char const str[] = "vkCreateRenderPass parameter, VK_ATTACHMENT_STORE_OP* pCreateInfo->pColorStoreOps, "\
+ char const str[] = "vkCreateRenderPass parameter, VkAttachmentStoreOp* pCreateInfo->pColorStoreOps, "\
"is nullptr (precondition).";
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
@@ -1663,7 +1663,7 @@ void PreCreateRenderPass(VK_DEVICE device, const VK_RENDER_PASS_CREATE_INFO* pCr
if(!validate_VK_ATTACHMENT_STORE_OP(pCreateInfo->pColorStoreOps[i]))
{
std::stringstream ss;
- ss << "vkCreateRenderPass parameter, VK_ATTACHMENT_STORE_OP pCreateInfo->pColorStoreOps[" << i <<
+ ss << "vkCreateRenderPass parameter, VkAttachmentStoreOp pCreateInfo->pColorStoreOps[" << i <<
"], is unrecognized (precondition).";
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", ss.str().c_str());
continue;
@@ -1672,7 +1672,7 @@ void PreCreateRenderPass(VK_DEVICE device, const VK_RENDER_PASS_CREATE_INFO* pCr
if(pCreateInfo->pColorLoadClearValues == nullptr)
{
- char const str[] = "vkCreateRenderPass parameter, VK_CLEAR_COLOR* pCreateInfo->"\
+ char const str[] = "vkCreateRenderPass parameter, VkClearColor* pCreateInfo->"\
"pColorLoadClearValues, is nullptr (precondition).";
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
@@ -1711,7 +1711,7 @@ void PreCreateRenderPass(VK_DEVICE device, const VK_RENDER_PASS_CREATE_INFO* pCr
if(!vk_validate_vk_clear_color(&(pCreateInfo->pColorLoadClearValues[i])))
{
std::stringstream ss;
- ss << "vkCreateRenderPass parameter, VK_CLEAR_COLOR pCreateInfo->pColorLoadClearValues[" << i <<
+ ss << "vkCreateRenderPass parameter, VkClearColor pCreateInfo->pColorLoadClearValues[" << i <<
"], is invalid (precondition).";
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", ss.str().c_str());
continue;
@@ -1720,19 +1720,19 @@ void PreCreateRenderPass(VK_DEVICE device, const VK_RENDER_PASS_CREATE_INFO* pCr
if(!validate_VK_FORMAT(pCreateInfo->depthStencilFormat))
{
- char const str[] = "vkCreateRenderPass parameter, VK_FORMAT pCreateInfo->"\
+ char const str[] = "vkCreateRenderPass parameter, VkFormat pCreateInfo->"\
"depthStencilFormat, is unrecognized (precondition).";
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
}
- VK_FORMAT_PROPERTIES properties;
+ VkFormatProperties properties;
size_t size = sizeof(properties);
- VK_RESULT result = nextTable.GetFormatInfo(device, pCreateInfo->depthStencilFormat,
+ VkResult result = nextTable.GetFormatInfo(device, pCreateInfo->depthStencilFormat,
VK_INFO_TYPE_FORMAT_PROPERTIES, &size, &properties);
if(result != VK_SUCCESS)
{
- char const str[] = "vkCreateRenderPass parameter, VK_FORMAT pCreateInfo->"\
+ char const str[] = "vkCreateRenderPass parameter, VkFormat pCreateInfo->"\
"depthStencilFormat, cannot be validated (precondition).";
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
@@ -1740,7 +1740,7 @@ void PreCreateRenderPass(VK_DEVICE device, const VK_RENDER_PASS_CREATE_INFO* pCr
if((properties.linearTilingFeatures) == 0 && (properties.optimalTilingFeatures == 0))
{
- char const str[] = "vkCreateRenderPass parameter, VK_FORMAT pCreateInfo->"\
+ char const str[] = "vkCreateRenderPass parameter, VkFormat pCreateInfo->"\
"depthStencilFormat, contains unsupported format (precondition).";
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
@@ -1748,7 +1748,7 @@ void PreCreateRenderPass(VK_DEVICE device, const VK_RENDER_PASS_CREATE_INFO* pCr
if(!validate_VK_IMAGE_LAYOUT(pCreateInfo->depthStencilLayout))
{
- char const str[] = "vkCreateRenderPass parameter, VK_IMAGE_LAYOUT pCreateInfo->"\
+ char const str[] = "vkCreateRenderPass parameter, VkImageLayout pCreateInfo->"\
"depthStencilLayout, is unrecognized (precondition).";
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
@@ -1756,7 +1756,7 @@ void PreCreateRenderPass(VK_DEVICE device, const VK_RENDER_PASS_CREATE_INFO* pCr
if(!validate_VK_ATTACHMENT_LOAD_OP(pCreateInfo->depthLoadOp))
{
- char const str[] = "vkCreateRenderPass parameter, VK_ATTACHMENT_LOAD_OP pCreateInfo->"\
+ char const str[] = "vkCreateRenderPass parameter, VkAttachmentLoadOp pCreateInfo->"\
"depthLoadOp, is unrecognized (precondition).";
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
@@ -1764,7 +1764,7 @@ void PreCreateRenderPass(VK_DEVICE device, const VK_RENDER_PASS_CREATE_INFO* pCr
if(!validate_VK_ATTACHMENT_STORE_OP(pCreateInfo->depthStoreOp))
{
- char const str[] = "vkCreateRenderPass parameter, VK_ATTACHMENT_STORE_OP pCreateInfo->"\
+ char const str[] = "vkCreateRenderPass parameter, VkAttachmentStoreOp pCreateInfo->"\
"depthStoreOp, is unrecognized (precondition).";
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
@@ -1772,7 +1772,7 @@ void PreCreateRenderPass(VK_DEVICE device, const VK_RENDER_PASS_CREATE_INFO* pCr
if(!validate_VK_ATTACHMENT_LOAD_OP(pCreateInfo->stencilLoadOp))
{
- char const str[] = "vkCreateRenderPass parameter, VK_ATTACHMENT_LOAD_OP pCreateInfo->"\
+ char const str[] = "vkCreateRenderPass parameter, VkAttachmentLoadOp pCreateInfo->"\
"stencilLoadOp, is unrecognized (precondition).";
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
@@ -1780,18 +1780,18 @@ void PreCreateRenderPass(VK_DEVICE device, const VK_RENDER_PASS_CREATE_INFO* pCr
if(!validate_VK_ATTACHMENT_STORE_OP(pCreateInfo->stencilStoreOp))
{
- char const str[] = "vkCreateRenderPass parameter, VK_ATTACHMENT_STORE_OP pCreateInfo->"\
+ char const str[] = "vkCreateRenderPass parameter, VkAttachmentStoreOp pCreateInfo->"\
"stencilStoreOp, is unrecognized (precondition).";
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
}
}
-void PostCreateRenderPass(VK_RESULT result, VK_RENDER_PASS* pRenderPass)
+void PostCreateRenderPass(VkResult result, VkRenderPass* pRenderPass)
{
if(result != VK_SUCCESS)
{
- // TODO: Spit out VK_RESULT value.
+ // TODO: Spit out VkResult value.
char const str[] = "vkCreateRenderPass failed (postcondition).";
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
@@ -1799,21 +1799,21 @@ void PostCreateRenderPass(VK_RESULT result, VK_RENDER_PASS* pRenderPass)
if(pRenderPass == nullptr)
{
- char const str[] = "vkCreateRenderPass parameter, VK_RENDER_PASS* pRenderPass, is nullptr (postcondition).";
+ char const str[] = "vkCreateRenderPass parameter, VkRenderPass* pRenderPass, is nullptr (postcondition).";
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
}
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkCreateRenderPass(VK_DEVICE device, const VK_RENDER_PASS_CREATE_INFO* pCreateInfo, VK_RENDER_PASS* pRenderPass)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, VkRenderPass* pRenderPass)
{
PreCreateRenderPass(device, pCreateInfo);
- VK_RESULT result = nextTable.CreateRenderPass(device, pCreateInfo, pRenderPass);
+ VkResult result = nextTable.CreateRenderPass(device, pCreateInfo, pRenderPass);
PostCreateRenderPass(result, pRenderPass);
return result;
}
-VK_LAYER_EXPORT void VKAPI vkCmdBeginRenderPass(VK_CMD_BUFFER cmdBuffer, const VK_RENDER_PASS_BEGIN* pRenderPassBegin)
+VK_LAYER_EXPORT void VKAPI vkCmdBeginRenderPass(VkCmdBuffer cmdBuffer, const VkRenderPassBegin* pRenderPassBegin)
{
char str[1024];
if (!pRenderPassBegin) {
@@ -1827,24 +1827,24 @@ VK_LAYER_EXPORT void VKAPI vkCmdBeginRenderPass(VK_CMD_BUFFER cmdBuffer, const V
nextTable.CmdBeginRenderPass(cmdBuffer, pRenderPassBegin);
}
-VK_LAYER_EXPORT void VKAPI vkCmdEndRenderPass(VK_CMD_BUFFER cmdBuffer, VK_RENDER_PASS renderPass)
+VK_LAYER_EXPORT void VKAPI vkCmdEndRenderPass(VkCmdBuffer cmdBuffer, VkRenderPass renderPass)
{
nextTable.CmdEndRenderPass(cmdBuffer, renderPass);
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkDbgSetValidationLevel(VK_DEVICE device, VK_VALIDATION_LEVEL validationLevel)
+VK_LAYER_EXPORT VkResult VKAPI vkDbgSetValidationLevel(VkDevice device, VkValidationLevel validationLevel)
{
char str[1024];
if (!validate_VK_VALIDATION_LEVEL(validationLevel)) {
sprintf(str, "Parameter validationLevel to function DbgSetValidationLevel has invalid value of %i.", (int)validationLevel);
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VK_RESULT result = nextTable.DbgSetValidationLevel(device, validationLevel);
+ VkResult result = nextTable.DbgSetValidationLevel(device, validationLevel);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkDbgRegisterMsgCallback(VK_INSTANCE instance, VK_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback, void* pUserData)
+VK_LAYER_EXPORT VkResult VKAPI vkDbgRegisterMsgCallback(VkInstance instance, VK_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback, void* pUserData)
{
// This layer intercepts callbacks
VK_LAYER_DBG_FUNCTION_NODE *pNewDbgFuncNode = (VK_LAYER_DBG_FUNCTION_NODE*)malloc(sizeof(VK_LAYER_DBG_FUNCTION_NODE));
@@ -1858,11 +1858,11 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkDbgRegisterMsgCallback(VK_INSTANCE instance, V
if (g_actionIsDefault) {
g_debugAction = VK_DBG_LAYER_ACTION_CALLBACK;
}
- VK_RESULT result = nextTable.DbgRegisterMsgCallback(instance, pfnMsgCallback, pUserData);
+ VkResult result = nextTable.DbgRegisterMsgCallback(instance, pfnMsgCallback, pUserData);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkDbgUnregisterMsgCallback(VK_INSTANCE instance, VK_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback)
+VK_LAYER_EXPORT VkResult VKAPI vkDbgUnregisterMsgCallback(VkInstance instance, VK_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback)
{
VK_LAYER_DBG_FUNCTION_NODE *pTrav = g_pDbgFunctionHead;
VK_LAYER_DBG_FUNCTION_NODE *pPrev = pTrav;
@@ -1884,45 +1884,45 @@ VK_LAYER_EXPORT VK_RESULT VKAPI vkDbgUnregisterMsgCallback(VK_INSTANCE instance,
else
g_debugAction = (VK_LAYER_DBG_ACTION)(g_debugAction & ~((uint32_t)VK_DBG_LAYER_ACTION_CALLBACK));
}
- VK_RESULT result = nextTable.DbgUnregisterMsgCallback(instance, pfnMsgCallback);
+ VkResult result = nextTable.DbgUnregisterMsgCallback(instance, pfnMsgCallback);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkDbgSetMessageFilter(VK_DEVICE device, int32_t msgCode, VK_DBG_MSG_FILTER filter)
+VK_LAYER_EXPORT VkResult VKAPI vkDbgSetMessageFilter(VkDevice device, int32_t msgCode, VK_DBG_MSG_FILTER filter)
{
- VK_RESULT result = nextTable.DbgSetMessageFilter(device, msgCode, filter);
+ VkResult result = nextTable.DbgSetMessageFilter(device, msgCode, filter);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkDbgSetObjectTag(VK_BASE_OBJECT object, size_t tagSize, const void* pTag)
+VK_LAYER_EXPORT VkResult VKAPI vkDbgSetObjectTag(VkBaseObject object, size_t tagSize, const void* pTag)
{
- VK_RESULT result = nextTable.DbgSetObjectTag(object, tagSize, pTag);
+ VkResult result = nextTable.DbgSetObjectTag(object, tagSize, pTag);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkDbgSetGlobalOption(VK_INSTANCE instance, VK_DBG_GLOBAL_OPTION dbgOption, size_t dataSize, const void* pData)
+VK_LAYER_EXPORT VkResult VKAPI vkDbgSetGlobalOption(VkInstance instance, VK_DBG_GLOBAL_OPTION dbgOption, size_t dataSize, const void* pData)
{
- VK_RESULT result = nextTable.DbgSetGlobalOption(instance, dbgOption, dataSize, pData);
+ VkResult result = nextTable.DbgSetGlobalOption(instance, dbgOption, dataSize, pData);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkDbgSetDeviceOption(VK_DEVICE device, VK_DBG_DEVICE_OPTION dbgOption, size_t dataSize, const void* pData)
+VK_LAYER_EXPORT VkResult VKAPI vkDbgSetDeviceOption(VkDevice device, VK_DBG_DEVICE_OPTION dbgOption, size_t dataSize, const void* pData)
{
- VK_RESULT result = nextTable.DbgSetDeviceOption(device, dbgOption, dataSize, pData);
+ VkResult result = nextTable.DbgSetDeviceOption(device, dbgOption, dataSize, pData);
return result;
}
-VK_LAYER_EXPORT void VKAPI vkCmdDbgMarkerBegin(VK_CMD_BUFFER cmdBuffer, const char* pMarker)
+VK_LAYER_EXPORT void VKAPI vkCmdDbgMarkerBegin(VkCmdBuffer cmdBuffer, const char* pMarker)
{
nextTable.CmdDbgMarkerBegin(cmdBuffer, pMarker);
}
-VK_LAYER_EXPORT void VKAPI vkCmdDbgMarkerEnd(VK_CMD_BUFFER cmdBuffer)
+VK_LAYER_EXPORT void VKAPI vkCmdDbgMarkerEnd(VkCmdBuffer cmdBuffer)
{
nextTable.CmdDbgMarkerEnd(cmdBuffer);
@@ -1930,41 +1930,41 @@ VK_LAYER_EXPORT void VKAPI vkCmdDbgMarkerEnd(VK_CMD_BUFFER cmdBuffer)
#if defined(__linux__) || defined(XCB_NVIDIA)
-VK_LAYER_EXPORT VK_RESULT VKAPI vkWsiX11AssociateConnection(VK_PHYSICAL_GPU gpu, const VK_WSI_X11_CONNECTION_INFO* pConnectionInfo)
+VK_LAYER_EXPORT VkResult VKAPI vkWsiX11AssociateConnection(VkPhysicalGpu gpu, const VK_WSI_X11_CONNECTION_INFO* pConnectionInfo)
{
VK_BASE_LAYER_OBJECT* gpuw = (VK_BASE_LAYER_OBJECT *) gpu;
pCurObj = gpuw;
loader_platform_thread_once(&tabOnce, initParamChecker);
- VK_RESULT result = nextTable.WsiX11AssociateConnection((VK_PHYSICAL_GPU)gpuw->nextObject, pConnectionInfo);
+ VkResult result = nextTable.WsiX11AssociateConnection((VkPhysicalGpu)gpuw->nextObject, pConnectionInfo);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkWsiX11GetMSC(VK_DEVICE device, xcb_window_t window, xcb_randr_crtc_t crtc, uint64_t* pMsc)
+VK_LAYER_EXPORT VkResult VKAPI vkWsiX11GetMSC(VkDevice device, xcb_window_t window, xcb_randr_crtc_t crtc, uint64_t* pMsc)
{
- VK_RESULT result = nextTable.WsiX11GetMSC(device, window, crtc, pMsc);
+ VkResult result = nextTable.WsiX11GetMSC(device, window, crtc, pMsc);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkWsiX11CreatePresentableImage(VK_DEVICE device, const VK_WSI_X11_PRESENTABLE_IMAGE_CREATE_INFO* pCreateInfo, VK_IMAGE* pImage, VK_GPU_MEMORY* pMem)
+VK_LAYER_EXPORT VkResult VKAPI vkWsiX11CreatePresentableImage(VkDevice device, const VK_WSI_X11_PRESENTABLE_IMAGE_CREATE_INFO* pCreateInfo, VkImage* pImage, VkGpuMemory* pMem)
{
- VK_RESULT result = nextTable.WsiX11CreatePresentableImage(device, pCreateInfo, pImage, pMem);
+ VkResult result = nextTable.WsiX11CreatePresentableImage(device, pCreateInfo, pImage, pMem);
return result;
}
-VK_LAYER_EXPORT VK_RESULT VKAPI vkWsiX11QueuePresent(VK_QUEUE queue, const VK_WSI_X11_PRESENT_INFO* pPresentInfo, VK_FENCE fence)
+VK_LAYER_EXPORT VkResult VKAPI vkWsiX11QueuePresent(VkQueue queue, const VK_WSI_X11_PRESENT_INFO* pPresentInfo, VkFence fence)
{
- VK_RESULT result = nextTable.WsiX11QueuePresent(queue, pPresentInfo, fence);
+ VkResult result = nextTable.WsiX11QueuePresent(queue, pPresentInfo, fence);
return result;
}
#endif
#include "vk_generic_intercept_proc_helper.h"
-VK_LAYER_EXPORT void* VKAPI vkGetProcAddr(VK_PHYSICAL_GPU gpu, const char* funcName)
+VK_LAYER_EXPORT void* VKAPI vkGetProcAddr(VkPhysicalGpu gpu, const char* funcName)
{
VK_BASE_LAYER_OBJECT* gpuw = (VK_BASE_LAYER_OBJECT *) gpu;
void* addr;
@@ -1979,7 +1979,7 @@ VK_LAYER_EXPORT void* VKAPI vkGetProcAddr(VK_PHYSICAL_GPU gpu, const char* funcN
else {
if (gpuw->pGPA == NULL)
return NULL;
- return gpuw->pGPA((VK_PHYSICAL_GPU)gpuw->nextObject, funcName);
+ return gpuw->pGPA((VkPhysicalGpu)gpuw->nextObject, funcName);
}
}
diff --git a/loader/loader.c b/loader/loader.c
index af88c415..b314ccb9 100644
--- a/loader/loader.c
+++ b/loader/loader.c
@@ -79,12 +79,12 @@ struct loader_icd {
struct loader_scanned_icds {
loader_platform_dl_handle handle;
- vkGetProcAddrType GetProcAddr;
- vkCreateInstanceType CreateInstance;
- vkDestroyInstanceType DestroyInstance;
- vkEnumerateGpusType EnumerateGpus;
- vkGetExtensionSupportType GetExtensionSupport;
- VK_INSTANCE instance;
+ PFN_vkGetProcAddr GetProcAddr;
+ PFN_vkCreateInstance CreateInstance;
+ PFN_vkDestroyInstance DestroyInstance;
+ PFN_vkEnumerateGpus EnumerateGpus;
+ PFN_vkGetExtensionSupport GetExtensionSupport;
+ VkInstance instance;
struct loader_scanned_icds *next;
};
@@ -274,7 +274,7 @@ static void loader_scanned_icd_add(const char *filename)
}
#define LOOKUP(func_ptr, func) do { \
- func_ptr = (vk ##func## Type) loader_platform_get_proc_address(handle, "vk" #func); \
+ func_ptr = (PFN_vk ##func) loader_platform_get_proc_address(handle, "vk" #func); \
if (!func_ptr) { \
loader_log(VK_DBG_MSG_WARNING, 0, loader_platform_get_proc_address_error("vk" #func)); \
return; \
@@ -515,7 +515,7 @@ static void layer_lib_scan(void)
loader.layer_scanned = true;
}
-static void loader_init_dispatch_table(VK_LAYER_DISPATCH_TABLE *tab, vkGetProcAddrType fpGPA, VK_PHYSICAL_GPU gpu)
+static void loader_init_dispatch_table(VK_LAYER_DISPATCH_TABLE *tab, PFN_vkGetProcAddr fpGPA, VkPhysicalGpu gpu)
{
loader_initialize_dispatch_table(tab, fpGPA, gpu);
@@ -578,12 +578,12 @@ static void loader_init_layer_libs(struct loader_icd *icd, uint32_t gpu_index, s
}
}
-static VK_RESULT find_layer_extension(struct loader_icd *icd, uint32_t gpu_index, const char *pExtName, const char **lib_name)
+static VkResult find_layer_extension(struct loader_icd *icd, uint32_t gpu_index, const char *pExtName, const char **lib_name)
{
- VK_RESULT err;
+ VkResult err;
char *search_name;
loader_platform_dl_handle handle;
- vkGetExtensionSupportType fpGetExtensionSupport;
+ PFN_vkGetExtensionSupport fpGetExtensionSupport;
/*
* The loader provides the abstraction that make layers and extensions work via
@@ -596,7 +596,7 @@ static VK_RESULT find_layer_extension(struct loader_icd *icd, uint32_t gpu_index
// TODO: What if extension is in multiple places?
// TODO: Who should we ask first? Driver or layers? Do driver for now.
- err = icd->scanned_icds[gpu_index].GetExtensionSupport((VK_PHYSICAL_GPU) (icd->gpus[gpu_index].nextObject), pExtName);
+ err = icd->scanned_icds[gpu_index].GetExtensionSupport((VkPhysicalGpu) (icd->gpus[gpu_index].nextObject), pExtName);
if (err == VK_SUCCESS) {
if (lib_name) {
*lib_name = NULL;
@@ -614,7 +614,7 @@ static VK_RESULT find_layer_extension(struct loader_icd *icd, uint32_t gpu_index
if (fpGetExtensionSupport != NULL) {
// Found layer's GetExtensionSupport call
- err = fpGetExtensionSupport((VK_PHYSICAL_GPU) (icd->gpus + gpu_index), pExtName);
+ err = fpGetExtensionSupport((VkPhysicalGpu) (icd->gpus + gpu_index), pExtName);
loader_platform_close_library(handle);
@@ -788,7 +788,7 @@ static void loader_deactivate_layer(const struct loader_instance *instance)
}
}
-extern uint32_t loader_activate_layers(VK_PHYSICAL_GPU gpu, const VkDeviceCreateInfo* pCreateInfo)
+extern uint32_t loader_activate_layers(VkPhysicalGpu gpu, const VkDeviceCreateInfo* pCreateInfo)
{
uint32_t gpu_index;
uint32_t count;
@@ -803,7 +803,7 @@ extern uint32_t loader_activate_layers(VK_PHYSICAL_GPU gpu, const VkDeviceCreate
if (!loader_layers_activated(icd, gpu_index)) {
VK_BASE_LAYER_OBJECT *gpuObj = (VK_BASE_LAYER_OBJECT *) gpu;
VK_BASE_LAYER_OBJECT *nextGpuObj, *baseObj = gpuObj->baseObject;
- vkGetProcAddrType nextGPA = vkGetProcAddr;
+ PFN_vkGetProcAddr nextGPA = vkGetProcAddr;
count = loader_get_layer_libs(icd, gpu_index, pCreateInfo, &pLayerNames);
if (!count)
@@ -822,8 +822,8 @@ extern uint32_t loader_activate_layers(VK_PHYSICAL_GPU gpu, const VkDeviceCreate
char funcStr[256];
snprintf(funcStr, 256, "%sGetProcAddr",icd->layer_libs[gpu_index][i].name);
- if ((nextGPA = (vkGetProcAddrType) loader_platform_get_proc_address(icd->layer_libs[gpu_index][i].lib_handle, funcStr)) == NULL)
- nextGPA = (vkGetProcAddrType) loader_platform_get_proc_address(icd->layer_libs[gpu_index][i].lib_handle, "vkGetProcAddr");
+ if ((nextGPA = (PFN_vkGetProcAddr) loader_platform_get_proc_address(icd->layer_libs[gpu_index][i].lib_handle, funcStr)) == NULL)
+ nextGPA = (PFN_vkGetProcAddr) loader_platform_get_proc_address(icd->layer_libs[gpu_index][i].lib_handle, "vkGetProcAddr");
if (!nextGPA) {
loader_log(VK_DBG_MSG_ERROR, 0, "Failed to find vkGetProcAddr in layer %s", icd->layer_libs[gpu_index][i].name);
continue;
@@ -857,16 +857,16 @@ extern uint32_t loader_activate_layers(VK_PHYSICAL_GPU gpu, const VkDeviceCreate
return icd->layer_count[gpu_index];
}
-LOADER_EXPORT VK_RESULT VKAPI vkCreateInstance(
+LOADER_EXPORT VkResult VKAPI vkCreateInstance(
const VkInstanceCreateInfo* pCreateInfo,
- VK_INSTANCE* pInstance)
+ VkInstance* pInstance)
{
static LOADER_PLATFORM_THREAD_ONCE_DECLARATION(once_icd);
static LOADER_PLATFORM_THREAD_ONCE_DECLARATION(once_layer);
struct loader_instance *ptr_instance = NULL;
struct loader_scanned_icds *scanned_icds;
struct loader_icd *icd;
- VK_RESULT res = VK_ERROR_INITIALIZATION_FAILED;
+ VkResult res = VK_ERROR_INITIALIZATION_FAILED;
/* Scan/discover all ICD libraries in a single-threaded manner */
loader_platform_thread_once(&once_icd, loader_icd_scan);
@@ -905,16 +905,16 @@ LOADER_EXPORT VK_RESULT VKAPI vkCreateInstance(
return VK_ERROR_INCOMPATIBLE_DRIVER;
}
- *pInstance = (VK_INSTANCE) ptr_instance;
+ *pInstance = (VkInstance) ptr_instance;
return VK_SUCCESS;
}
-LOADER_EXPORT VK_RESULT VKAPI vkDestroyInstance(
- VK_INSTANCE instance)
+LOADER_EXPORT VkResult VKAPI vkDestroyInstance(
+ VkInstance instance)
{
struct loader_instance *ptr_instance = (struct loader_instance *) instance;
struct loader_scanned_icds *scanned_icds;
- VK_RESULT res;
+ VkResult res;
// Remove this instance from the list of instances:
struct loader_instance *prev = NULL;
@@ -955,24 +955,24 @@ LOADER_EXPORT VK_RESULT VKAPI vkDestroyInstance(
return VK_SUCCESS;
}
-LOADER_EXPORT VK_RESULT VKAPI vkEnumerateGpus(
+LOADER_EXPORT VkResult VKAPI vkEnumerateGpus(
- VK_INSTANCE instance,
+ VkInstance instance,
uint32_t maxGpus,
uint32_t* pGpuCount,
- VK_PHYSICAL_GPU* pGpus)
+ VkPhysicalGpu* pGpus)
{
struct loader_instance *ptr_instance = (struct loader_instance *) instance;
struct loader_icd *icd;
uint32_t count = 0;
- VK_RESULT res;
+ VkResult res;
//in spirit of VK don't error check on the instance parameter
icd = ptr_instance->icds;
while (icd) {
- VK_PHYSICAL_GPU gpus[VK_MAX_PHYSICAL_GPUS];
+ VkPhysicalGpu gpus[VK_MAX_PHYSICAL_GPUS];
VK_BASE_LAYER_OBJECT * wrapped_gpus;
- vkGetProcAddrType get_proc_addr = icd->scanned_icds->GetProcAddr;
+ PFN_vkGetProcAddr get_proc_addr = icd->scanned_icds->GetProcAddr;
uint32_t n, max = maxGpus - count;
if (max > VK_MAX_PHYSICAL_GPUS) {
@@ -1024,7 +1024,7 @@ LOADER_EXPORT VK_RESULT VKAPI vkEnumerateGpus(
return (count > 0) ? VK_SUCCESS : res;
}
-LOADER_EXPORT void * VKAPI vkGetProcAddr(VK_PHYSICAL_GPU gpu, const char * pName)
+LOADER_EXPORT void * VKAPI vkGetProcAddr(VkPhysicalGpu gpu, const char * pName)
{
if (gpu == NULL) {
return NULL;
@@ -1046,7 +1046,7 @@ LOADER_EXPORT void * VKAPI vkGetProcAddr(VK_PHYSICAL_GPU gpu, const char * pName
}
}
-LOADER_EXPORT VK_RESULT VKAPI vkGetExtensionSupport(VK_PHYSICAL_GPU gpu, const char *pExtName)
+LOADER_EXPORT VkResult VKAPI vkGetExtensionSupport(VkPhysicalGpu gpu, const char *pExtName)
{
uint32_t gpu_index;
struct loader_icd *icd = loader_get_icd((const VK_BASE_LAYER_OBJECT *) gpu, &gpu_index);
@@ -1057,14 +1057,14 @@ LOADER_EXPORT VK_RESULT VKAPI vkGetExtensionSupport(VK_PHYSICAL_GPU gpu, const c
return find_layer_extension(icd, gpu_index, pExtName, NULL);
}
-LOADER_EXPORT VK_RESULT VKAPI vkEnumerateLayers(VK_PHYSICAL_GPU gpu, size_t maxLayerCount, size_t maxStringSize, size_t* pOutLayerCount, char* const* pOutLayers, void* pReserved)
+LOADER_EXPORT VkResult VKAPI vkEnumerateLayers(VkPhysicalGpu gpu, size_t maxLayerCount, size_t maxStringSize, size_t* pOutLayerCount, char* const* pOutLayers, void* pReserved)
{
uint32_t gpu_index;
size_t count = 0;
char *lib_name;
struct loader_icd *icd = loader_get_icd((const VK_BASE_LAYER_OBJECT *) gpu, &gpu_index);
loader_platform_dl_handle handle;
- vkEnumerateLayersType fpEnumerateLayers;
+ PFN_vkEnumerateLayers fpEnumerateLayers;
char layer_buf[16][256];
char * layers[16];
@@ -1108,7 +1108,7 @@ LOADER_EXPORT VK_RESULT VKAPI vkEnumerateLayers(VK_PHYSICAL_GPU gpu, size_t maxL
} else {
size_t cnt;
uint32_t n;
- VK_RESULT res;
+ VkResult res;
n = (uint32_t) ((maxStringSize < 256) ? maxStringSize : 256);
res = fpEnumerateLayers(NULL, 16, n, &cnt, layers, (char *) icd->gpus + gpu_index);
loader_platform_close_library(handle);
@@ -1130,11 +1130,11 @@ LOADER_EXPORT VK_RESULT VKAPI vkEnumerateLayers(VK_PHYSICAL_GPU gpu, size_t maxL
return VK_SUCCESS;
}
-LOADER_EXPORT VK_RESULT VKAPI vkDbgRegisterMsgCallback(VK_INSTANCE instance, VK_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback, void* pUserData)
+LOADER_EXPORT VkResult VKAPI vkDbgRegisterMsgCallback(VkInstance instance, VK_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback, void* pUserData)
{
const struct loader_icd *icd;
struct loader_instance *inst;
- VK_RESULT res;
+ VkResult res;
uint32_t gpu_idx;
if (instance == VK_NULL_HANDLE)
@@ -1181,9 +1181,9 @@ LOADER_EXPORT VK_RESULT VKAPI vkDbgRegisterMsgCallback(VK_INSTANCE instance, VK_
return VK_SUCCESS;
}
-LOADER_EXPORT VK_RESULT VKAPI vkDbgUnregisterMsgCallback(VK_INSTANCE instance, VK_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback)
+LOADER_EXPORT VkResult VKAPI vkDbgUnregisterMsgCallback(VkInstance instance, VK_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback)
{
- VK_RESULT res = VK_SUCCESS;
+ VkResult res = VK_SUCCESS;
struct loader_instance *inst;
if (instance == VK_NULL_HANDLE)
return VK_ERROR_INVALID_HANDLE;
@@ -1200,7 +1200,7 @@ LOADER_EXPORT VK_RESULT VKAPI vkDbgUnregisterMsgCallback(VK_INSTANCE instance, V
for (const struct loader_icd * icd = inst->icds; icd; icd = icd->next) {
for (uint32_t i = 0; i < icd->gpu_count; i++) {
- VK_RESULT r;
+ VkResult r;
r = (icd->loader_dispatch + i)->DbgUnregisterMsgCallback(icd->scanned_icds->instance, pfnMsgCallback);
if (r != VK_SUCCESS) {
res = r;
@@ -1210,9 +1210,9 @@ LOADER_EXPORT VK_RESULT VKAPI vkDbgUnregisterMsgCallback(VK_INSTANCE instance, V
return res;
}
-LOADER_EXPORT VK_RESULT VKAPI vkDbgSetGlobalOption(VK_INSTANCE instance, VK_DBG_GLOBAL_OPTION dbgOption, size_t dataSize, const void* pData)
+LOADER_EXPORT VkResult VKAPI vkDbgSetGlobalOption(VkInstance instance, VK_DBG_GLOBAL_OPTION dbgOption, size_t dataSize, const void* pData)
{
- VK_RESULT res = VK_SUCCESS;
+ VkResult res = VK_SUCCESS;
struct loader_instance *inst;
if (instance == VK_NULL_HANDLE)
return VK_ERROR_INVALID_HANDLE;
@@ -1228,7 +1228,7 @@ LOADER_EXPORT VK_RESULT VKAPI vkDbgSetGlobalOption(VK_INSTANCE instance, VK_DBG_
return VK_ERROR_INVALID_HANDLE;
for (const struct loader_icd * icd = inst->icds; icd; icd = icd->next) {
for (uint32_t i = 0; i < icd->gpu_count; i++) {
- VK_RESULT r;
+ VkResult r;
r = (icd->loader_dispatch + i)->DbgSetGlobalOption(icd->scanned_icds->instance, dbgOption,
dataSize, pData);
/* unfortunately we cannot roll back */
diff --git a/loader/loader.h b/loader/loader.h
index f40a36a3..2967322b 100644
--- a/loader/loader.h
+++ b/loader/loader.h
@@ -65,16 +65,16 @@ static inline void loader_init_data(void *obj, const void *data)
loader_set_data(obj, data);
}
-static inline void *loader_unwrap_gpu(VK_PHYSICAL_GPU *gpu)
+static inline void *loader_unwrap_gpu(VkPhysicalGpu *gpu)
{
const VK_BASE_LAYER_OBJECT *wrap = (const VK_BASE_LAYER_OBJECT *) *gpu;
- *gpu = (VK_PHYSICAL_GPU) wrap->nextObject;
+ *gpu = (VkPhysicalGpu) wrap->nextObject;
return loader_get_data(wrap->baseObject);
}
-extern uint32_t loader_activate_layers(VK_PHYSICAL_GPU gpu, const VkDeviceCreateInfo* pCreateInfo);
+extern uint32_t loader_activate_layers(VkPhysicalGpu gpu, const VkDeviceCreateInfo* pCreateInfo);
#define MAX_LAYER_LIBRARIES 64
#endif /* LOADER_H */
diff --git a/vk-generate.py b/vk-generate.py
index 6cccd0c9..6591b1c9 100755
--- a/vk-generate.py
+++ b/vk-generate.py
@@ -156,7 +156,7 @@ class LoaderEntrypointsSubcommand(Subcommand):
# declare local variables
func.append(" const VK_LAYER_DISPATCH_TABLE *disp;")
if proto.ret != 'void' and obj_setup:
- func.append(" VK_RESULT res;")
+ func.append(" VkResult res;")
func.append("")
# active layers before dispatching CreateDevice
@@ -168,7 +168,7 @@ class LoaderEntrypointsSubcommand(Subcommand):
# get dispatch table and unwrap GPUs
for param in proto.params:
stmt = ""
- if param.ty == "VK_PHYSICAL_GPU":
+ if param.ty == "VkPhysicalGpu":
stmt = "loader_unwrap_gpu(&%s);" % param.name
if param == proto.params[0]:
stmt = "disp = " + stmt
@@ -230,16 +230,16 @@ class DispatchTableOpsSubcommand(Subcommand):
stmts.append("table->%s = gpa; /* direct assignment */" %
proto.name)
else:
- stmts.append("table->%s = (vk%sType) gpa(gpu, \"vk%s\");" %
+ stmts.append("table->%s = (PFN_vk%s) gpa(gpu, \"vk%s\");" %
(proto.name, proto.name, proto.name))
stmts.append("#endif")
func = []
func.append("static inline void %s_initialize_dispatch_table(VK_LAYER_DISPATCH_TABLE *table,"
% self.prefix)
- func.append("%s vkGetProcAddrType gpa,"
+ func.append("%s PFN_vkGetProcAddr gpa,"
% (" " * len(self.prefix)))
- func.append("%s VK_PHYSICAL_GPU gpu)"
+ func.append("%s VkPhysicalGpu gpu)"
% (" " * len(self.prefix)))
func.append("{")
func.append(" %s" % "\n ".join(stmts))
diff --git a/vk-layer-generate.py b/vk-layer-generate.py
index ec8a97c7..c924cfd9 100755
--- a/vk-layer-generate.py
+++ b/vk-layer-generate.py
@@ -134,7 +134,7 @@ class Subcommand(object):
return ("%i", "*(%s)" % name)
return ("%i", name)
# TODO : This is special-cased as there's only one "format" param currently and it's nice to expand it
- if "VK_FORMAT" == vk_type:
+ if "VkFormat" == vk_type:
if cpp:
return ("%p", "&%s" % name)
return ("{%s.channelFormat = %%s, %s.numericFormat = %%s}" % (name, name), "string_VK_CHANNEL_FORMAT(%s.channelFormat), string_VK_NUM_FORMAT(%s.numericFormat)" % (name, name))
@@ -146,7 +146,7 @@ class Subcommand(object):
def _gen_layer_dbg_callback_register(self):
r_body = []
- r_body.append('VK_LAYER_EXPORT VK_RESULT VKAPI vkDbgRegisterMsgCallback(VK_INSTANCE instance, VK_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback, void* pUserData)')
+ r_body.append('VK_LAYER_EXPORT VkResult VKAPI vkDbgRegisterMsgCallback(VkInstance instance, VK_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback, void* pUserData)')
r_body.append('{')
r_body.append(' // This layer intercepts callbacks')
r_body.append(' VK_LAYER_DBG_FUNCTION_NODE *pNewDbgFuncNode = (VK_LAYER_DBG_FUNCTION_NODE*)malloc(sizeof(VK_LAYER_DBG_FUNCTION_NODE));')
@@ -160,14 +160,14 @@ class Subcommand(object):
r_body.append(' if (g_actionIsDefault) {')
r_body.append(' g_debugAction = VK_DBG_LAYER_ACTION_CALLBACK;')
r_body.append(' }')
- r_body.append(' VK_RESULT result = nextTable.DbgRegisterMsgCallback(instance, pfnMsgCallback, pUserData);')
+ r_body.append(' VkResult result = nextTable.DbgRegisterMsgCallback(instance, pfnMsgCallback, pUserData);')
r_body.append(' return result;')
r_body.append('}')
return "\n".join(r_body)
def _gen_layer_dbg_callback_unregister(self):
ur_body = []
- ur_body.append('VK_LAYER_EXPORT VK_RESULT VKAPI vkDbgUnregisterMsgCallback(VK_INSTANCE instance, VK_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback)')
+ ur_body.append('VK_LAYER_EXPORT VkResult VKAPI vkDbgUnregisterMsgCallback(VkInstance instance, VK_DBG_MSG_CALLBACK_FUNCTION pfnMsgCallback)')
ur_body.append('{')
ur_body.append(' VK_LAYER_DBG_FUNCTION_NODE *pTrav = g_pDbgFunctionHead;')
ur_body.append(' VK_LAYER_DBG_FUNCTION_NODE *pPrev = pTrav;')
@@ -189,16 +189,16 @@ class Subcommand(object):
ur_body.append(' else')
ur_body.append(' g_debugAction &= ~VK_DBG_LAYER_ACTION_CALLBACK;')
ur_body.append(' }')
- ur_body.append(' VK_RESULT result = nextTable.DbgUnregisterMsgCallback(instance, pfnMsgCallback);')
+ ur_body.append(' VkResult result = nextTable.DbgUnregisterMsgCallback(instance, pfnMsgCallback);')
ur_body.append(' return result;')
ur_body.append('}')
return "\n".join(ur_body)
def _gen_layer_get_extension_support(self, layer="Generic"):
ges_body = []
- ges_body.append('VK_LAYER_EXPORT VK_RESULT VKAPI vkGetExtensionSupport(VK_PHYSICAL_GPU gpu, const char* pExtName)')
+ ges_body.append('VK_LAYER_EXPORT VkResult VKAPI vkGetExtensionSupport(VkPhysicalGpu gpu, const char* pExtName)')
ges_body.append('{')
- ges_body.append(' VK_RESULT result;')
+ ges_body.append(' VkResult result;')
ges_body.append(' VK_BASE_LAYER_OBJECT* gpuw = (VK_BASE_LAYER_OBJECT *) gpu;')
ges_body.append('')
ges_body.append(' /* This entrypoint is NOT going to init its own dispatch table since loader calls here early */')
@@ -207,7 +207,7 @@ class Subcommand(object):
ges_body.append(' result = VK_SUCCESS;')
ges_body.append(' } else if (nextTable.GetExtensionSupport != NULL)')
ges_body.append(' {')
- ges_body.append(' result = nextTable.GetExtensionSupport((VK_PHYSICAL_GPU)gpuw->nextObject, pExtName);')
+ ges_body.append(' result = nextTable.GetExtensionSupport((VkPhysicalGpu)gpuw->nextObject, pExtName);')
ges_body.append(' } else')
ges_body.append(' {')
ges_body.append(' result = VK_ERROR_INVALID_EXTENSION;')
@@ -270,7 +270,7 @@ class Subcommand(object):
exts.append(' return (type == VK_OBJECT_TYPE_ANY) ? numTotalObjs : numObjs[type];')
exts.append('}')
exts.append('')
- exts.append('VK_RESULT objTrackGetObjects(VK_OBJECT_TYPE type, uint64_t objCount, OBJTRACK_NODE* pObjNodeArray)')
+ exts.append('VkResult objTrackGetObjects(VK_OBJECT_TYPE type, uint64_t objCount, OBJTRACK_NODE* pObjNodeArray)')
exts.append('{')
exts.append(" // This bool flags if we're pulling all objs or just a single class of objs")
exts.append(' bool32_t bAllObjs = (type == VK_OBJECT_TYPE_ANY);')
@@ -300,7 +300,7 @@ class Subcommand(object):
def _generate_layer_gpa_function(self, extensions=[]):
func_body = []
- func_body.append("VK_LAYER_EXPORT void* VKAPI vkGetProcAddr(VK_PHYSICAL_GPU gpu, const char* funcName)\n"
+ func_body.append("VK_LAYER_EXPORT void* VKAPI vkGetProcAddr(VkPhysicalGpu gpu, const char* funcName)\n"
"{\n"
" VK_BASE_LAYER_OBJECT* gpuw = (VK_BASE_LAYER_OBJECT *) gpu;\n"
" void* addr;\n"
@@ -319,7 +319,7 @@ class Subcommand(object):
func_body.append(" else {\n"
" if (gpuw->pGPA == NULL)\n"
" return NULL;\n"
- " return gpuw->pGPA((VK_PHYSICAL_GPU)gpuw->nextObject, funcName);\n"
+ " return gpuw->pGPA((VkPhysicalGpu)gpuw->nextObject, funcName);\n"
" }\n"
"}\n")
return "\n".join(func_body)
@@ -345,11 +345,11 @@ class Subcommand(object):
func_body.append(' g_logFile = stdout;')
func_body.append(' }')
func_body.append('')
- func_body.append(' vkGetProcAddrType fpNextGPA;\n'
+ func_body.append(' PFN_vkGetProcAddr fpNextGPA;\n'
' fpNextGPA = pCurObj->pGPA;\n'
' assert(fpNextGPA);\n')
- func_body.append(" layer_initialize_dispatch_table(&nextTable, fpNextGPA, (VK_PHYSICAL_GPU) pCurObj->nextObject);")
+ func_body.append(" layer_initialize_dispatch_table(&nextTable, fpNextGPA, (VkPhysicalGpu) pCurObj->nextObject);")
if lockname is not None:
func_body.append(" if (!%sLockInitialized)" % lockname)
func_body.append(" {")
@@ -364,11 +364,11 @@ class Subcommand(object):
func_body = ["#include \"vk_dispatch_table_helper.h\""]
func_body.append('static void init%s(void)\n'
'{\n'
- ' vkGetProcAddrType fpNextGPA;\n'
+ ' PFN_vkGetProcAddr fpNextGPA;\n'
' fpNextGPA = pCurObj->pGPA;\n'
' assert(fpNextGPA);\n' % self.layer_name);
- func_body.append(" layer_initialize_dispatch_table(&nextTable, fpNextGPA, (VK_PHYSICAL_GPU) pCurObj->nextObject);\n")
+ func_body.append(" layer_initialize_dispatch_table(&nextTable, fpNextGPA, (VkPhysicalGpu) pCurObj->nextObject);\n")
func_body.append(" if (!printLockInitialized)")
func_body.append(" {")
func_body.append(" // TODO/TBD: Need to delete this mutex sometime. How???")
@@ -406,12 +406,12 @@ class GenericLayerSubcommand(Subcommand):
stmt = ''
funcs = []
if proto.ret != "void":
- ret_val = "VK_RESULT result = "
+ ret_val = "VkResult result = "
stmt = " return result;\n"
if 'WsiX11AssociateConnection' == proto.name:
funcs.append("#if defined(__linux__) || defined(XCB_NVIDIA)")
if proto.name == "EnumerateLayers":
- c_call = proto.c_call().replace("(" + proto.params[0].name, "((VK_PHYSICAL_GPU)gpuw->nextObject", 1)
+ c_call = proto.c_call().replace("(" + proto.params[0].name, "((VkPhysicalGpu)gpuw->nextObject", 1)
funcs.append('%s%s\n'
'{\n'
' char str[1024];\n'
@@ -435,14 +435,14 @@ class GenericLayerSubcommand(Subcommand):
' return VK_SUCCESS;\n'
' }\n'
'}' % (qual, decl, proto.params[0].name, proto.name, self.layer_name, ret_val, c_call, proto.name, stmt, self.layer_name))
- elif proto.params[0].ty != "VK_PHYSICAL_GPU":
+ elif proto.params[0].ty != "VkPhysicalGpu":
funcs.append('%s%s\n'
'{\n'
' %snextTable.%s;\n'
'%s'
'}' % (qual, decl, ret_val, proto.c_call(), stmt))
else:
- c_call = proto.c_call().replace("(" + proto.params[0].name, "((VK_PHYSICAL_GPU)gpuw->nextObject", 1)
+ c_call = proto.c_call().replace("(" + proto.params[0].name, "((VkPhysicalGpu)gpuw->nextObject", 1)
funcs.append('%s%s\n'
'{\n'
' char str[1024];'
@@ -586,10 +586,10 @@ class APIDumpSubcommand(Subcommand):
func_body.append('')
func_body.append(' ConfigureOutputStream(writeToFile, flushAfterWrite);')
func_body.append('')
- func_body.append(' vkGetProcAddrType fpNextGPA;')
+ func_body.append(' PFN_vkGetProcAddr fpNextGPA;')
func_body.append(' fpNextGPA = pCurObj->pGPA;')
func_body.append(' assert(fpNextGPA);')
- func_body.append(' layer_initialize_dispatch_table(&nextTable, fpNextGPA, (VK_PHYSICAL_GPU) pCurObj->nextObject);')
+ func_body.append(' layer_initialize_dispatch_table(&nextTable, fpNextGPA, (VkPhysicalGpu) pCurObj->nextObject);')
func_body.append('')
func_body.append(' if (!printLockInitialized)')
func_body.append(' {')
@@ -614,7 +614,7 @@ class APIDumpSubcommand(Subcommand):
elif 'Create' in proto.name or 'Alloc' in proto.name or 'MapMemory' in proto.name:
create_params = -1
if proto.ret != "void":
- ret_val = "VK_RESULT result = "
+ ret_val = "VkResult result = "
stmt = " return result;\n"
f_open = 'loader_platform_thread_lock_mutex(&printLock);\n '
log_func = ' if (StreamControl::writeAddress == true) {'
@@ -653,8 +653,8 @@ class APIDumpSubcommand(Subcommand):
log_func = log_func.strip(', ')
log_func_no_addr = log_func_no_addr.strip(', ')
if proto.ret != "void":
- log_func += ') = " << string_VK_RESULT((VK_RESULT)result) << endl'
- log_func_no_addr += ') = " << string_VK_RESULT((VK_RESULT)result) << endl'
+ log_func += ') = " << string_VkResult((VkResult)result) << endl'
+ log_func_no_addr += ') = " << string_VkResult((VkResult)result) << endl'
else:
log_func += ')\\n"'
log_func_no_addr += ')\\n"'
@@ -697,7 +697,7 @@ class APIDumpSubcommand(Subcommand):
if 'WsiX11AssociateConnection' == proto.name:
funcs.append("#if defined(__linux__) || defined(XCB_NVIDIA)")
if proto.name == "EnumerateLayers":
- c_call = proto.c_call().replace("(" + proto.params[0].name, "((VK_PHYSICAL_GPU)gpuw->nextObject", 1)
+ c_call = proto.c_call().replace("(" + proto.params[0].name, "((VkPhysicalGpu)gpuw->nextObject", 1)
funcs.append('%s%s\n'
'{\n'
' using namespace StreamControl;\n'
@@ -718,11 +718,11 @@ class APIDumpSubcommand(Subcommand):
' }\n'
'}' % (qual, decl, proto.params[0].name, self.layer_name, ret_val, c_call,f_open, log_func, f_close, stmt, self.layer_name))
elif 'GetExtensionSupport' == proto.name:
- c_call = proto.c_call().replace("(" + proto.params[0].name, "((VK_PHYSICAL_GPU)gpuw->nextObject", 1)
+ c_call = proto.c_call().replace("(" + proto.params[0].name, "((VkPhysicalGpu)gpuw->nextObject", 1)
funcs.append('%s%s\n'
'{\n'
' VK_BASE_LAYER_OBJECT* gpuw = (VK_BASE_LAYER_OBJECT *) %s;\n'
- ' VK_RESULT result;\n'
+ ' VkResult result;\n'
' /* This entrypoint is NOT going to init its own dispatch table since loader calls here early */\n'
' if (!strncmp(pExtName, "%s", strlen("%s")))\n'
' {\n'
@@ -737,7 +737,7 @@ class APIDumpSubcommand(Subcommand):
' }\n'
'%s'
'}' % (qual, decl, proto.params[0].name, self.layer_name, self.layer_name, c_call, f_open, log_func, f_close, stmt))
- elif proto.params[0].ty != "VK_PHYSICAL_GPU":
+ elif proto.params[0].ty != "VkPhysicalGpu":
funcs.append('%s%s\n'
'{\n'
' using namespace StreamControl;\n'
@@ -746,7 +746,7 @@ class APIDumpSubcommand(Subcommand):
'%s'
'}' % (qual, decl, ret_val, proto.c_call(), f_open, log_func, f_close, stmt))
else:
- c_call = proto.c_call().replace("(" + proto.params[0].name, "((VK_PHYSICAL_GPU)gpuw->nextObject", 1)
+ c_call = proto.c_call().replace("(" + proto.params[0].name, "((VkPhysicalGpu)gpuw->nextObject", 1)
funcs.append('%s%s\n'
'{\n'
' using namespace StreamControl;\n'
@@ -879,7 +879,7 @@ class ObjectTrackerSubcommand(Subcommand):
header_txt.append(' if (0) ll_print_lists();')
header_txt.append('}')
header_txt.append('// Traverse global list and return type for given object')
- header_txt.append('static VK_OBJECT_TYPE ll_get_obj_type(VK_OBJECT object) {')
+ header_txt.append('static VK_OBJECT_TYPE ll_get_obj_type(VkObject object) {')
header_txt.append(' objNode *pTrav = pGlobalHead;')
header_txt.append(' while (pTrav) {')
header_txt.append(' if (pTrav->obj.pObj == object)')
@@ -1067,7 +1067,7 @@ class ObjectTrackerSubcommand(Subcommand):
header_txt.append('}')
header_txt.append('')
header_txt.append('static void setGpuQueueInfoState(void *pData) {')
- header_txt.append(' maxMemReferences = ((VK_PHYSICAL_GPU_QUEUE_PROPERTIES *)pData)->maxMemReferences;')
+ header_txt.append(' maxMemReferences = ((VkPhysicalGpu_QUEUE_PROPERTIES *)pData)->maxMemReferences;')
header_txt.append('}')
return "\n".join(header_txt)
@@ -1077,7 +1077,7 @@ class ObjectTrackerSubcommand(Subcommand):
return None
obj_type_mapping = {base_t : base_t.replace("VK_", "VK_OBJECT_TYPE_") for base_t in vulkan.object_type_list}
# For the various "super-types" we have to use function to distinguish sub type
- for obj_type in ["VK_BASE_OBJECT", "VK_OBJECT", "VK_DYNAMIC_STATE_OBJECT"]:
+ for obj_type in ["VK_BASE_OBJECT", "VK_OBJECT", "VK_DYNAMIC_STATE_OBJECT", "VkObject", "VkBaseObject"]:
obj_type_mapping[obj_type] = "ll_get_obj_type(object)"
decl = proto.c_func(prefix="vk", attr="VKAPI")
@@ -1157,12 +1157,12 @@ class ObjectTrackerSubcommand(Subcommand):
ret_val = ''
stmt = ''
if proto.ret != "void":
- ret_val = "VK_RESULT result = "
+ ret_val = "VkResult result = "
stmt = " return result;\n"
if 'WsiX11AssociateConnection' == proto.name:
funcs.append("#if defined(__linux__) || defined(XCB_NVIDIA)")
if proto.name == "EnumerateLayers":
- c_call = proto.c_call().replace("(" + proto.params[0].name, "((VK_PHYSICAL_GPU)gpuw->nextObject", 1)
+ c_call = proto.c_call().replace("(" + proto.params[0].name, "((VkPhysicalGpu)gpuw->nextObject", 1)
funcs.append('%s%s\n'
'{\n'
' if (gpu != NULL) {\n'
@@ -1183,11 +1183,11 @@ class ObjectTrackerSubcommand(Subcommand):
' }\n'
'}' % (qual, decl, proto.params[0].name, using_line, self.layer_name, ret_val, c_call, create_line, destroy_line, stmt, self.layer_name))
elif 'GetExtensionSupport' == proto.name:
- c_call = proto.c_call().replace("(" + proto.params[0].name, "((VK_PHYSICAL_GPU)gpuw->nextObject", 1)
+ c_call = proto.c_call().replace("(" + proto.params[0].name, "((VkPhysicalGpu)gpuw->nextObject", 1)
funcs.append('%s%s\n'
'{\n'
' VK_BASE_LAYER_OBJECT* gpuw = (VK_BASE_LAYER_OBJECT *) %s;\n'
- ' VK_RESULT result;\n'
+ ' VkResult result;\n'
' /* This entrypoint is NOT going to init its own dispatch table since loader calls this early */\n'
' if (!strncmp(pExtName, "%s", strlen("%s")) ||\n'
' !strncmp(pExtName, "objTrackGetObjectCount", strlen("objTrackGetObjectCount")) ||\n'
@@ -1204,7 +1204,7 @@ class ObjectTrackerSubcommand(Subcommand):
' }\n'
'%s'
'}' % (qual, decl, proto.params[0].name, self.layer_name, self.layer_name, using_line, c_call, stmt))
- elif proto.params[0].ty != "VK_PHYSICAL_GPU":
+ elif proto.params[0].ty != "VkPhysicalGpu":
funcs.append('%s%s\n'
'{\n'
'%s'
@@ -1213,7 +1213,7 @@ class ObjectTrackerSubcommand(Subcommand):
'%s'
'}' % (qual, decl, using_line, ret_val, proto.c_call(), create_line, destroy_line, stmt))
else:
- c_call = proto.c_call().replace("(" + proto.params[0].name, "((VK_PHYSICAL_GPU)gpuw->nextObject", 1)
+ c_call = proto.c_call().replace("(" + proto.params[0].name, "((VkPhysicalGpu)gpuw->nextObject", 1)
gpu_state = ''
if 'GetGpuInfo' in proto.name:
gpu_state = ' if (infoType == VK_INFO_TYPE_PHYSICAL_GPU_QUEUE_PROPERTIES) {\n'
diff --git a/vk_helper.py b/vk_helper.py
index f8cf2a80..88951735 100755
--- a/vk_helper.py
+++ b/vk_helper.py
@@ -298,26 +298,20 @@ def recreate_structs():
sys.stdout.write(";\n\n")
#
+# TODO: Fix construction of struct name
def get_struct_name_from_struct_type(struct_type):
+ # Note: All struct types are now camel-case
caps_struct_name = struct_type.replace("_STRUCTURE_TYPE", "")
- # NOTE: These must stay in caps as they are looking at the VK_STRUCTURE_TYPE_*_CREATE_INFO
- # and that has not changed to camel case
- exceptions_list = ['VK_DEVICE_CREATE_INFO', 'VK_INSTANCE_CREATE_INFO', 'VK_LAYER_CREATE_INFO',
- 'VK_MEMORY_ALLOC_INFO', 'VK_MEMORY_ALLOC_BUFFER_INFO', 'VK_MEMORY_ALLOC_IMAGE_INFO',
- 'VK_BUFFER_CREATE_INFO', 'VK_BUFFER_VIEW_CREATE_INFO']
- if caps_struct_name in exceptions_list:
- char_idx = 0
- struct_name = ''
- for char in caps_struct_name:
- if (0 == char_idx) or (caps_struct_name[char_idx-1] == '_'):
- struct_name += caps_struct_name[char_idx]
- elif (caps_struct_name[char_idx] == '_'):
- pass
- else:
- struct_name += caps_struct_name[char_idx].lower()
- char_idx += 1
- else:
- struct_name = caps_struct_name
+ char_idx = 0
+ struct_name = ''
+ for char in caps_struct_name:
+ if (0 == char_idx) or (caps_struct_name[char_idx-1] == '_'):
+ struct_name += caps_struct_name[char_idx]
+ elif (caps_struct_name[char_idx] == '_'):
+ pass
+ else:
+ struct_name += caps_struct_name[char_idx].lower()
+ char_idx += 1
return struct_name
# class for writing common file elements
@@ -526,11 +520,11 @@ class StructWrapperGen:
def _generateDynamicPrintFunctions(self):
dp_funcs = []
dp_funcs.append("\nvoid dynamic_display_full_txt(const void* pStruct, uint32_t indent)\n{\n // Cast to APP_INFO ptr initially just to pull sType off struct")
- dp_funcs.append(" VK_STRUCTURE_TYPE sType = ((VK_APPLICATION_INFO*)pStruct)->sType;\n")
+ dp_funcs.append(" VkStructureType sType = ((VkApplicationInfo*)pStruct)->sType;\n")
dp_funcs.append(" switch (sType)\n {")
for e in enum_type_dict:
class_num = 0
- if "_STRUCTURE_TYPE" in e:
+ if "StructureType" in e:
for v in sorted(enum_type_dict[e]):
struct_name = get_struct_name_from_struct_type(v)
class_name = self.get_class_name(struct_name)
@@ -592,14 +586,14 @@ class StructWrapperGen:
member_post = ' ? "TRUE" : "FALSE"'
elif 'float' in struct_member['type']:
print_type = "f"
- elif 'uint64' in struct_member['type']:
- print_type = "lu"
+ elif 'uint64' in struct_member['type'] or 'gpusize' in struct_member['type'].lower():
+ print_type = '" PRId64 "'
elif 'uint8' in struct_member['type']:
print_type = "hu"
- elif '_size' in struct_member['type']:
+ elif 'size' in struct_member['type'].lower():
print_type = '" PRINTF_SIZE_T_SPECIFIER "'
print_delimiter = ""
- elif True in [ui_str.lower() in struct_member['type'].lower() for ui_str in ['uint', '_FLAGS', '_SAMPLE_MASK']]:
+ elif True in [ui_str.lower() in struct_member['type'].lower() for ui_str in ['uint', 'flags', 'samplemask']]:
print_type = "u"
elif 'int' in struct_member['type']:
print_type = "i"
@@ -723,11 +717,11 @@ class StructWrapperGen:
sh_funcs.append(" if (pStruct == NULL) {")
sh_funcs.append(" return NULL;")
sh_funcs.append(" }")
- sh_funcs.append(" VK_STRUCTURE_TYPE sType = ((VK_APPLICATION_INFO*)pStruct)->sType;")
+ sh_funcs.append(" VkStructureType sType = ((VkApplicationInfo*)pStruct)->sType;")
sh_funcs.append(' char indent[100];\n strcpy(indent, " ");\n strcat(indent, prefix);')
sh_funcs.append(" switch (sType)\n {")
for e in enum_type_dict:
- if "_STRUCTURE_TYPE" in e:
+ if "StructureType" in e:
for v in sorted(enum_type_dict[e]):
struct_name = get_struct_name_from_struct_type(v)
print_func_name = self._get_sh_func_name(struct_name)
@@ -750,6 +744,7 @@ class StructWrapperGen:
# create and return final string
sh_funcs = []
# First generate prototypes for every struct
+ # XXX - REMOVE this comment
for s in sorted(self.struct_dict):
sh_funcs.append('string %s(const %s* pStruct, const string prefix);' % (self._get_sh_func_name(s), typedef_fwd_dict[s]))
sh_funcs.append('\n')
@@ -892,22 +887,23 @@ class StructWrapperGen:
sh_funcs.append(" if (pStruct == NULL) {\n")
sh_funcs.append(" return NULL;")
sh_funcs.append(" }\n")
- sh_funcs.append(" VK_STRUCTURE_TYPE sType = ((VK_APPLICATION_INFO*)pStruct)->sType;")
+ sh_funcs.append(" VkStructureType sType = ((VkApplicationInfo*)pStruct)->sType;")
sh_funcs.append(' string indent = " ";')
sh_funcs.append(' indent += prefix;')
sh_funcs.append(" switch (sType)\n {")
for e in enum_type_dict:
- if "_STRUCTURE_TYPE" in e:
+ if "StructureType" in e:
for v in sorted(enum_type_dict[e]):
struct_name = get_struct_name_from_struct_type(v)
print_func_name = self._get_sh_func_name(struct_name)
+ #sh_funcs.append('string %s(const %s* pStruct, const string prefix);' % (self._get_sh_func_name(s), typedef_fwd_dict[s]))
sh_funcs.append(' case %s:\n {' % (v))
sh_funcs.append(' return %s((%s*)pStruct, indent);' % (print_func_name, struct_name))
sh_funcs.append(' }')
sh_funcs.append(' break;')
sh_funcs.append(" default:")
sh_funcs.append(" return NULL;")
- sh_funcs.append(" }")
+ sh_funcs.append(" }")
sh_funcs.append("}")
return "\n".join(sh_funcs)
@@ -1056,7 +1052,7 @@ class StructWrapperGen:
for s in sorted(self.struct_dict):
sh_funcs.append('uint32_t %s(const %s* pStruct)\n{' % (self._get_vh_func_name(s), typedef_fwd_dict[s]))
for m in sorted(self.struct_dict[s]):
- # TODO : Need to handle arrays of enums like in VK_RENDER_PASS_CREATE_INFO struct
+ # TODO : Need to handle arrays of enums like in VkRenderPassCreateInfo struct
if is_type(self.struct_dict[s][m]['type'], 'enum') and not self.struct_dict[s][m]['ptr']:
sh_funcs.append(' if (!validate_%s(pStruct->%s))\n return 0;' % (self.struct_dict[s][m]['type'], self.struct_dict[s][m]['name']))
# TODO : Need a little refinement to this code to make sure type of struct matches expected input (ptr, const...)
@@ -1107,7 +1103,7 @@ class StructWrapperGen:
if not is_type(self.struct_dict[s][m]['type'], 'struct') and not 'char' in self.struct_dict[s][m]['type'].lower():
if 'ppMemBarriers' == self.struct_dict[s][m]['name']:
# TODO : For now be conservative and consider all memBarrier ptrs as largest possible struct
- sh_funcs.append('%sstructSize += pStruct->%s*(sizeof(%s*) + sizeof(VK_IMAGE_MEMORY_BARRIER));' % (indent, self.struct_dict[s][m]['array_size'], self.struct_dict[s][m]['type']))
+ sh_funcs.append('%sstructSize += pStruct->%s*(sizeof(%s*) + sizeof(VkImageMemoryBarrier));' % (indent, self.struct_dict[s][m]['array_size'], self.struct_dict[s][m]['type']))
else:
sh_funcs.append('%sstructSize += pStruct->%s*(sizeof(%s*) + sizeof(%s));' % (indent, self.struct_dict[s][m]['array_size'], self.struct_dict[s][m]['type'], self.struct_dict[s][m]['type']))
else: # This is an array of char* or array of struct ptrs
@@ -1154,8 +1150,8 @@ class StructWrapperGen:
else:
sh_funcs.append('size_t get_dynamic_struct_size(const void* pStruct)\n{')
indent = ' '
- sh_funcs.append('%s// Just use VK_APPLICATION_INFO as struct until actual type is resolved' % (indent))
- sh_funcs.append('%sVK_APPLICATION_INFO* pNext = (VK_APPLICATION_INFO*)pStruct;' % (indent))
+ sh_funcs.append('%s// Just use VkApplicationInfo as struct until actual type is resolved' % (indent))
+ sh_funcs.append('%sVkApplicationInfo* pNext = (VkApplicationInfo*)pStruct;' % (indent))
sh_funcs.append('%ssize_t structSize = 0;' % (indent))
if follow_chain:
sh_funcs.append('%swhile (pNext) {' % (indent))
@@ -1163,7 +1159,7 @@ class StructWrapperGen:
sh_funcs.append('%sswitch (pNext->sType) {' % (indent))
indent += ' '
for e in enum_type_dict:
- if '_STRUCTURE_TYPE' in e:
+ if 'StructureType' in e:
for v in sorted(enum_type_dict[e]):
struct_name = get_struct_name_from_struct_type(v)
sh_funcs.append('%scase %s:' % (indent, v))
@@ -1181,7 +1177,7 @@ class StructWrapperGen:
indent = indent[:-4]
sh_funcs.append('%s}' % (indent))
if follow_chain:
- sh_funcs.append('%spNext = (VK_APPLICATION_INFO*)pNext->pNext;' % (indent))
+ sh_funcs.append('%spNext = (VkApplicationInfo*)pNext->pNext;' % (indent))
indent = indent[:-4]
sh_funcs.append('%s}' % (indent))
sh_funcs.append('%sreturn structSize;\n}' % indent)
@@ -1388,8 +1384,8 @@ class GraphVizGen:
array_index = ""
member_print_post = ""
print_delimiter = "%"
- if struct_member['array'] and 'CHAR' in struct_member['type']: # just print char array as string
- print_type = "s"
+ if struct_member['array'] and 'char' in struct_member['type'].lower(): # just print char array as string
+ print_type = "p"
print_array = False
elif struct_member['array'] and not print_array:
# Just print base address of array when not full print_array
@@ -1408,14 +1404,14 @@ class GraphVizGen:
member_post = ' ? "TRUE" : "FALSE"'
elif 'float' in struct_member['type']:
print_type = "f"
- elif 'uint64' in struct_member['type']:
- print_type = "lu"
+ elif 'uint64' in struct_member['type'] or 'gpusize' in struct_member['type'].lower():
+ print_type = '" PRId64 "'
elif 'uint8' in struct_member['type']:
print_type = "hu"
- elif '_SIZE' in struct_member['type']:
+ elif 'size' in struct_member['type'].lower():
print_type = '" PRINTF_SIZE_T_SPECIFIER "'
print_delimiter = ""
- elif True in [ui_str in struct_member['type'] for ui_str in ['uint', '_FLAGS', '_SAMPLE_MASK']]:
+ elif True in [ui_str.lower() in struct_member['type'].lower() for ui_str in ['uint', 'flags', 'samplemask']]:
print_type = "u"
elif 'int' in struct_member['type']:
print_type = "i"
@@ -1429,21 +1425,21 @@ class GraphVizGen:
array_index = " i,"
member_post = "[i]"
print_out = "<TR><TD>%%s%s%s</TD><TD%s>%s%s%s</TD></TR>" % (member_name, member_print_post, port_label, print_delimiter, print_type, postfix) # section of print that goes inside of quotes
- print_arg = ", %s,%s %s(%s%s%s)%s" % (pre_var_name, array_index, cast_type, struct_var_name, struct_op, member_name, member_post) # section of print passed to portion in quotes
+ print_arg = ", %s,%s %s(%s%s%s)%s\n" % (pre_var_name, array_index, cast_type, struct_var_name, struct_op, member_name, member_post) # section of print passed to portion in quotes
return (print_out, print_arg)
def _generateBody(self):
gv_funcs = []
array_func_list = [] # structs for which we'll generate an array version of their print function
- array_func_list.append('vk_buffer_view_attach_info')
- array_func_list.append('vk_image_view_attach_info')
- array_func_list.append('vk_sampler_image_view_info')
- array_func_list.append('vk_descriptor_type_count')
+ array_func_list.append('vkbufferviewattachinfo')
+ array_func_list.append('vkimageviewattachinfo')
+ array_func_list.append('vksamplerimageviewinfo')
+ array_func_list.append('vkdescriptortypecount')
# For first pass, generate prototype
for s in sorted(self.struct_dict):
gv_funcs.append('char* %s(const %s* pStruct, const char* myNodeName);\n' % (self._get_gv_func_name(s), typedef_fwd_dict[s]))
if s.lower().strip("_") in array_func_list:
- if s.lower().strip("_") in ['vk_buffer_view_attach_info', 'vk_image_view_attach_info']:
+ if s.lower().strip("_") in ['vkbufferviewattachinfo', 'vkimageviewattachinfo']:
gv_funcs.append('char* %s_array(uint32_t count, const %s* const* pStruct, const char* myNodeName);\n' % (self._get_gv_func_name(s), typedef_fwd_dict[s]))
else:
gv_funcs.append('char* %s_array(uint32_t count, const %s* pStruct, const char* myNodeName);\n' % (self._get_gv_func_name(s), typedef_fwd_dict[s]))
@@ -1528,7 +1524,7 @@ class GraphVizGen:
gv_funcs.append(" return str;\n}\n")
if s.lower().strip("_") in array_func_list:
ptr_array = False
- if s.lower().strip("_") in ['vk_buffer_view_attach_info', 'vk_image_view_attach_info']:
+ if s.lower().strip("_") in ['vkbufferviewattachinfo', 'vkimageviewattachinfo']:
ptr_array = True
gv_funcs.append('char* %s_array(uint32_t count, const %s* const* pStruct, const char* myNodeName)\n{\n char* str;\n char tmpStr[1024];\n' % (self._get_gv_func_name(s), typedef_fwd_dict[s]))
else:
@@ -1562,21 +1558,21 @@ class GraphVizGen:
# Add function to dynamically print out unknown struct
gv_funcs.append("char* dynamic_gv_display(const void* pStruct, const char* nodeName)\n{\n")
gv_funcs.append(" // Cast to APP_INFO ptr initially just to pull sType off struct\n")
- gv_funcs.append(" VK_STRUCTURE_TYPE sType = ((VK_APPLICATION_INFO*)pStruct)->sType;\n")
+ gv_funcs.append(" VkStructureType sType = ((VkApplicationInfo*)pStruct)->sType;\n")
gv_funcs.append(" switch (sType)\n {\n")
for e in enum_type_dict:
- if "_STRUCTURE_TYPE" in e:
+ if "StructureType" in e:
for v in sorted(enum_type_dict[e]):
struct_name = get_struct_name_from_struct_type(v)
print_func_name = self._get_gv_func_name(struct_name)
# TODO : Hand-coded fixes for some exceptions
- #if 'VK_PIPELINE_CB_STATE_CREATE_INFO' in struct_name:
+ #if 'VkPipelineCbStateCreateInfo' in struct_name:
# struct_name = 'VK_PIPELINE_CB_STATE'
- if 'VK_SEMAPHORE_CREATE_INFO' in struct_name:
- struct_name = 'VK_SEMAPHORE_CREATE_INFO'
+ if 'VkSemaphoreCreateInfo' in struct_name:
+ struct_name = 'VkSemaphoreCreateInfo'
print_func_name = self._get_gv_func_name(struct_name)
- elif 'VK_SEMAPHORE_OPEN_INFO' in struct_name:
- struct_name = 'VK_SEMAPHORE_OPEN_INFO'
+ elif 'VkSemaphoreOpenInfo' in struct_name:
+ struct_name = 'VkSemaphoreOpenInfo'
print_func_name = self._get_gv_func_name(struct_name)
gv_funcs.append(' case %s:\n' % (v))
gv_funcs.append(' return %s((%s*)pStruct, nodeName);\n' % (print_func_name, struct_name))
@@ -1655,7 +1651,7 @@ def main(argv=None):
if opts.gen_struct_wrappers:
sw = StructWrapperGen(struct_dict, os.path.basename(opts.input_file).strip(".h"), os.path.dirname(enum_sh_filename))
#print(sw.get_class_name(struct))
- sw.set_include_headers([os.path.basename(opts.input_file),os.path.basename(enum_sh_filename),"stdint.h","stdio.h","stdlib.h"])
+ sw.set_include_headers([os.path.basename(opts.input_file),os.path.basename(enum_sh_filename),"stdint.h","inttypes.h", "stdio.h","stdlib.h"])
print("Generating struct wrapper header to %s" % sw.header_filename)
sw.generateHeader()
print("Generating struct wrapper class to %s" % sw.class_filename)
@@ -1679,7 +1675,7 @@ def main(argv=None):
cmg.generate()
if opts.gen_graphviz:
gv = GraphVizGen(struct_dict, os.path.basename(opts.input_file).strip(".h"), os.path.dirname(enum_sh_filename))
- gv.set_include_headers([os.path.basename(opts.input_file),os.path.basename(enum_sh_filename),"stdint.h","stdio.h","stdlib.h"])
+ gv.set_include_headers([os.path.basename(opts.input_file),os.path.basename(enum_sh_filename),"stdint.h","stdio.h","stdlib.h", "inttypes.h"])
gv.generate()
print("DONE!")
#print(typedef_rev_dict)
diff --git a/vulkan.py b/vulkan.py
index 28fcd7c9..07f431b6 100755
--- a/vulkan.py
+++ b/vulkan.py
@@ -183,460 +183,460 @@ core = Extension(
name="VK_CORE",
headers=["vulkan.h", "vkDbg.h"],
objects=[
- "VK_INSTANCE",
- "VK_PHYSICAL_GPU",
- "VK_BASE_OBJECT",
- "VK_DEVICE",
- "VK_QUEUE",
- "VK_GPU_MEMORY",
- "VK_OBJECT",
- "VK_BUFFER",
- "VK_BUFFER_VIEW",
- "VK_IMAGE",
- "VK_IMAGE_VIEW",
- "VK_COLOR_ATTACHMENT_VIEW",
- "VK_DEPTH_STENCIL_VIEW",
- "VK_SHADER",
- "VK_PIPELINE",
- "VK_SAMPLER",
- "VK_DESCRIPTOR_SET",
- "VK_DESCRIPTOR_SET_LAYOUT",
- "VK_DESCRIPTOR_SET_LAYOUT_CHAIN",
- "VK_DESCRIPTOR_POOL",
- "VK_DYNAMIC_STATE_OBJECT",
- "VK_DYNAMIC_VP_STATE_OBJECT",
- "VK_DYNAMIC_RS_STATE_OBJECT",
- "VK_DYNAMIC_CB_STATE_OBJECT",
- "VK_DYNAMIC_DS_STATE_OBJECT",
- "VK_CMD_BUFFER",
- "VK_FENCE",
- "VK_SEMAPHORE",
- "VK_EVENT",
- "VK_QUERY_POOL",
- "VK_FRAMEBUFFER",
- "VK_RENDER_PASS",
+ "VkInstance",
+ "VkPhysicalGpu",
+ "VkBaseObject",
+ "VkDevice",
+ "VkQueue",
+ "VkGpuMemory",
+ "VkObject",
+ "VkBuffer",
+ "VkBufferView",
+ "VkImage",
+ "VkImageView",
+ "VkColorAttachmentView",
+ "VkDepthStencilView",
+ "VkShader",
+ "VkPipeline",
+ "VkSampler",
+ "VkDescriptorSet",
+ "VkDescriptorSetLayout",
+ "VkDescriptorSetLayoutChain",
+ "VkDescriptorPool",
+ "VkDynamicStateObject",
+ "VkDynamicVpStateObject",
+ "VkDynamicRsStateObject",
+ "VkDynamicCbStateObject",
+ "VkDynamicDsStateObject",
+ "VkCmdBuffer",
+ "VkFence",
+ "VkSemaphore",
+ "VkEvent",
+ "VkQueryPool",
+ "VkFramebuffer",
+ "VkRenderPass",
],
protos=[
- Proto("VK_RESULT", "CreateInstance",
+ Proto("VkResult", "CreateInstance",
[Param("const VkInstanceCreateInfo*", "pCreateInfo"),
- Param("VK_INSTANCE*", "pInstance")]),
+ Param("VkInstance*", "pInstance")]),
- Proto("VK_RESULT", "DestroyInstance",
- [Param("VK_INSTANCE", "instance")]),
+ Proto("VkResult", "DestroyInstance",
+ [Param("VkInstance", "instance")]),
- Proto("VK_RESULT", "EnumerateGpus",
- [Param("VK_INSTANCE", "instance"),
+ Proto("VkResult", "EnumerateGpus",
+ [Param("VkInstance", "instance"),
Param("uint32_t", "maxGpus"),
Param("uint32_t*", "pGpuCount"),
- Param("VK_PHYSICAL_GPU*", "pGpus")]),
+ Param("VkPhysicalGpu*", "pGpus")]),
- Proto("VK_RESULT", "GetGpuInfo",
- [Param("VK_PHYSICAL_GPU", "gpu"),
- Param("VK_PHYSICAL_GPU_INFO_TYPE", "infoType"),
+ Proto("VkResult", "GetGpuInfo",
+ [Param("VkPhysicalGpu", "gpu"),
+ Param("VkPhysicalGpuInfoType", "infoType"),
Param("size_t*", "pDataSize"),
Param("void*", "pData")]),
Proto("void*", "GetProcAddr",
- [Param("VK_PHYSICAL_GPU", "gpu"),
+ [Param("VkPhysicalGpu", "gpu"),
Param("const char*", "pName")]),
- Proto("VK_RESULT", "CreateDevice",
- [Param("VK_PHYSICAL_GPU", "gpu"),
+ Proto("VkResult", "CreateDevice",
+ [Param("VkPhysicalGpu", "gpu"),
Param("const VkDeviceCreateInfo*", "pCreateInfo"),
- Param("VK_DEVICE*", "pDevice")]),
+ Param("VkDevice*", "pDevice")]),
- Proto("VK_RESULT", "DestroyDevice",
- [Param("VK_DEVICE", "device")]),
+ Proto("VkResult", "DestroyDevice",
+ [Param("VkDevice", "device")]),
- Proto("VK_RESULT", "GetExtensionSupport",
- [Param("VK_PHYSICAL_GPU", "gpu"),
+ Proto("VkResult", "GetExtensionSupport",
+ [Param("VkPhysicalGpu", "gpu"),
Param("const char*", "pExtName")]),
- Proto("VK_RESULT", "EnumerateLayers",
- [Param("VK_PHYSICAL_GPU", "gpu"),
+ Proto("VkResult", "EnumerateLayers",
+ [Param("VkPhysicalGpu", "gpu"),
Param("size_t", "maxLayerCount"),
Param("size_t", "maxStringSize"),
Param("size_t*", "pOutLayerCount"),
Param("char* const*", "pOutLayers"),
Param("void*", "pReserved")]),
- Proto("VK_RESULT", "GetDeviceQueue",
- [Param("VK_DEVICE", "device"),
+ Proto("VkResult", "GetDeviceQueue",
+ [Param("VkDevice", "device"),
Param("uint32_t", "queueNodeIndex"),
Param("uint32_t", "queueIndex"),
- Param("VK_QUEUE*", "pQueue")]),
+ Param("VkQueue*", "pQueue")]),
- Proto("VK_RESULT", "QueueSubmit",
- [Param("VK_QUEUE", "queue"),
+ Proto("VkResult", "QueueSubmit",
+ [Param("VkQueue", "queue"),
Param("uint32_t", "cmdBufferCount"),
- Param("const VK_CMD_BUFFER*", "pCmdBuffers"),
- Param("VK_FENCE", "fence")]),
+ Param("const VkCmdBuffer*", "pCmdBuffers"),
+ Param("VkFence", "fence")]),
- Proto("VK_RESULT", "QueueAddMemReference",
- [Param("VK_QUEUE", "queue"),
- Param("VK_GPU_MEMORY", "mem")]),
+ Proto("VkResult", "QueueAddMemReference",
+ [Param("VkQueue", "queue"),
+ Param("VkGpuMemory", "mem")]),
- Proto("VK_RESULT", "QueueRemoveMemReference",
- [Param("VK_QUEUE", "queue"),
- Param("VK_GPU_MEMORY", "mem")]),
+ Proto("VkResult", "QueueRemoveMemReference",
+ [Param("VkQueue", "queue"),
+ Param("VkGpuMemory", "mem")]),
- Proto("VK_RESULT", "QueueWaitIdle",
- [Param("VK_QUEUE", "queue")]),
+ Proto("VkResult", "QueueWaitIdle",
+ [Param("VkQueue", "queue")]),
- Proto("VK_RESULT", "DeviceWaitIdle",
- [Param("VK_DEVICE", "device")]),
+ Proto("VkResult", "DeviceWaitIdle",
+ [Param("VkDevice", "device")]),
- Proto("VK_RESULT", "AllocMemory",
- [Param("VK_DEVICE", "device"),
+ Proto("VkResult", "AllocMemory",
+ [Param("VkDevice", "device"),
Param("const VkMemoryAllocInfo*", "pAllocInfo"),
- Param("VK_GPU_MEMORY*", "pMem")]),
+ Param("VkGpuMemory*", "pMem")]),
- Proto("VK_RESULT", "FreeMemory",
- [Param("VK_GPU_MEMORY", "mem")]),
+ Proto("VkResult", "FreeMemory",
+ [Param("VkGpuMemory", "mem")]),
- Proto("VK_RESULT", "SetMemoryPriority",
- [Param("VK_GPU_MEMORY", "mem"),
- Param("VK_MEMORY_PRIORITY", "priority")]),
+ Proto("VkResult", "SetMemoryPriority",
+ [Param("VkGpuMemory", "mem"),
+ Param("VkMemoryPriority", "priority")]),
- Proto("VK_RESULT", "MapMemory",
- [Param("VK_GPU_MEMORY", "mem"),
- Param("VK_FLAGS", "flags"),
+ Proto("VkResult", "MapMemory",
+ [Param("VkGpuMemory", "mem"),
+ Param("VkFlags", "flags"),
Param("void**", "ppData")]),
- Proto("VK_RESULT", "UnmapMemory",
- [Param("VK_GPU_MEMORY", "mem")]),
+ Proto("VkResult", "UnmapMemory",
+ [Param("VkGpuMemory", "mem")]),
- Proto("VK_RESULT", "PinSystemMemory",
- [Param("VK_DEVICE", "device"),
+ Proto("VkResult", "PinSystemMemory",
+ [Param("VkDevice", "device"),
Param("const void*", "pSysMem"),
Param("size_t", "memSize"),
- Param("VK_GPU_MEMORY*", "pMem")]),
-
- Proto("VK_RESULT", "GetMultiGpuCompatibility",
- [Param("VK_PHYSICAL_GPU", "gpu0"),
- Param("VK_PHYSICAL_GPU", "gpu1"),
- Param("VK_GPU_COMPATIBILITY_INFO*", "pInfo")]),
-
- Proto("VK_RESULT", "OpenSharedMemory",
- [Param("VK_DEVICE", "device"),
- Param("const VK_MEMORY_OPEN_INFO*", "pOpenInfo"),
- Param("VK_GPU_MEMORY*", "pMem")]),
-
- Proto("VK_RESULT", "OpenSharedSemaphore",
- [Param("VK_DEVICE", "device"),
- Param("const VK_SEMAPHORE_OPEN_INFO*", "pOpenInfo"),
- Param("VK_SEMAPHORE*", "pSemaphore")]),
-
- Proto("VK_RESULT", "OpenPeerMemory",
- [Param("VK_DEVICE", "device"),
- Param("const VK_PEER_MEMORY_OPEN_INFO*", "pOpenInfo"),
- Param("VK_GPU_MEMORY*", "pMem")]),
-
- Proto("VK_RESULT", "OpenPeerImage",
- [Param("VK_DEVICE", "device"),
- Param("const VK_PEER_IMAGE_OPEN_INFO*", "pOpenInfo"),
- Param("VK_IMAGE*", "pImage"),
- Param("VK_GPU_MEMORY*", "pMem")]),
-
- Proto("VK_RESULT", "DestroyObject",
- [Param("VK_OBJECT", "object")]),
-
- Proto("VK_RESULT", "GetObjectInfo",
- [Param("VK_BASE_OBJECT", "object"),
- Param("VK_OBJECT_INFO_TYPE", "infoType"),
+ Param("VkGpuMemory*", "pMem")]),
+
+ Proto("VkResult", "GetMultiGpuCompatibility",
+ [Param("VkPhysicalGpu", "gpu0"),
+ Param("VkPhysicalGpu", "gpu1"),
+ Param("VkGpuCompatibilityInfo*", "pInfo")]),
+
+ Proto("VkResult", "OpenSharedMemory",
+ [Param("VkDevice", "device"),
+ Param("const VkMemoryOpenInfo*", "pOpenInfo"),
+ Param("VkGpuMemory*", "pMem")]),
+
+ Proto("VkResult", "OpenSharedSemaphore",
+ [Param("VkDevice", "device"),
+ Param("const VkSemaphoreOpenInfo*", "pOpenInfo"),
+ Param("VkSemaphore*", "pSemaphore")]),
+
+ Proto("VkResult", "OpenPeerMemory",
+ [Param("VkDevice", "device"),
+ Param("const VkPeerMemoryOpenInfo*", "pOpenInfo"),
+ Param("VkGpuMemory*", "pMem")]),
+
+ Proto("VkResult", "OpenPeerImage",
+ [Param("VkDevice", "device"),
+ Param("const VkPeerImageOpenInfo*", "pOpenInfo"),
+ Param("VkImage*", "pImage"),
+ Param("VkGpuMemory*", "pMem")]),
+
+ Proto("VkResult", "DestroyObject",
+ [Param("VkObject", "object")]),
+
+ Proto("VkResult", "GetObjectInfo",
+ [Param("VkBaseObject", "object"),
+ Param("VkObjectInfoType", "infoType"),
Param("size_t*", "pDataSize"),
Param("void*", "pData")]),
- Proto("VK_RESULT", "BindObjectMemory",
- [Param("VK_OBJECT", "object"),
+ Proto("VkResult", "BindObjectMemory",
+ [Param("VkObject", "object"),
Param("uint32_t", "allocationIdx"),
- Param("VK_GPU_MEMORY", "mem"),
- Param("VK_GPU_SIZE", "offset")]),
+ Param("VkGpuMemory", "mem"),
+ Param("VkGpuSize", "offset")]),
- Proto("VK_RESULT", "BindObjectMemoryRange",
- [Param("VK_OBJECT", "object"),
+ Proto("VkResult", "BindObjectMemoryRange",
+ [Param("VkObject", "object"),
Param("uint32_t", "allocationIdx"),
- Param("VK_GPU_SIZE", "rangeOffset"),
- Param("VK_GPU_SIZE", "rangeSize"),
- Param("VK_GPU_MEMORY", "mem"),
- Param("VK_GPU_SIZE", "memOffset")]),
+ Param("VkGpuSize", "rangeOffset"),
+ Param("VkGpuSize", "rangeSize"),
+ Param("VkGpuMemory", "mem"),
+ Param("VkGpuSize", "memOffset")]),
- Proto("VK_RESULT", "BindImageMemoryRange",
- [Param("VK_IMAGE", "image"),
+ Proto("VkResult", "BindImageMemoryRange",
+ [Param("VkImage", "image"),
Param("uint32_t", "allocationIdx"),
- Param("const VK_IMAGE_MEMORY_BIND_INFO*", "bindInfo"),
- Param("VK_GPU_MEMORY", "mem"),
- Param("VK_GPU_SIZE", "memOffset")]),
+ Param("const VkImageMemoryBindInfo*", "bindInfo"),
+ Param("VkGpuMemory", "mem"),
+ Param("VkGpuSize", "memOffset")]),
- Proto("VK_RESULT", "CreateFence",
- [Param("VK_DEVICE", "device"),
- Param("const VK_FENCE_CREATE_INFO*", "pCreateInfo"),
- Param("VK_FENCE*", "pFence")]),
+ Proto("VkResult", "CreateFence",
+ [Param("VkDevice", "device"),
+ Param("const VkFenceCreateInfo*", "pCreateInfo"),
+ Param("VkFence*", "pFence")]),
- Proto("VK_RESULT", "ResetFences",
- [Param("VK_DEVICE", "device"),
+ Proto("VkResult", "ResetFences",
+ [Param("VkDevice", "device"),
Param("uint32_t", "fenceCount"),
- Param("VK_FENCE*", "pFences")]),
+ Param("VkFence*", "pFences")]),
- Proto("VK_RESULT", "GetFenceStatus",
- [Param("VK_FENCE", "fence")]),
+ Proto("VkResult", "GetFenceStatus",
+ [Param("VkFence", "fence")]),
- Proto("VK_RESULT", "WaitForFences",
- [Param("VK_DEVICE", "device"),
+ Proto("VkResult", "WaitForFences",
+ [Param("VkDevice", "device"),
Param("uint32_t", "fenceCount"),
- Param("const VK_FENCE*", "pFences"),
+ Param("const VkFence*", "pFences"),
Param("bool32_t", "waitAll"),
Param("uint64_t", "timeout")]),
- Proto("VK_RESULT", "CreateSemaphore",
- [Param("VK_DEVICE", "device"),
- Param("const VK_SEMAPHORE_CREATE_INFO*", "pCreateInfo"),
- Param("VK_SEMAPHORE*", "pSemaphore")]),
+ Proto("VkResult", "CreateSemaphore",
+ [Param("VkDevice", "device"),
+ Param("const VkSemaphoreCreateInfo*", "pCreateInfo"),
+ Param("VkSemaphore*", "pSemaphore")]),
- Proto("VK_RESULT", "QueueSignalSemaphore",
- [Param("VK_QUEUE", "queue"),
- Param("VK_SEMAPHORE", "semaphore")]),
+ Proto("VkResult", "QueueSignalSemaphore",
+ [Param("VkQueue", "queue"),
+ Param("VkSemaphore", "semaphore")]),
- Proto("VK_RESULT", "QueueWaitSemaphore",
- [Param("VK_QUEUE", "queue"),
- Param("VK_SEMAPHORE", "semaphore")]),
+ Proto("VkResult", "QueueWaitSemaphore",
+ [Param("VkQueue", "queue"),
+ Param("VkSemaphore", "semaphore")]),
- Proto("VK_RESULT", "CreateEvent",
- [Param("VK_DEVICE", "device"),
- Param("const VK_EVENT_CREATE_INFO*", "pCreateInfo"),
- Param("VK_EVENT*", "pEvent")]),
+ Proto("VkResult", "CreateEvent",
+ [Param("VkDevice", "device"),
+ Param("const VkEventCreateInfo*", "pCreateInfo"),
+ Param("VkEvent*", "pEvent")]),
- Proto("VK_RESULT", "GetEventStatus",
- [Param("VK_EVENT", "event")]),
+ Proto("VkResult", "GetEventStatus",
+ [Param("VkEvent", "event")]),
- Proto("VK_RESULT", "SetEvent",
- [Param("VK_EVENT", "event")]),
+ Proto("VkResult", "SetEvent",
+ [Param("VkEvent", "event")]),
- Proto("VK_RESULT", "ResetEvent",
- [Param("VK_EVENT", "event")]),
+ Proto("VkResult", "ResetEvent",
+ [Param("VkEvent", "event")]),
- Proto("VK_RESULT", "CreateQueryPool",
- [Param("VK_DEVICE", "device"),
- Param("const VK_QUERY_POOL_CREATE_INFO*", "pCreateInfo"),
- Param("VK_QUERY_POOL*", "pQueryPool")]),
+ Proto("VkResult", "CreateQueryPool",
+ [Param("VkDevice", "device"),
+ Param("const VkQueryPoolCreateInfo*", "pCreateInfo"),
+ Param("VkQueryPool*", "pQueryPool")]),
- Proto("VK_RESULT", "GetQueryPoolResults",
- [Param("VK_QUERY_POOL", "queryPool"),
+ Proto("VkResult", "GetQueryPoolResults",
+ [Param("VkQueryPool", "queryPool"),
Param("uint32_t", "startQuery"),
Param("uint32_t", "queryCount"),
Param("size_t*", "pDataSize"),
Param("void*", "pData")]),
- Proto("VK_RESULT", "GetFormatInfo",
- [Param("VK_DEVICE", "device"),
- Param("VK_FORMAT", "format"),
- Param("VK_FORMAT_INFO_TYPE", "infoType"),
+ Proto("VkResult", "GetFormatInfo",
+ [Param("VkDevice", "device"),
+ Param("VkFormat", "format"),
+ Param("VkFormatInfoType", "infoType"),
Param("size_t*", "pDataSize"),
Param("void*", "pData")]),
- Proto("VK_RESULT", "CreateBuffer",
- [Param("VK_DEVICE", "device"),
+ Proto("VkResult", "CreateBuffer",
+ [Param("VkDevice", "device"),
Param("const VkBufferCreateInfo*", "pCreateInfo"),
- Param("VK_BUFFER*", "pBuffer")]),
+ Param("VkBuffer*", "pBuffer")]),
- Proto("VK_RESULT", "CreateBufferView",
- [Param("VK_DEVICE", "device"),
+ Proto("VkResult", "CreateBufferView",
+ [Param("VkDevice", "device"),
Param("const VkBufferViewCreateInfo*", "pCreateInfo"),
- Param("VK_BUFFER_VIEW*", "pView")]),
+ Param("VkBufferView*", "pView")]),
- Proto("VK_RESULT", "CreateImage",
- [Param("VK_DEVICE", "device"),
- Param("const VK_IMAGE_CREATE_INFO*", "pCreateInfo"),
- Param("VK_IMAGE*", "pImage")]),
+ Proto("VkResult", "CreateImage",
+ [Param("VkDevice", "device"),
+ Param("const VkImageCreateInfo*", "pCreateInfo"),
+ Param("VkImage*", "pImage")]),
- Proto("VK_RESULT", "GetImageSubresourceInfo",
- [Param("VK_IMAGE", "image"),
- Param("const VK_IMAGE_SUBRESOURCE*", "pSubresource"),
- Param("VK_SUBRESOURCE_INFO_TYPE", "infoType"),
+ Proto("VkResult", "GetImageSubresourceInfo",
+ [Param("VkImage", "image"),
+ Param("const VkImageSubresource*", "pSubresource"),
+ Param("VkSubresourceInfoType", "infoType"),
Param("size_t*", "pDataSize"),
Param("void*", "pData")]),
- Proto("VK_RESULT", "CreateImageView",
- [Param("VK_DEVICE", "device"),
- Param("const VK_IMAGE_VIEW_CREATE_INFO*", "pCreateInfo"),
- Param("VK_IMAGE_VIEW*", "pView")]),
-
- Proto("VK_RESULT", "CreateColorAttachmentView",
- [Param("VK_DEVICE", "device"),
- Param("const VK_COLOR_ATTACHMENT_VIEW_CREATE_INFO*", "pCreateInfo"),
- Param("VK_COLOR_ATTACHMENT_VIEW*", "pView")]),
-
- Proto("VK_RESULT", "CreateDepthStencilView",
- [Param("VK_DEVICE", "device"),
- Param("const VK_DEPTH_STENCIL_VIEW_CREATE_INFO*", "pCreateInfo"),
- Param("VK_DEPTH_STENCIL_VIEW*", "pView")]),
-
- Proto("VK_RESULT", "CreateShader",
- [Param("VK_DEVICE", "device"),
- Param("const VK_SHADER_CREATE_INFO*", "pCreateInfo"),
- Param("VK_SHADER*", "pShader")]),
-
- Proto("VK_RESULT", "CreateGraphicsPipeline",
- [Param("VK_DEVICE", "device"),
- Param("const VK_GRAPHICS_PIPELINE_CREATE_INFO*", "pCreateInfo"),
- Param("VK_PIPELINE*", "pPipeline")]),
-
- Proto("VK_RESULT", "CreateGraphicsPipelineDerivative",
- [Param("VK_DEVICE", "device"),
- Param("const VK_GRAPHICS_PIPELINE_CREATE_INFO*", "pCreateInfo"),
- Param("VK_PIPELINE", "basePipeline"),
- Param("VK_PIPELINE*", "pPipeline")]),
-
- Proto("VK_RESULT", "CreateComputePipeline",
- [Param("VK_DEVICE", "device"),
- Param("const VK_COMPUTE_PIPELINE_CREATE_INFO*", "pCreateInfo"),
- Param("VK_PIPELINE*", "pPipeline")]),
-
- Proto("VK_RESULT", "StorePipeline",
- [Param("VK_PIPELINE", "pipeline"),
+ Proto("VkResult", "CreateImageView",
+ [Param("VkDevice", "device"),
+ Param("const VkImageViewCreateInfo*", "pCreateInfo"),
+ Param("VkImageView*", "pView")]),
+
+ Proto("VkResult", "CreateColorAttachmentView",
+ [Param("VkDevice", "device"),
+ Param("const VkColorAttachmentViewCreateInfo*", "pCreateInfo"),
+ Param("VkColorAttachmentView*", "pView")]),
+
+ Proto("VkResult", "CreateDepthStencilView",
+ [Param("VkDevice", "device"),
+ Param("const VkDepthStencilViewCreateInfo*", "pCreateInfo"),
+ Param("VkDepthStencilView*", "pView")]),
+
+ Proto("VkResult", "CreateShader",
+ [Param("VkDevice", "device"),
+ Param("const VkShaderCreateInfo*", "pCreateInfo"),
+ Param("VkShader*", "pShader")]),
+
+ Proto("VkResult", "CreateGraphicsPipeline",
+ [Param("VkDevice", "device"),
+ Param("const VkGraphicsPipelineCreateInfo*", "pCreateInfo"),
+ Param("VkPipeline*", "pPipeline")]),
+
+ Proto("VkResult", "CreateGraphicsPipelineDerivative",
+ [Param("VkDevice", "device"),
+ Param("const VkGraphicsPipelineCreateInfo*", "pCreateInfo"),
+ Param("VkPipeline", "basePipeline"),
+ Param("VkPipeline*", "pPipeline")]),
+
+ Proto("VkResult", "CreateComputePipeline",
+ [Param("VkDevice", "device"),
+ Param("const VkComputePipelineCreateInfo*", "pCreateInfo"),
+ Param("VkPipeline*", "pPipeline")]),
+
+ Proto("VkResult", "StorePipeline",
+ [Param("VkPipeline", "pipeline"),
Param("size_t*", "pDataSize"),
Param("void*", "pData")]),
- Proto("VK_RESULT", "LoadPipeline",
- [Param("VK_DEVICE", "device"),
+ Proto("VkResult", "LoadPipeline",
+ [Param("VkDevice", "device"),
Param("size_t", "dataSize"),
Param("const void*", "pData"),
- Param("VK_PIPELINE*", "pPipeline")]),
+ Param("VkPipeline*", "pPipeline")]),
- Proto("VK_RESULT", "LoadPipelineDerivative",
- [Param("VK_DEVICE", "device"),
+ Proto("VkResult", "LoadPipelineDerivative",
+ [Param("VkDevice", "device"),
Param("size_t", "dataSize"),
Param("const void*", "pData"),
- Param("VK_PIPELINE", "basePipeline"),
- Param("VK_PIPELINE*", "pPipeline")]),
+ Param("VkPipeline", "basePipeline"),
+ Param("VkPipeline*", "pPipeline")]),
- Proto("VK_RESULT", "CreateSampler",
- [Param("VK_DEVICE", "device"),
- Param("const VK_SAMPLER_CREATE_INFO*", "pCreateInfo"),
- Param("VK_SAMPLER*", "pSampler")]),
+ Proto("VkResult", "CreateSampler",
+ [Param("VkDevice", "device"),
+ Param("const VkSamplerCreateInfo*", "pCreateInfo"),
+ Param("VkSampler*", "pSampler")]),
- Proto("VK_RESULT", "CreateDescriptorSetLayout",
- [Param("VK_DEVICE", "device"),
- Param("const VK_DESCRIPTOR_SET_LAYOUT_CREATE_INFO*", "pCreateInfo"),
- Param("VK_DESCRIPTOR_SET_LAYOUT*", "pSetLayout")]),
+ Proto("VkResult", "CreateDescriptorSetLayout",
+ [Param("VkDevice", "device"),
+ Param("const VkDescriptorSetLayoutCreateInfo*", "pCreateInfo"),
+ Param("VkDescriptorSetLayout*", "pSetLayout")]),
- Proto("VK_RESULT", "CreateDescriptorSetLayoutChain",
- [Param("VK_DEVICE", "device"),
+ Proto("VkResult", "CreateDescriptorSetLayoutChain",
+ [Param("VkDevice", "device"),
Param("uint32_t", "setLayoutArrayCount"),
- Param("const VK_DESCRIPTOR_SET_LAYOUT*", "pSetLayoutArray"),
- Param("VK_DESCRIPTOR_SET_LAYOUT_CHAIN*", "pLayoutChain")]),
+ Param("const VkDescriptorSetLayout*", "pSetLayoutArray"),
+ Param("VkDescriptorSetLayoutChain*", "pLayoutChain")]),
- Proto("VK_RESULT", "BeginDescriptorPoolUpdate",
- [Param("VK_DEVICE", "device"),
- Param("VK_DESCRIPTOR_UPDATE_MODE", "updateMode")]),
+ Proto("VkResult", "BeginDescriptorPoolUpdate",
+ [Param("VkDevice", "device"),
+ Param("VkDescriptorUpdateMode", "updateMode")]),
- Proto("VK_RESULT", "EndDescriptorPoolUpdate",
- [Param("VK_DEVICE", "device"),
- Param("VK_CMD_BUFFER", "cmd")]),
+ Proto("VkResult", "EndDescriptorPoolUpdate",
+ [Param("VkDevice", "device"),
+ Param("VkCmdBuffer", "cmd")]),
- Proto("VK_RESULT", "CreateDescriptorPool",
- [Param("VK_DEVICE", "device"),
- Param("VK_DESCRIPTOR_POOL_USAGE", "poolUsage"),
+ Proto("VkResult", "CreateDescriptorPool",
+ [Param("VkDevice", "device"),
+ Param("VkDescriptorPoolUsage", "poolUsage"),
Param("uint32_t", "maxSets"),
- Param("const VK_DESCRIPTOR_POOL_CREATE_INFO*", "pCreateInfo"),
- Param("VK_DESCRIPTOR_POOL*", "pDescriptorPool")]),
+ Param("const VkDescriptorPoolCreateInfo*", "pCreateInfo"),
+ Param("VkDescriptorPool*", "pDescriptorPool")]),
- Proto("VK_RESULT", "ResetDescriptorPool",
- [Param("VK_DESCRIPTOR_POOL", "descriptorPool")]),
+ Proto("VkResult", "ResetDescriptorPool",
+ [Param("VkDescriptorPool", "descriptorPool")]),
- Proto("VK_RESULT", "AllocDescriptorSets",
- [Param("VK_DESCRIPTOR_POOL", "descriptorPool"),
- Param("VK_DESCRIPTOR_SET_USAGE", "setUsage"),
+ Proto("VkResult", "AllocDescriptorSets",
+ [Param("VkDescriptorPool", "descriptorPool"),
+ Param("VkDescriptorSetUsage", "setUsage"),
Param("uint32_t", "count"),
- Param("const VK_DESCRIPTOR_SET_LAYOUT*", "pSetLayouts"),
- Param("VK_DESCRIPTOR_SET*", "pDescriptorSets"),
+ Param("const VkDescriptorSetLayout*", "pSetLayouts"),
+ Param("VkDescriptorSet*", "pDescriptorSets"),
Param("uint32_t*", "pCount")]),
Proto("void", "ClearDescriptorSets",
- [Param("VK_DESCRIPTOR_POOL", "descriptorPool"),
+ [Param("VkDescriptorPool", "descriptorPool"),
Param("uint32_t", "count"),
- Param("const VK_DESCRIPTOR_SET*", "pDescriptorSets")]),
+ Param("const VkDescriptorSet*", "pDescriptorSets")]),
Proto("void", "UpdateDescriptors",
- [Param("VK_DESCRIPTOR_SET", "descriptorSet"),
+ [Param("VkDescriptorSet", "descriptorSet"),
Param("uint32_t", "updateCount"),
Param("const void**", "ppUpdateArray")]),
- Proto("VK_RESULT", "CreateDynamicViewportState",
- [Param("VK_DEVICE", "device"),
- Param("const VK_DYNAMIC_VP_STATE_CREATE_INFO*", "pCreateInfo"),
- Param("VK_DYNAMIC_VP_STATE_OBJECT*", "pState")]),
+ Proto("VkResult", "CreateDynamicViewportState",
+ [Param("VkDevice", "device"),
+ Param("const VkDynamicVpStateCreateInfo*", "pCreateInfo"),
+ Param("VkDynamicVpStateObject*", "pState")]),
- Proto("VK_RESULT", "CreateDynamicRasterState",
- [Param("VK_DEVICE", "device"),
- Param("const VK_DYNAMIC_RS_STATE_CREATE_INFO*", "pCreateInfo"),
- Param("VK_DYNAMIC_RS_STATE_OBJECT*", "pState")]),
+ Proto("VkResult", "CreateDynamicRasterState",
+ [Param("VkDevice", "device"),
+ Param("const VkDynamicRsStateCreateInfo*", "pCreateInfo"),
+ Param("VkDynamicRsStateObject*", "pState")]),
- Proto("VK_RESULT", "CreateDynamicColorBlendState",
- [Param("VK_DEVICE", "device"),
- Param("const VK_DYNAMIC_CB_STATE_CREATE_INFO*", "pCreateInfo"),
- Param("VK_DYNAMIC_CB_STATE_OBJECT*", "pState")]),
+ Proto("VkResult", "CreateDynamicColorBlendState",
+ [Param("VkDevice", "device"),
+ Param("const VkDynamicCbStateCreateInfo*", "pCreateInfo"),
+ Param("VkDynamicCbStateObject*", "pState")]),
- Proto("VK_RESULT", "CreateDynamicDepthStencilState",
- [Param("VK_DEVICE", "device"),
- Param("const VK_DYNAMIC_DS_STATE_CREATE_INFO*", "pCreateInfo"),
- Param("VK_DYNAMIC_DS_STATE_OBJECT*", "pState")]),
+ Proto("VkResult", "CreateDynamicDepthStencilState",
+ [Param("VkDevice", "device"),
+ Param("const VkDynamicDsStateCreateInfo*", "pCreateInfo"),
+ Param("VkDynamicDsStateObject*", "pState")]),
- Proto("VK_RESULT", "CreateCommandBuffer",
- [Param("VK_DEVICE", "device"),
- Param("const VK_CMD_BUFFER_CREATE_INFO*", "pCreateInfo"),
- Param("VK_CMD_BUFFER*", "pCmdBuffer")]),
+ Proto("VkResult", "CreateCommandBuffer",
+ [Param("VkDevice", "device"),
+ Param("const VkCmdBufferCreateInfo*", "pCreateInfo"),
+ Param("VkCmdBuffer*", "pCmdBuffer")]),
- Proto("VK_RESULT", "BeginCommandBuffer",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
- Param("const VK_CMD_BUFFER_BEGIN_INFO*", "pBeginInfo")]),
+ Proto("VkResult", "BeginCommandBuffer",
+ [Param("VkCmdBuffer", "cmdBuffer"),
+ Param("const VkCmdBufferBeginInfo*", "pBeginInfo")]),
- Proto("VK_RESULT", "EndCommandBuffer",
- [Param("VK_CMD_BUFFER", "cmdBuffer")]),
+ Proto("VkResult", "EndCommandBuffer",
+ [Param("VkCmdBuffer", "cmdBuffer")]),
- Proto("VK_RESULT", "ResetCommandBuffer",
- [Param("VK_CMD_BUFFER", "cmdBuffer")]),
+ Proto("VkResult", "ResetCommandBuffer",
+ [Param("VkCmdBuffer", "cmdBuffer")]),
Proto("void", "CmdBindPipeline",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
- Param("VK_PIPELINE_BIND_POINT", "pipelineBindPoint"),
- Param("VK_PIPELINE", "pipeline")]),
+ [Param("VkCmdBuffer", "cmdBuffer"),
+ Param("VkPipelineBindPoint", "pipelineBindPoint"),
+ Param("VkPipeline", "pipeline")]),
Proto("void", "CmdBindDynamicStateObject",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
- Param("VK_STATE_BIND_POINT", "stateBindPoint"),
- Param("VK_DYNAMIC_STATE_OBJECT", "state")]),
+ [Param("VkCmdBuffer", "cmdBuffer"),
+ Param("VkStateBindPoint", "stateBindPoint"),
+ Param("VkDynamicStateObject", "state")]),
Proto("void", "CmdBindDescriptorSets",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
- Param("VK_PIPELINE_BIND_POINT", "pipelineBindPoint"),
- Param("VK_DESCRIPTOR_SET_LAYOUT_CHAIN", "layoutChain"),
+ [Param("VkCmdBuffer", "cmdBuffer"),
+ Param("VkPipelineBindPoint", "pipelineBindPoint"),
+ Param("VkDescriptorSetLayoutChain", "layoutChain"),
Param("uint32_t", "layoutChainSlot"),
Param("uint32_t", "count"),
- Param("const VK_DESCRIPTOR_SET*", "pDescriptorSets"),
+ Param("const VkDescriptorSet*", "pDescriptorSets"),
Param("const uint32_t*", "pUserData")]),
Proto("void", "CmdBindVertexBuffer",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
- Param("VK_BUFFER", "buffer"),
- Param("VK_GPU_SIZE", "offset"),
+ [Param("VkCmdBuffer", "cmdBuffer"),
+ Param("VkBuffer", "buffer"),
+ Param("VkGpuSize", "offset"),
Param("uint32_t", "binding")]),
Proto("void", "CmdBindIndexBuffer",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
- Param("VK_BUFFER", "buffer"),
- Param("VK_GPU_SIZE", "offset"),
- Param("VK_INDEX_TYPE", "indexType")]),
+ [Param("VkCmdBuffer", "cmdBuffer"),
+ Param("VkBuffer", "buffer"),
+ Param("VkGpuSize", "offset"),
+ Param("VkIndexType", "indexType")]),
Proto("void", "CmdDraw",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
+ [Param("VkCmdBuffer", "cmdBuffer"),
Param("uint32_t", "firstVertex"),
Param("uint32_t", "vertexCount"),
Param("uint32_t", "firstInstance"),
Param("uint32_t", "instanceCount")]),
Proto("void", "CmdDrawIndexed",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
+ [Param("VkCmdBuffer", "cmdBuffer"),
Param("uint32_t", "firstIndex"),
Param("uint32_t", "indexCount"),
Param("int32_t", "vertexOffset"),
@@ -644,241 +644,241 @@ core = Extension(
Param("uint32_t", "instanceCount")]),
Proto("void", "CmdDrawIndirect",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
- Param("VK_BUFFER", "buffer"),
- Param("VK_GPU_SIZE", "offset"),
+ [Param("VkCmdBuffer", "cmdBuffer"),
+ Param("VkBuffer", "buffer"),
+ Param("VkGpuSize", "offset"),
Param("uint32_t", "count"),
Param("uint32_t", "stride")]),
Proto("void", "CmdDrawIndexedIndirect",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
- Param("VK_BUFFER", "buffer"),
- Param("VK_GPU_SIZE", "offset"),
+ [Param("VkCmdBuffer", "cmdBuffer"),
+ Param("VkBuffer", "buffer"),
+ Param("VkGpuSize", "offset"),
Param("uint32_t", "count"),
Param("uint32_t", "stride")]),
Proto("void", "CmdDispatch",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
+ [Param("VkCmdBuffer", "cmdBuffer"),
Param("uint32_t", "x"),
Param("uint32_t", "y"),
Param("uint32_t", "z")]),
Proto("void", "CmdDispatchIndirect",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
- Param("VK_BUFFER", "buffer"),
- Param("VK_GPU_SIZE", "offset")]),
+ [Param("VkCmdBuffer", "cmdBuffer"),
+ Param("VkBuffer", "buffer"),
+ Param("VkGpuSize", "offset")]),
Proto("void", "CmdCopyBuffer",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
- Param("VK_BUFFER", "srcBuffer"),
- Param("VK_BUFFER", "destBuffer"),
+ [Param("VkCmdBuffer", "cmdBuffer"),
+ Param("VkBuffer", "srcBuffer"),
+ Param("VkBuffer", "destBuffer"),
Param("uint32_t", "regionCount"),
- Param("const VK_BUFFER_COPY*", "pRegions")]),
+ Param("const VkBufferCopy*", "pRegions")]),
Proto("void", "CmdCopyImage",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
- Param("VK_IMAGE", "srcImage"),
- Param("VK_IMAGE_LAYOUT", "srcImageLayout"),
- Param("VK_IMAGE", "destImage"),
- Param("VK_IMAGE_LAYOUT", "destImageLayout"),
+ [Param("VkCmdBuffer", "cmdBuffer"),
+ Param("VkImage", "srcImage"),
+ Param("VkImageLayout", "srcImageLayout"),
+ Param("VkImage", "destImage"),
+ Param("VkImageLayout", "destImageLayout"),
Param("uint32_t", "regionCount"),
- Param("const VK_IMAGE_COPY*", "pRegions")]),
+ Param("const VkImageCopy*", "pRegions")]),
Proto("void", "CmdBlitImage",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
- Param("VK_IMAGE", "srcImage"),
- Param("VK_IMAGE_LAYOUT", "srcImageLayout"),
- Param("VK_IMAGE", "destImage"),
- Param("VK_IMAGE_LAYOUT", "destImageLayout"),
+ [Param("VkCmdBuffer", "cmdBuffer"),
+ Param("VkImage", "srcImage"),
+ Param("VkImageLayout", "srcImageLayout"),
+ Param("VkImage", "destImage"),
+ Param("VkImageLayout", "destImageLayout"),
Param("uint32_t", "regionCount"),
- Param("const VK_IMAGE_BLIT*", "pRegions")]),
+ Param("const VkImageBlit*", "pRegions")]),
Proto("void", "CmdCopyBufferToImage",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
- Param("VK_BUFFER", "srcBuffer"),
- Param("VK_IMAGE", "destImage"),
- Param("VK_IMAGE_LAYOUT", "destImageLayout"),
+ [Param("VkCmdBuffer", "cmdBuffer"),
+ Param("VkBuffer", "srcBuffer"),
+ Param("VkImage", "destImage"),
+ Param("VkImageLayout", "destImageLayout"),
Param("uint32_t", "regionCount"),
- Param("const VK_BUFFER_IMAGE_COPY*", "pRegions")]),
+ Param("const VkBufferImageCopy*", "pRegions")]),
Proto("void", "CmdCopyImageToBuffer",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
- Param("VK_IMAGE", "srcImage"),
- Param("VK_IMAGE_LAYOUT", "srcImageLayout"),
- Param("VK_BUFFER", "destBuffer"),
+ [Param("VkCmdBuffer", "cmdBuffer"),
+ Param("VkImage", "srcImage"),
+ Param("VkImageLayout", "srcImageLayout"),
+ Param("VkBuffer", "destBuffer"),
Param("uint32_t", "regionCount"),
- Param("const VK_BUFFER_IMAGE_COPY*", "pRegions")]),
+ Param("const VkBufferImageCopy*", "pRegions")]),
Proto("void", "CmdCloneImageData",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
- Param("VK_IMAGE", "srcImage"),
- Param("VK_IMAGE_LAYOUT", "srcImageLayout"),
- Param("VK_IMAGE", "destImage"),
- Param("VK_IMAGE_LAYOUT", "destImageLayout")]),
+ [Param("VkCmdBuffer", "cmdBuffer"),
+ Param("VkImage", "srcImage"),
+ Param("VkImageLayout", "srcImageLayout"),
+ Param("VkImage", "destImage"),
+ Param("VkImageLayout", "destImageLayout")]),
Proto("void", "CmdUpdateBuffer",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
- Param("VK_BUFFER", "destBuffer"),
- Param("VK_GPU_SIZE", "destOffset"),
- Param("VK_GPU_SIZE", "dataSize"),
+ [Param("VkCmdBuffer", "cmdBuffer"),
+ Param("VkBuffer", "destBuffer"),
+ Param("VkGpuSize", "destOffset"),
+ Param("VkGpuSize", "dataSize"),
Param("const uint32_t*", "pData")]),
Proto("void", "CmdFillBuffer",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
- Param("VK_BUFFER", "destBuffer"),
- Param("VK_GPU_SIZE", "destOffset"),
- Param("VK_GPU_SIZE", "fillSize"),
+ [Param("VkCmdBuffer", "cmdBuffer"),
+ Param("VkBuffer", "destBuffer"),
+ Param("VkGpuSize", "destOffset"),
+ Param("VkGpuSize", "fillSize"),
Param("uint32_t", "data")]),
Proto("void", "CmdClearColorImage",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
- Param("VK_IMAGE", "image"),
- Param("VK_IMAGE_LAYOUT", "imageLayout"),
- Param("VK_CLEAR_COLOR", "color"),
+ [Param("VkCmdBuffer", "cmdBuffer"),
+ Param("VkImage", "image"),
+ Param("VkImageLayout", "imageLayout"),
+ Param("VkClearColor", "color"),
Param("uint32_t", "rangeCount"),
- Param("const VK_IMAGE_SUBRESOURCE_RANGE*", "pRanges")]),
+ Param("const VkImageSubresourceRange*", "pRanges")]),
Proto("void", "CmdClearDepthStencil",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
- Param("VK_IMAGE", "image"),
- Param("VK_IMAGE_LAYOUT", "imageLayout"),
+ [Param("VkCmdBuffer", "cmdBuffer"),
+ Param("VkImage", "image"),
+ Param("VkImageLayout", "imageLayout"),
Param("float", "depth"),
Param("uint32_t", "stencil"),
Param("uint32_t", "rangeCount"),
- Param("const VK_IMAGE_SUBRESOURCE_RANGE*", "pRanges")]),
+ Param("const VkImageSubresourceRange*", "pRanges")]),
Proto("void", "CmdResolveImage",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
- Param("VK_IMAGE", "srcImage"),
- Param("VK_IMAGE_LAYOUT", "srcImageLayout"),
- Param("VK_IMAGE", "destImage"),
- Param("VK_IMAGE_LAYOUT", "destImageLayout"),
+ [Param("VkCmdBuffer", "cmdBuffer"),
+ Param("VkImage", "srcImage"),
+ Param("VkImageLayout", "srcImageLayout"),
+ Param("VkImage", "destImage"),
+ Param("VkImageLayout", "destImageLayout"),
Param("uint32_t", "rectCount"),
- Param("const VK_IMAGE_RESOLVE*", "pRects")]),
+ Param("const VkImageResolve*", "pRects")]),
Proto("void", "CmdSetEvent",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
- Param("VK_EVENT", "event"),
- Param("VK_PIPE_EVENT", "pipeEvent")]),
+ [Param("VkCmdBuffer", "cmdBuffer"),
+ Param("VkEvent", "event"),
+ Param("VkPipeEvent", "pipeEvent")]),
Proto("void", "CmdResetEvent",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
- Param("VK_EVENT", "event"),
- Param("VK_PIPE_EVENT", "pipeEvent")]),
+ [Param("VkCmdBuffer", "cmdBuffer"),
+ Param("VkEvent", "event"),
+ Param("VkPipeEvent", "pipeEvent")]),
Proto("void", "CmdWaitEvents",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
- Param("const VK_EVENT_WAIT_INFO*", "pWaitInfo")]),
+ [Param("VkCmdBuffer", "cmdBuffer"),
+ Param("const VkEventWaitInfo*", "pWaitInfo")]),
Proto("void", "CmdPipelineBarrier",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
- Param("const VK_PIPELINE_BARRIER*", "pBarrier")]),
+ [Param("VkCmdBuffer", "cmdBuffer"),
+ Param("const VkPipelineBarrier*", "pBarrier")]),
Proto("void", "CmdBeginQuery",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
- Param("VK_QUERY_POOL", "queryPool"),
+ [Param("VkCmdBuffer", "cmdBuffer"),
+ Param("VkQueryPool", "queryPool"),
Param("uint32_t", "slot"),
- Param("VK_FLAGS", "flags")]),
+ Param("VkFlags", "flags")]),
Proto("void", "CmdEndQuery",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
- Param("VK_QUERY_POOL", "queryPool"),
+ [Param("VkCmdBuffer", "cmdBuffer"),
+ Param("VkQueryPool", "queryPool"),
Param("uint32_t", "slot")]),
Proto("void", "CmdResetQueryPool",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
- Param("VK_QUERY_POOL", "queryPool"),
+ [Param("VkCmdBuffer", "cmdBuffer"),
+ Param("VkQueryPool", "queryPool"),
Param("uint32_t", "startQuery"),
Param("uint32_t", "queryCount")]),
Proto("void", "CmdWriteTimestamp",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
- Param("VK_TIMESTAMP_TYPE", "timestampType"),
- Param("VK_BUFFER", "destBuffer"),
- Param("VK_GPU_SIZE", "destOffset")]),
+ [Param("VkCmdBuffer", "cmdBuffer"),
+ Param("VkTimestampType", "timestampType"),
+ Param("VkBuffer", "destBuffer"),
+ Param("VkGpuSize", "destOffset")]),
Proto("void", "CmdInitAtomicCounters",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
- Param("VK_PIPELINE_BIND_POINT", "pipelineBindPoint"),
+ [Param("VkCmdBuffer", "cmdBuffer"),
+ Param("VkPipelineBindPoint", "pipelineBindPoint"),
Param("uint32_t", "startCounter"),
Param("uint32_t", "counterCount"),
Param("const uint32_t*", "pData")]),
Proto("void", "CmdLoadAtomicCounters",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
- Param("VK_PIPELINE_BIND_POINT", "pipelineBindPoint"),
+ [Param("VkCmdBuffer", "cmdBuffer"),
+ Param("VkPipelineBindPoint", "pipelineBindPoint"),
Param("uint32_t", "startCounter"),
Param("uint32_t", "counterCount"),
- Param("VK_BUFFER", "srcBuffer"),
- Param("VK_GPU_SIZE", "srcOffset")]),
+ Param("VkBuffer", "srcBuffer"),
+ Param("VkGpuSize", "srcOffset")]),
Proto("void", "CmdSaveAtomicCounters",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
- Param("VK_PIPELINE_BIND_POINT", "pipelineBindPoint"),
+ [Param("VkCmdBuffer", "cmdBuffer"),
+ Param("VkPipelineBindPoint", "pipelineBindPoint"),
Param("uint32_t", "startCounter"),
Param("uint32_t", "counterCount"),
- Param("VK_BUFFER", "destBuffer"),
- Param("VK_GPU_SIZE", "destOffset")]),
+ Param("VkBuffer", "destBuffer"),
+ Param("VkGpuSize", "destOffset")]),
- Proto("VK_RESULT", "CreateFramebuffer",
- [Param("VK_DEVICE", "device"),
- Param("const VK_FRAMEBUFFER_CREATE_INFO*", "pCreateInfo"),
- Param("VK_FRAMEBUFFER*", "pFramebuffer")]),
+ Proto("VkResult", "CreateFramebuffer",
+ [Param("VkDevice", "device"),
+ Param("const VkFramebufferCreateInfo*", "pCreateInfo"),
+ Param("VkFramebuffer*", "pFramebuffer")]),
- Proto("VK_RESULT", "CreateRenderPass",
- [Param("VK_DEVICE", "device"),
- Param("const VK_RENDER_PASS_CREATE_INFO*", "pCreateInfo"),
- Param("VK_RENDER_PASS*", "pRenderPass")]),
+ Proto("VkResult", "CreateRenderPass",
+ [Param("VkDevice", "device"),
+ Param("const VkRenderPassCreateInfo*", "pCreateInfo"),
+ Param("VkRenderPass*", "pRenderPass")]),
Proto("void", "CmdBeginRenderPass",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
- Param("const VK_RENDER_PASS_BEGIN*", "pRenderPassBegin")]),
+ [Param("VkCmdBuffer", "cmdBuffer"),
+ Param("const VkRenderPassBegin*", "pRenderPassBegin")]),
Proto("void", "CmdEndRenderPass",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
- Param("VK_RENDER_PASS", "renderPass")]),
+ [Param("VkCmdBuffer", "cmdBuffer"),
+ Param("VkRenderPass", "renderPass")]),
- Proto("VK_RESULT", "DbgSetValidationLevel",
- [Param("VK_DEVICE", "device"),
- Param("VK_VALIDATION_LEVEL", "validationLevel")]),
+ Proto("VkResult", "DbgSetValidationLevel",
+ [Param("VkDevice", "device"),
+ Param("VkValidationLevel", "validationLevel")]),
- Proto("VK_RESULT", "DbgRegisterMsgCallback",
- [Param("VK_INSTANCE", "instance"),
+ Proto("VkResult", "DbgRegisterMsgCallback",
+ [Param("VkInstance", "instance"),
Param("VK_DBG_MSG_CALLBACK_FUNCTION", "pfnMsgCallback"),
Param("void*", "pUserData")]),
- Proto("VK_RESULT", "DbgUnregisterMsgCallback",
- [Param("VK_INSTANCE", "instance"),
+ Proto("VkResult", "DbgUnregisterMsgCallback",
+ [Param("VkInstance", "instance"),
Param("VK_DBG_MSG_CALLBACK_FUNCTION", "pfnMsgCallback")]),
- Proto("VK_RESULT", "DbgSetMessageFilter",
- [Param("VK_DEVICE", "device"),
+ Proto("VkResult", "DbgSetMessageFilter",
+ [Param("VkDevice", "device"),
Param("int32_t", "msgCode"),
Param("VK_DBG_MSG_FILTER", "filter")]),
- Proto("VK_RESULT", "DbgSetObjectTag",
- [Param("VK_BASE_OBJECT", "object"),
+ Proto("VkResult", "DbgSetObjectTag",
+ [Param("VkBaseObject", "object"),
Param("size_t", "tagSize"),
Param("const void*", "pTag")]),
- Proto("VK_RESULT", "DbgSetGlobalOption",
- [Param("VK_INSTANCE", "instance"),
+ Proto("VkResult", "DbgSetGlobalOption",
+ [Param("VkInstance", "instance"),
Param("VK_DBG_GLOBAL_OPTION", "dbgOption"),
Param("size_t", "dataSize"),
Param("const void*", "pData")]),
- Proto("VK_RESULT", "DbgSetDeviceOption",
- [Param("VK_DEVICE", "device"),
+ Proto("VkResult", "DbgSetDeviceOption",
+ [Param("VkDevice", "device"),
Param("VK_DBG_DEVICE_OPTION", "dbgOption"),
Param("size_t", "dataSize"),
Param("const void*", "pData")]),
Proto("void", "CmdDbgMarkerBegin",
- [Param("VK_CMD_BUFFER", "cmdBuffer"),
+ [Param("VkCmdBuffer", "cmdBuffer"),
Param("const char*", "pMarker")]),
Proto("void", "CmdDbgMarkerEnd",
- [Param("VK_CMD_BUFFER", "cmdBuffer")]),
+ [Param("VkCmdBuffer", "cmdBuffer")]),
],
)
@@ -887,78 +887,78 @@ wsi_x11 = Extension(
headers=["vkWsiX11Ext.h"],
objects=[],
protos=[
- Proto("VK_RESULT", "WsiX11AssociateConnection",
- [Param("VK_PHYSICAL_GPU", "gpu"),
+ Proto("VkResult", "WsiX11AssociateConnection",
+ [Param("VkPhysicalGpu", "gpu"),
Param("const VK_WSI_X11_CONNECTION_INFO*", "pConnectionInfo")]),
- Proto("VK_RESULT", "WsiX11GetMSC",
- [Param("VK_DEVICE", "device"),
+ Proto("VkResult", "WsiX11GetMSC",
+ [Param("VkDevice", "device"),
Param("xcb_window_t", "window"),
Param("xcb_randr_crtc_t", "crtc"),
Param("uint64_t*", "pMsc")]),
- Proto("VK_RESULT", "WsiX11CreatePresentableImage",
- [Param("VK_DEVICE", "device"),
+ Proto("VkResult", "WsiX11CreatePresentableImage",
+ [Param("VkDevice", "device"),
Param("const VK_WSI_X11_PRESENTABLE_IMAGE_CREATE_INFO*", "pCreateInfo"),
- Param("VK_IMAGE*", "pImage"),
- Param("VK_GPU_MEMORY*", "pMem")]),
+ Param("VkImage*", "pImage"),
+ Param("VkGpuMemory*", "pMem")]),
- Proto("VK_RESULT", "WsiX11QueuePresent",
- [Param("VK_QUEUE", "queue"),
+ Proto("VkResult", "WsiX11QueuePresent",
+ [Param("VkQueue", "queue"),
Param("const VK_WSI_X11_PRESENT_INFO*", "pPresentInfo"),
- Param("VK_FENCE", "fence")]),
+ Param("VkFence", "fence")]),
],
)
extensions = [core, wsi_x11]
object_root_list = [
- "VK_INSTANCE",
- "VK_PHYSICAL_GPU",
- "VK_BASE_OBJECT"
+ "VkInstance",
+ "VkPhysicalGpu",
+ "VkBaseObject"
]
object_base_list = [
- "VK_DEVICE",
- "VK_QUEUE",
- "VK_GPU_MEMORY",
- "VK_OBJECT"
+ "VkDevice",
+ "VkQueue",
+ "VkGpuMemory",
+ "VkObject"
]
object_list = [
- "VK_BUFFER",
- "VK_BUFFER_VIEW",
- "VK_IMAGE",
- "VK_IMAGE_VIEW",
- "VK_COLOR_ATTACHMENT_VIEW",
- "VK_DEPTH_STENCIL_VIEW",
- "VK_SHADER",
- "VK_PIPELINE",
- "VK_SAMPLER",
- "VK_DESCRIPTOR_SET",
- "VK_DESCRIPTOR_SET_LAYOUT",
- "VK_DESCRIPTOR_SET_LAYOUT_CHAIN",
- "VK_DESCRIPTOR_POOL",
- "VK_DYNAMIC_STATE_OBJECT",
- "VK_CMD_BUFFER",
- "VK_FENCE",
- "VK_SEMAPHORE",
- "VK_EVENT",
- "VK_QUERY_POOL",
- "VK_FRAMEBUFFER",
- "VK_RENDER_PASS"
+ "VkBuffer",
+ "VkBufferView",
+ "VkImage",
+ "VkImageView",
+ "VkColorAttachmentView",
+ "VkDepthStencilView",
+ "VkShader",
+ "VkPipeline",
+ "VkSampler",
+ "VkDescriptorSet",
+ "VkDescriptorSetLayout",
+ "VkDescriptorSetLayoutChain",
+ "VkDescriptorPool",
+ "VkDynamicStateObject",
+ "VkCmdBuffer",
+ "VkFence",
+ "VkSemaphore",
+ "VkEvent",
+ "VkQueryPool",
+ "VkFramebuffer",
+ "VkRenderPass"
]
object_dynamic_state_list = [
- "VK_DYNAMIC_VP_STATE_OBJECT",
- "VK_DYNAMIC_RS_STATE_OBJECT",
- "VK_DYNAMIC_CB_STATE_OBJECT",
- "VK_DYNAMIC_DS_STATE_OBJECT"
+ "VkDynamicVpStateObject",
+ "VkDynamicRsStateObject",
+ "VkDynamicCbStateObject",
+ "VkDynamicDsStateObject"
]
object_type_list = object_root_list + object_base_list + object_list + object_dynamic_state_list
-object_parent_list = ["VK_BASE_OBJECT", "VK_OBJECT", "VK_DYNAMIC_STATE_OBJECT"]
+object_parent_list = ["VkBaseObject", "VkObject", "VkDynamicStateObject"]
headers = []
objects = []
@@ -989,8 +989,8 @@ def parse_vk_h(filename):
# parse proto_lines to protos
protos = []
for line in proto_lines:
- first, rest = line.split(" (VKAPI *vk")
- second, third = rest.split("Type)(")
+ first, rest = line.split(" (VKAPI *PFN_vk")
+ second, third = rest.split(")(")
# get the return type, no space before "*"
proto_ret = "*".join([t.rstrip() for t in first.split("*")])