aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTobin Ehlis <tobine@google.com>2015-12-01 09:48:58 -0700
committerMark Lobodzinski <mark@lunarg.com>2015-12-07 14:36:49 -0700
commitfaf9dac98d1d8f0e83b045cce23937619da8ae0a (patch)
tree14fe4aa25e1e797f284dfc00e02efb6353abec6e
parentb00af418b6f7c7a811e8c73bdc5b2cccb84bcba3 (diff)
downloadusermoji-faf9dac98d1d8f0e83b045cce23937619da8ae0a.tar.xz
layers: MR75, Merge DrawState and ShaderChecker into DrawState layer
This is the initial "dumb" merge where a few data structures are now duplicated within DrawState. This is intentional to simplify the transition and a follow-on commit will fix these inefficiencies. Conflicts: layers/draw_state.cpp
-rw-r--r--demos/cube.c2
-rw-r--r--layers/CMakeLists.txt1
-rw-r--r--layers/README.md7
-rw-r--r--layers/draw_state.cpp1046
-rwxr-xr-xlayers/draw_state.h25
-rw-r--r--layers/linux/shader_checker.json11
-rw-r--r--layers/shader_checker.cpp1340
-rw-r--r--layers/shader_checker.h42
-rw-r--r--layers/vk_layer_settings.txt5
-rw-r--r--layers/vk_validation_layer_details.md59
-rw-r--r--layers/windows/shader_checker.json11
-rwxr-xr-xvk_layer_documentation_generate.py8
12 files changed, 1016 insertions, 1541 deletions
diff --git a/demos/cube.c b/demos/cube.c
index 30488ecd..3acb8fa3 100644
--- a/demos/cube.c
+++ b/demos/cube.c
@@ -2113,7 +2113,6 @@ static void demo_init_vk(struct demo *demo)
"VK_LAYER_LUNARG_ObjectTracker",
"VK_LAYER_LUNARG_DrawState",
"VK_LAYER_LUNARG_ParamChecker",
- "VK_LAYER_LUNARG_ShaderChecker",
"VK_LAYER_LUNARG_Swapchain",
"VK_LAYER_LUNARG_DeviceLimits",
"VK_LAYER_LUNARG_Image",
@@ -2125,7 +2124,6 @@ static void demo_init_vk(struct demo *demo)
"VK_LAYER_LUNARG_ObjectTracker",
"VK_LAYER_LUNARG_DrawState",
"VK_LAYER_LUNARG_ParamChecker",
- "VK_LAYER_LUNARG_ShaderChecker",
"VK_LAYER_LUNARG_Swapchain",
"VK_LAYER_LUNARG_DeviceLimits",
"VK_LAYER_LUNARG_Image",
diff --git a/layers/CMakeLists.txt b/layers/CMakeLists.txt
index 77104c7f..63a79dc1 100644
--- a/layers/CMakeLists.txt
+++ b/layers/CMakeLists.txt
@@ -145,7 +145,6 @@ add_vk_layer(Multi multi.cpp vk_layer_table.cpp)
add_vk_layer(DrawState draw_state.cpp vk_layer_debug_marker_table.cpp vk_layer_table.cpp)
add_vk_layer(DeviceLimits device_limits.cpp vk_layer_debug_marker_table.cpp vk_layer_table.cpp vk_layer_utils.cpp)
add_vk_layer(MemTracker mem_tracker.cpp vk_layer_table.cpp)
-add_vk_layer(ShaderChecker shader_checker.cpp vk_layer_table.cpp)
add_vk_layer(Image image.cpp vk_layer_table.cpp)
add_vk_layer(ParamChecker param_checker.cpp vk_layer_debug_marker_table.cpp vk_layer_table.cpp)
add_vk_layer(ScreenShot screenshot.cpp vk_layer_table.cpp)
diff --git a/layers/README.md b/layers/README.md
index 66b76207..bd7a50d9 100644
--- a/layers/README.md
+++ b/layers/README.md
@@ -49,8 +49,8 @@ For complete details of current validation layers, including all of the validati
### Print Object Stats
(build dir)/layers/object_track.cpp (name=VK_LAYER_LUNARG_ObjectTracker) - Track object creation, use, and destruction. As objects are created, they're stored in a map. As objects are used, the layer verifies they exist in the map, flagging errors for unknown objects. As objects are destroyed, they're removed from the map. At vkDestroyDevice() and vkDestroyInstance() times, if any objects have not been destroyed, they are reported as leaked objects. If a Dbg callback function is registered, this layer will use callback function(s) for reporting, otherwise uses stdout.
-### Validate Draw State
-layers/draw\_state.cpp (name=VK_LAYER_LUNARG_DrawState) - DrawState tracks the Descriptor Set, Pipeline State, and dynamic state performing some point validation as states are created and used, and further validation at each Draw call. Of primary interest is making sure that the resources bound to Descriptor Sets correctly align with the layout specified for the Set. If a Dbg callback function is registered, this layer will use callback function(s) for reporting, otherwise uses stdout.
+### Validate Draw State and Shaders
+layers/draw\_state.cpp (name=VK_LAYER_LUNARG_DrawState) - DrawState tracks the Descriptor Set, Pipeline State, Shaders, and dynamic state, performing some point validation as states are created and used, and further validation at each Draw call. Of primary interest is making sure that the resources bound to Descriptor Sets correctly align with the layout specified for the Set. Additionally DrawState includes shader validation (formerly the separate ShaderChecker layer) that inspects the SPIR-V shader images and fixed function pipeline stages at PSO creation time. It flags errors when inconsistencies are found across interfaces between shader stages. The exact behavior of the checks depends on the pair of pipeline stages involved. If a Dbg callback function is registered, this layer will use callback function(s) for reporting, otherwise uses stdout.
### Track GPU Memory
layers/mem\_tracker.cpp (name=VK_LAYER_LUNARG_MemTracker) - The MemTracker layer tracks memory objects and references and validates that they are managed correctly by the application. This includes tracking object bindings, memory hazards, and memory object lifetimes. MemTracker validates several other hazard-related issues related to command buffers, fences, and memory mapping. If a Dbg callback function is registered, this layer will use callback function(s) for reporting, otherwise uses stdout.
@@ -67,9 +67,6 @@ layers/image.cpp (name=VK_LAYER_LUNARG_Image) - The Image layer is intended to v
### Swapchain
<build dir>/layer/swapchain.cpp (name=VK_LAYER_LUNARG_Swapchain) - Check that WSI extensions are being used correctly.
-### Validate Shaders
-<build dir>/layers/shader_checker.cpp (name=VK_LAYER_LUNARG_ShaderChecker) - The ShaderChecker layer inspects the SPIR-V shader images and fixed function pipeline stages at PSO creation time. It flags errors when inconsistencies are found across interfaces between shader stages. The exact behavior of the checks depends on the pair of pipeline stages involved. If a Dbg callback function is registered, this layer will use callback function(s) for reporting, otherwise uses stdout.
-
### Device Limitations
layers/device_limits.cpp (name=VK_LAYER_LUNARG_DeviceLimits) - This layer is intended to capture underlying device features and limitations and then flag errors if an app makes requests for unsupported features or exceeding limitations. This layer is a work in progress and currently only flags some high-level errors without flagging errors on specific feature and limitation. If a Dbg callback function is registered, this layer will use callback function(s) for reporting, otherwise uses stdout.
diff --git a/layers/draw_state.cpp b/layers/draw_state.cpp
index 1f03b160..d5db9205 100644
--- a/layers/draw_state.cpp
+++ b/layers/draw_state.cpp
@@ -24,16 +24,22 @@
* Author: Cody Northrop <cody@lunarg.com>
* Author: Michael Lentine <mlentine@google.com>
* Author: Tobin Ehlis <tobin@lunarg.com>
+ * Author: Chia-I Wu <olv@lunarg.com>
+ * Author: Chris Forbes <chrisf@ijw.co.nz>
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#include <algorithm>
-#include <memory>
+#include <assert.h>
#include <unordered_map>
#include <unordered_set>
+#include <map>
+#include <string>
+#include <iostream>
+#include <algorithm>
#include <list>
+#include <spirv.hpp>
#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
@@ -59,12 +65,19 @@
// disable until corner cases are fixed
#define DISABLE_IMAGE_LAYOUT_VALIDATION
+using std::unordered_map;
+using std::unordered_set;
+
struct devExts {
VkBool32 debug_marker_enabled;
VkBool32 wsi_enabled;
unordered_map<VkSwapchainKHR, SWAPCHAIN_NODE*> swapchainMap;
};
+// fwd decls
+struct shader_module;
+struct render_pass;
+
struct layer_data {
debug_report_data *report_data;
// TODO: put instance data here
@@ -90,6 +103,11 @@ struct layer_data {
unordered_map<VkFramebuffer, VkFramebufferCreateInfo*> frameBufferMap;
unordered_map<VkImage, IMAGE_NODE*> imageLayoutMap;
unordered_map<VkRenderPass, RENDER_PASS_NODE*> renderPassMap;
+ // Data structs from shaderChecker TODO : Merge 3 duplicate maps with one above
+ unordered_map<VkShaderModule, shader_module *> shader_module_map;
+ unordered_map<VkDescriptorSetLayout, std::unordered_set<uint32_t>*> descriptor_set_layout_map;
+ unordered_map<VkPipelineLayout, std::vector<std::unordered_set<uint32_t>*>*> pipeline_layout_map;
+ unordered_map<VkRenderPass, render_pass *> render_pass_map;
// Current render pass
VkRenderPassBeginInfo renderPassBeginInfo;
uint32_t currentSubpass;
@@ -101,11 +119,56 @@ struct layer_data {
device_extensions()
{};
};
+// Code imported from ShaderChecker
+static void
+build_type_def_index(std::vector<unsigned> const &words, std::unordered_map<unsigned, unsigned> &type_def_index);
+
+struct shader_module {
+ /* the spirv image itself */
+ vector<uint32_t> words;
+ /* a mapping of <id> to the first word of its def. this is useful because walking type
+ * trees requires jumping all over the instruction stream.
+ */
+ unordered_map<unsigned, unsigned> type_def_index;
+
+ shader_module(VkShaderModuleCreateInfo const *pCreateInfo) :
+ words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
+ type_def_index() {
+
+ build_type_def_index(words, type_def_index);
+ }
+};
+
+struct render_pass {
+ vector<std::vector<VkFormat>> subpass_color_formats;
+
+ render_pass(VkRenderPassCreateInfo const *pCreateInfo)
+ {
+ uint32_t i;
+
+ subpass_color_formats.reserve(pCreateInfo->subpassCount);
+ for (i = 0; i < pCreateInfo->subpassCount; i++) {
+ const VkSubpassDescription *subpass = &pCreateInfo->pSubpasses[i];
+ vector<VkFormat> color_formats;
+ uint32_t j;
+
+ color_formats.reserve(subpass->colorAttachmentCount);
+ for (j = 0; j < subpass->colorAttachmentCount; j++) {
+ const uint32_t att = subpass->pColorAttachments[j].attachment;
+ const VkFormat format = pCreateInfo->pAttachments[att].format;
+
+ color_formats.push_back(pCreateInfo->pAttachments[att].format);
+ }
+
+ subpass_color_formats.push_back(color_formats);
+ }
+ }
+};
+
// TODO : Do we need to guard access to layer_data_map w/ lock?
-static std::unordered_map<void *, layer_data *> layer_data_map;
+static unordered_map<void *, layer_data *> layer_data_map;
static LOADER_PLATFORM_THREAD_ONCE_DECLARATION(g_initOnce);
-
// TODO : This can be much smarter, using separate locks for separate global data
static int globalLockInitialized = 0;
static loader_platform_thread_mutex globalLock;
@@ -235,10 +298,739 @@ static string cmdTypeToString(CMD_TYPE cmd)
return "UNKNOWN";
}
}
+
+// SPIRV utility functions
+static void
+build_type_def_index(std::vector<unsigned> const &words, std::unordered_map<unsigned, unsigned> &type_def_index)
+{
+ unsigned int const *code = (unsigned int const *)&words[0];
+ size_t size = words.size();
+
+ unsigned word = 5;
+ while (word < size) {
+ unsigned opcode = code[word] & 0x0ffffu;
+ unsigned oplen = (code[word] & 0xffff0000u) >> 16;
+
+ switch (opcode) {
+ case spv::OpTypeVoid:
+ case spv::OpTypeBool:
+ case spv::OpTypeInt:
+ case spv::OpTypeFloat:
+ case spv::OpTypeVector:
+ case spv::OpTypeMatrix:
+ case spv::OpTypeImage:
+ case spv::OpTypeSampler:
+ case spv::OpTypeSampledImage:
+ case spv::OpTypeArray:
+ case spv::OpTypeRuntimeArray:
+ case spv::OpTypeStruct:
+ case spv::OpTypeOpaque:
+ case spv::OpTypePointer:
+ case spv::OpTypeFunction:
+ case spv::OpTypeEvent:
+ case spv::OpTypeDeviceEvent:
+ case spv::OpTypeReserveId:
+ case spv::OpTypeQueue:
+ case spv::OpTypePipe:
+ type_def_index[code[word+1]] = word;
+ break;
+
+ default:
+ /* We only care about type definitions */
+ break;
+ }
+
+ word += oplen;
+ }
+}
+
+bool
+shader_is_spirv(VkShaderModuleCreateInfo const *pCreateInfo)
+{
+ uint32_t *words = (uint32_t *)pCreateInfo->pCode;
+ size_t sizeInWords = pCreateInfo->codeSize / sizeof(uint32_t);
+
+ /* Just validate that the header makes sense. */
+ return sizeInWords >= 5 && words[0] == spv::MagicNumber && words[1] == spv::Version;
+}
+
+static char const *
+storage_class_name(unsigned sc)
+{
+ switch (sc) {
+ case spv::StorageClassInput: return "input";
+ case spv::StorageClassOutput: return "output";
+ case spv::StorageClassUniformConstant: return "const uniform";
+ case spv::StorageClassUniform: return "uniform";
+ case spv::StorageClassWorkgroup: return "workgroup local";
+ case spv::StorageClassCrossWorkgroup: return "workgroup global";
+ case spv::StorageClassPrivate: return "private global";
+ case spv::StorageClassFunction: return "function";
+ case spv::StorageClassGeneric: return "generic";
+ case spv::StorageClassAtomicCounter: return "atomic counter";
+ case spv::StorageClassImage: return "image";
+ default: return "unknown";
+ }
+}
+
+/* returns ptr to null terminator */
+static char *
+describe_type(char *dst, shader_module const *src, unsigned type)
+{
+ auto type_def_it = src->type_def_index.find(type);
+
+ if (type_def_it == src->type_def_index.end()) {
+ return dst + sprintf(dst, "undef");
+ }
+
+ unsigned int const *code = (unsigned int const *)&src->words[type_def_it->second];
+ unsigned opcode = code[0] & 0x0ffffu;
+ switch (opcode) {
+ case spv::OpTypeBool:
+ return dst + sprintf(dst, "bool");
+ case spv::OpTypeInt:
+ return dst + sprintf(dst, "%cint%d", code[3] ? 's' : 'u', code[2]);
+ case spv::OpTypeFloat:
+ return dst + sprintf(dst, "float%d", code[2]);
+ case spv::OpTypeVector:
+ dst += sprintf(dst, "vec%d of ", code[3]);
+ return describe_type(dst, src, code[2]);
+ case spv::OpTypeMatrix:
+ dst += sprintf(dst, "mat%d of ", code[3]);
+ return describe_type(dst, src, code[2]);
+ case spv::OpTypeArray:
+ dst += sprintf(dst, "arr[%d] of ", code[3]);
+ return describe_type(dst, src, code[2]);
+ case spv::OpTypePointer:
+ dst += sprintf(dst, "ptr to %s ", storage_class_name(code[2]));
+ return describe_type(dst, src, code[3]);
+ case spv::OpTypeStruct:
+ {
+ unsigned oplen = code[0] >> 16;
+ dst += sprintf(dst, "struct of (");
+ for (unsigned i = 2; i < oplen; i++) {
+ dst = describe_type(dst, src, code[i]);
+ dst += sprintf(dst, i == oplen-1 ? ")" : ", ");
+ }
+ return dst;
+ }
+ case spv::OpTypeSampler:
+ return dst + sprintf(dst, "sampler");
+ default:
+ return dst + sprintf(dst, "oddtype");
+ }
+}
+
+static bool
+types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool b_arrayed)
+{
+ auto a_type_def_it = a->type_def_index.find(a_type);
+ auto b_type_def_it = b->type_def_index.find(b_type);
+
+ if (a_type_def_it == a->type_def_index.end()) {
+ return false;
+ }
+
+ if (b_type_def_it == b->type_def_index.end()) {
+ return false;
+ }
+
+ /* walk two type trees together, and complain about differences */
+ unsigned int const *a_code = (unsigned int const *)&a->words[a_type_def_it->second];
+ unsigned int const *b_code = (unsigned int const *)&b->words[b_type_def_it->second];
+
+ unsigned a_opcode = a_code[0] & 0x0ffffu;
+ unsigned b_opcode = b_code[0] & 0x0ffffu;
+
+ if (b_arrayed && b_opcode == spv::OpTypeArray) {
+ /* we probably just found the extra level of arrayness in b_type: compare the type inside it to a_type */
+ return types_match(a, b, a_type, b_code[2], false);
+ }
+
+ if (a_opcode != b_opcode) {
+ return false;
+ }
+
+ switch (a_opcode) {
+ /* if b_arrayed and we hit a leaf type, then we can't match -- there's nowhere for the extra OpTypeArray to be! */
+ case spv::OpTypeBool:
+ return true && !b_arrayed;
+ case spv::OpTypeInt:
+ /* match on width, signedness */
+ return a_code[2] == b_code[2] && a_code[3] == b_code[3] && !b_arrayed;
+ case spv::OpTypeFloat:
+ /* match on width */
+ return a_code[2] == b_code[2] && !b_arrayed;
+ case spv::OpTypeVector:
+ case spv::OpTypeMatrix:
+ case spv::OpTypeArray:
+ /* match on element type, count. these all have the same layout. we don't get here if
+ * b_arrayed -- that is handled above. */
+ return !b_arrayed && types_match(a, b, a_code[2], b_code[2], b_arrayed) && a_code[3] == b_code[3];
+ case spv::OpTypeStruct:
+ /* match on all element types */
+ {
+ if (b_arrayed) {
+ /* for the purposes of matching different levels of arrayness, structs are leaves. */
+ return false;
+ }
+
+ unsigned a_len = a_code[0] >> 16;
+ unsigned b_len = b_code[0] >> 16;
+
+ if (a_len != b_len) {
+ return false; /* structs cannot match if member counts differ */
+ }
+
+ for (unsigned i = 2; i < a_len; i++) {
+ if (!types_match(a, b, a_code[i], b_code[i], b_arrayed)) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+ case spv::OpTypePointer:
+ /* match on pointee type. storage class is expected to differ */
+ return types_match(a, b, a_code[3], b_code[3], b_arrayed);
+
+ default:
+ /* remaining types are CLisms, or may not appear in the interfaces we
+ * are interested in. Just claim no match.
+ */
+ return false;
+
+ }
+}
+
+static int
+value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def)
+{
+ auto it = map.find(id);
+ if (it == map.end())
+ return def;
+ else
+ return it->second;
+}
+
+
+static unsigned
+get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level)
+{
+ auto type_def_it = src->type_def_index.find(type);
+
+ if (type_def_it == src->type_def_index.end()) {
+ return 1; /* This is actually broken SPIR-V... */
+ }
+
+ unsigned int const *code = (unsigned int const *)&src->words[type_def_it->second];
+ unsigned opcode = code[0] & 0x0ffffu;
+
+ switch (opcode) {
+ case spv::OpTypePointer:
+ /* see through the ptr -- this is only ever at the toplevel for graphics shaders;
+ * we're never actually passing pointers around. */
+ return get_locations_consumed_by_type(src, code[3], strip_array_level);
+ case spv::OpTypeArray:
+ if (strip_array_level) {
+ return get_locations_consumed_by_type(src, code[2], false);
+ }
+ else {
+ return code[3] * get_locations_consumed_by_type(src, code[2], false);
+ }
+ case spv::OpTypeMatrix:
+ /* num locations is the dimension * element size */
+ return code[3] * get_locations_consumed_by_type(src, code[2], false);
+ default:
+ /* everything else is just 1. */
+ return 1;
+
+ /* TODO: extend to handle 64bit scalar types, whose vectors may need
+ * multiple locations. */
+ }
+}
+
+
+struct interface_var {
+ uint32_t id;
+ uint32_t type_id;
+ uint32_t offset;
+ /* TODO: collect the name, too? Isn't required to be present. */
+};
+
+static void
+collect_interface_by_location(layer_data *my_data, VkDevice dev,
+ shader_module const *src, spv::StorageClass sinterface,
+ std::map<uint32_t, interface_var> &out,
+ std::map<uint32_t, interface_var> &builtins_out,
+ bool is_array_of_verts)
+{
+ unsigned int const *code = (unsigned int const *)&src->words[0];
+ size_t size = src->words.size();
+
+ std::unordered_map<unsigned, unsigned> var_locations;
+ std::unordered_map<unsigned, unsigned> var_builtins;
+
+ unsigned word = 5;
+ while (word < size) {
+
+ unsigned opcode = code[word] & 0x0ffffu;
+ unsigned oplen = (code[word] & 0xffff0000u) >> 16;
+
+ /* We consider two interface models: SSO rendezvous-by-location, and
+ * builtins. Complain about anything that fits neither model.
+ */
+ if (opcode == spv::OpDecorate) {
+ if (code[word+2] == spv::DecorationLocation) {
+ var_locations[code[word+1]] = code[word+3];
+ }
+
+ if (code[word+2] == spv::DecorationBuiltIn) {
+ var_builtins[code[word+1]] = code[word+3];
+ }
+ }
+
+ /* TODO: handle grouped decorations */
+ /* TODO: handle index=1 dual source outputs from FS -- two vars will
+ * have the same location, and we DONT want to clobber. */
+
+ if (opcode == spv::OpVariable && code[word+3] == sinterface) {
+ unsigned id = code[word+2];
+ unsigned type = code[word+1];
+
+ int location = value_or_default(var_locations, code[word+2], -1);
+ int builtin = value_or_default(var_builtins, code[word+2], -1);
+
+ if (location == -1 && builtin == -1) {
+ /* No location defined, and not bound to an API builtin.
+ * The spec says nothing about how this case works (or doesn't)
+ * for interface matching.
+ */
+ log_msg(my_data->report_data, VK_DBG_REPORT_WARN_BIT, VK_OBJECT_TYPE_DEVICE, /*dev*/0, 0, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
+ "var %d (type %d) in %s interface has no Location or Builtin decoration",
+ code[word+2], code[word+1], storage_class_name(sinterface));
+ }
+ else if (location != -1) {
+ /* A user-defined interface variable, with a location. Where a variable
+ * occupied multiple locations, emit one result for each. */
+ unsigned num_locations = get_locations_consumed_by_type(src, type,
+ is_array_of_verts);
+ for (int offset = 0; offset < num_locations; offset++) {
+ interface_var v;
+ v.id = id;
+ v.type_id = type;
+ v.offset = offset;
+ out[location + offset] = v;
+ }
+ }
+ else {
+ /* A builtin interface variable */
+ /* Note that since builtin interface variables do not consume numbered
+ * locations, there is no larger-than-vec4 consideration as above
+ */
+ interface_var v;
+ v.id = id;
+ v.type_id = type;
+ v.offset = 0;
+ builtins_out[builtin] = v;
+ }
+ }
+
+ word += oplen;
+ }
+}
+
+static void
+collect_interface_by_descriptor_slot(layer_data *my_data, VkDevice dev,
+ shader_module const *src, spv::StorageClass sinterface,
+ std::map<std::pair<unsigned, unsigned>, interface_var> &out)
+{
+ unsigned int const *code = (unsigned int const *)&src->words[0];
+ size_t size = src->words.size();
+
+ std::unordered_map<unsigned, unsigned> var_sets;
+ std::unordered_map<unsigned, unsigned> var_bindings;
+
+ unsigned word = 5;
+ while (word < size) {
+
+ unsigned opcode = code[word] & 0x0ffffu;
+ unsigned oplen = (code[word] & 0xffff0000u) >> 16;
+
+ /* All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
+ * DecorationDescriptorSet and DecorationBinding.
+ */
+ if (opcode == spv::OpDecorate) {
+ if (code[word+2] == spv::DecorationDescriptorSet) {
+ var_sets[code[word+1]] = code[word+3];
+ }
+
+ if (code[word+2] == spv::DecorationBinding) {
+ var_bindings[code[word+1]] = code[word+3];
+ }
+ }
+
+ if (opcode == spv::OpVariable && (code[word+3] == spv::StorageClassUniform ||
+ code[word+3] == spv::StorageClassUniformConstant)) {
+ unsigned set = value_or_default(var_sets, code[word+2], 0);
+ unsigned binding = value_or_default(var_bindings, code[word+2], 0);
+
+ auto existing_it = out.find(std::make_pair(set, binding));
+ if (existing_it != out.end()) {
+ /* conflict within spv image */
+ log_msg(my_data->report_data, VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_DEVICE, /*dev*/0, 0,
+ SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
+ "var %d (type %d) in %s interface in descriptor slot (%u,%u) conflicts with existing definition",
+ code[word+2], code[word+1], storage_class_name(sinterface),
+ existing_it->first.first, existing_it->first.second);
+ }
+
+ interface_var v;
+ v.id = code[word+2];
+ v.type_id = code[word+1];
+ out[std::make_pair(set, binding)] = v;
+ }
+
+ word += oplen;
+ }
+}
+
+static bool
+validate_interface_between_stages(layer_data *my_data, VkDevice dev,
+ shader_module const *producer, char const *producer_name,
+ shader_module const *consumer, char const *consumer_name,
+ bool consumer_arrayed_input)
+{
+ std::map<uint32_t, interface_var> outputs;
+ std::map<uint32_t, interface_var> inputs;
+
+ std::map<uint32_t, interface_var> builtin_outputs;
+ std::map<uint32_t, interface_var> builtin_inputs;
+
+ bool pass = true;
+
+ collect_interface_by_location(my_data, dev, producer, spv::StorageClassOutput, outputs, builtin_outputs, false);
+ collect_interface_by_location(my_data, dev, consumer, spv::StorageClassInput, inputs, builtin_inputs,
+ consumer_arrayed_input);
+
+ auto a_it = outputs.begin();
+ auto b_it = inputs.begin();
+
+ /* maps sorted by key (location); walk them together to find mismatches */
+ while ((outputs.size() > 0 && a_it != outputs.end()) || ( inputs.size() && b_it != inputs.end())) {
+ bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
+ bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
+ auto a_first = a_at_end ? 0 : a_it->first;
+ auto b_first = b_at_end ? 0 : b_it->first;
+
+ if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
+ if (log_msg(my_data->report_data, VK_DBG_REPORT_PERF_WARN_BIT, VK_OBJECT_TYPE_DEVICE, /*dev*/0, 0, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
+ "%s writes to output location %d which is not consumed by %s", producer_name, a_first, consumer_name)) {
+ pass = false;
+ }
+ a_it++;
+ }
+ else if (a_at_end || a_first > b_first) {
+ if (log_msg(my_data->report_data, VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_DEVICE, /*dev*/0, 0, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
+ "%s consumes input location %d which is not written by %s", consumer_name, b_first, producer_name)) {
+ pass = false;
+ }
+ b_it++;
+ }
+ else {
+ if (types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id, consumer_arrayed_input)) {
+ /* OK! */
+ }
+ else {
+ char producer_type[1024];
+ char consumer_type[1024];
+ describe_type(producer_type, producer, a_it->second.type_id);
+ describe_type(consumer_type, consumer, b_it->second.type_id);
+
+ if (log_msg(my_data->report_data, VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_DEVICE, /*dev*/0, 0, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
+ "Type mismatch on location %d: '%s' vs '%s'", a_it->first, producer_type, consumer_type)) {
+ pass = false;
+ }
+ }
+ a_it++;
+ b_it++;
+ }
+ }
+
+ return pass;
+}
+
+enum FORMAT_TYPE {
+ FORMAT_TYPE_UNDEFINED,
+ FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */
+ FORMAT_TYPE_SINT,
+ FORMAT_TYPE_UINT,
+};
+
+static unsigned
+get_format_type(VkFormat fmt) {
+ switch (fmt) {
+ case VK_FORMAT_UNDEFINED:
+ return FORMAT_TYPE_UNDEFINED;
+ case VK_FORMAT_R8_SINT:
+ case VK_FORMAT_R8G8_SINT:
+ case VK_FORMAT_R8G8B8_SINT:
+ case VK_FORMAT_R8G8B8A8_SINT:
+ case VK_FORMAT_R16_SINT:
+ case VK_FORMAT_R16G16_SINT:
+ case VK_FORMAT_R16G16B16_SINT:
+ case VK_FORMAT_R16G16B16A16_SINT:
+ case VK_FORMAT_R32_SINT:
+ case VK_FORMAT_R32G32_SINT:
+ case VK_FORMAT_R32G32B32_SINT:
+ case VK_FORMAT_R32G32B32A32_SINT:
+ case VK_FORMAT_B8G8R8_SINT:
+ case VK_FORMAT_B8G8R8A8_SINT:
+ case VK_FORMAT_A2B10G10R10_SINT_PACK32:
+ case VK_FORMAT_A2R10G10B10_SINT_PACK32:
+ return FORMAT_TYPE_SINT;
+ case VK_FORMAT_R8_UINT:
+ case VK_FORMAT_R8G8_UINT:
+ case VK_FORMAT_R8G8B8_UINT:
+ case VK_FORMAT_R8G8B8A8_UINT:
+ case VK_FORMAT_R16_UINT:
+ case VK_FORMAT_R16G16_UINT:
+ case VK_FORMAT_R16G16B16_UINT:
+ case VK_FORMAT_R16G16B16A16_UINT:
+ case VK_FORMAT_R32_UINT:
+ case VK_FORMAT_R32G32_UINT:
+ case VK_FORMAT_R32G32B32_UINT:
+ case VK_FORMAT_R32G32B32A32_UINT:
+ case VK_FORMAT_B8G8R8_UINT:
+ case VK_FORMAT_B8G8R8A8_UINT:
+ case VK_FORMAT_A2B10G10R10_UINT_PACK32:
+ case VK_FORMAT_A2R10G10B10_UINT_PACK32:
+ return FORMAT_TYPE_UINT;
+ default:
+ return FORMAT_TYPE_FLOAT;
+ }
+}
+
+/* characterizes a SPIR-V type appearing in an interface to a FF stage,
+ * for comparison to a VkFormat's characterization above. */
+static unsigned
+get_fundamental_type(shader_module const *src, unsigned type)
+{
+ auto type_def_it = src->type_def_index.find(type);
+
+ if (type_def_it == src->type_def_index.end()) {
+ return FORMAT_TYPE_UNDEFINED;
+ }
+
+ unsigned int const *code = (unsigned int const *)&src->words[type_def_it->second];
+ unsigned opcode = code[0] & 0x0ffffu;
+ switch (opcode) {
+ case spv::OpTypeInt:
+ return code[3] ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
+ case spv::OpTypeFloat:
+ return FORMAT_TYPE_FLOAT;
+ case spv::OpTypeVector:
+ return get_fundamental_type(src, code[2]);
+ case spv::OpTypeMatrix:
+ return get_fundamental_type(src, code[2]);
+ case spv::OpTypeArray:
+ return get_fundamental_type(src, code[2]);
+ case spv::OpTypePointer:
+ return get_fundamental_type(src, code[3]);
+ default:
+ return FORMAT_TYPE_UNDEFINED;
+ }
+}
+
+static bool
+validate_vi_consistency(layer_data *my_data, VkDevice dev, VkPipelineVertexInputStateCreateInfo const *vi)
+{
+ /* walk the binding descriptions, which describe the step rate and stride of each vertex buffer.
+ * each binding should be specified only once.
+ */
+ std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
+ bool pass = true;
+
+ for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
+ auto desc = &vi->pVertexBindingDescriptions[i];
+ auto & binding = bindings[desc->binding];
+ if (binding) {
+ if (log_msg(my_data->report_data, VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_DEVICE, /*dev*/0, 0, SHADER_CHECKER_INCONSISTENT_VI, "SC",
+ "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
+ pass = false;
+ }
+ }
+ else {
+ binding = desc;
+ }
+ }
+
+ return pass;
+}
+
+static bool
+validate_vi_against_vs_inputs(layer_data *my_data, VkDevice dev, VkPipelineVertexInputStateCreateInfo const *vi, shader_module const *vs)
+{
+ std::map<uint32_t, interface_var> inputs;
+ /* we collect builtin inputs, but they will never appear in the VI state --
+ * the vs builtin inputs are generated in the pipeline, not sourced from buffers (VertexID, etc)
+ */
+ std::map<uint32_t, interface_var> builtin_inputs;
+ bool pass = true;
+
+ collect_interface_by_location(my_data, dev, vs, spv::StorageClassInput, inputs, builtin_inputs, false);
+
+ /* Build index by location */
+ std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
+ if (vi) {
+ for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++)
+ attribs[vi->pVertexAttributeDescriptions[i].location] = &vi->pVertexAttributeDescriptions[i];
+ }
+
+ auto it_a = attribs.begin();
+ auto it_b = inputs.begin();
+
+ while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
+ bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
+ bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
+ auto a_first = a_at_end ? 0 : it_a->first;
+ auto b_first = b_at_end ? 0 : it_b->first;
+ if (b_at_end || a_first < b_first) {
+ if (log_msg(my_data->report_data, VK_DBG_REPORT_PERF_WARN_BIT, VK_OBJECT_TYPE_DEVICE, /*dev*/0, 0, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
+ "Vertex attribute at location %d not consumed by VS", a_first)) {
+ pass = false;
+ }
+ it_a++;
+ }
+ else if (a_at_end || b_first < a_first) {
+ if (log_msg(my_data->report_data, VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_DEVICE, /*dev*/0, 0, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
+ "VS consumes input at location %d but not provided", b_first)) {
+ pass = false;
+ }
+ it_b++;
+ }
+ else {
+ unsigned attrib_type = get_format_type(it_a->second->format);
+ unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);
+
+ /* type checking */
+ if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
+ char vs_type[1024];
+ describe_type(vs_type, vs, it_b->second.type_id);
+ if (log_msg(my_data->report_data, VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_DEVICE, /*dev*/0, 0, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
+ "Attribute type of `%s` at location %d does not match VS input type of `%s`",
+ string_VkFormat(it_a->second->format), a_first, vs_type)) {
+ pass = false;
+ }
+ }
+
+ /* OK! */
+ it_a++;
+ it_b++;
+ }
+ }
+
+ return pass;
+}
+
+static bool
+validate_fs_outputs_against_render_pass(layer_data *my_data, VkDevice dev, shader_module const *fs, render_pass const *rp, uint32_t subpass)
+{
+ const std::vector<VkFormat> &color_formats = rp->subpass_color_formats[subpass];
+ std::map<uint32_t, interface_var> outputs;
+ std::map<uint32_t, interface_var> builtin_outputs;
+ bool pass = true;
+
+ /* TODO: dual source blend index (spv::DecIndex, zero if not provided) */
+
+ collect_interface_by_location(my_data, dev, fs, spv::StorageClassOutput, outputs, builtin_outputs, false);
+
+ auto it = outputs.begin();
+ uint32_t attachment = 0;
+
+ /* Walk attachment list and outputs together -- this is a little overpowered since attachments
+ * are currently dense, but the parallel with matching between shader stages is nice.
+ */
+
+ /* TODO: Figure out compile error with cb->attachmentCount */
+ while ((outputs.size() > 0 && it != outputs.end()) || attachment < color_formats.size()) {
+ if (attachment == color_formats.size() || ( it != outputs.end() && it->first < attachment)) {
+ if (log_msg(my_data->report_data, VK_DBG_REPORT_WARN_BIT, VK_OBJECT_TYPE_DEVICE, /*dev*/0, 0, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
+ "FS writes to output location %d with no matching attachment", it->first)) {
+ pass = false;
+ }
+ it++;
+ }
+ else if (it == outputs.end() || it->first > attachment) {
+ if (log_msg(my_data->report_data, VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_DEVICE, /*dev*/0, 0, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
+ "Attachment %d not written by FS", attachment)) {
+ pass = false;
+ }
+ attachment++;
+ }
+ else {
+ unsigned output_type = get_fundamental_type(fs, it->second.type_id);
+ unsigned att_type = get_format_type(color_formats[attachment]);
+
+ /* type checking */
+ if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
+ char fs_type[1024];
+ describe_type(fs_type, fs, it->second.type_id);
+ if (log_msg(my_data->report_data, VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_DEVICE, /*dev*/0, 0, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
+ "Attachment %d of type `%s` does not match FS output type of `%s`",
+ attachment, string_VkFormat(color_formats[attachment]), fs_type)) {
+ pass = false;
+ }
+ }
+
+ /* OK! */
+ it++;
+ attachment++;
+ }
+ }
+
+ return pass;
+}
+
+
+struct shader_stage_attributes {
+ char const * const name;
+ bool arrayed_input;
+};
+
+
+static shader_stage_attributes
+shader_stage_attribs[] = {
+ { "vertex shader", false },
+ { "tessellation control shader", true },
+ { "tessellation evaluation shader", false },
+ { "geometry shader", true },
+ { "fragment shader", false },
+};
+
+
+static bool
+has_descriptor_binding(std::vector<std::unordered_set<uint32_t>*>* layout,
+ std::pair<unsigned, unsigned> slot)
+{
+ if (!layout)
+ return false;
+
+ if (slot.first >= layout->size())
+ return false;
+
+ auto set = (*layout)[slot.first];
+
+ return (set->find(slot.second) != set->end());
+}
+
+static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage)
+{
+ uint32_t bit_pos = u_ffs(stage);
+ return bit_pos-1;
+}
+
// Block of code at start here for managing/tracking Pipeline state that this layer cares about
-// Just track 2 shaders for now
-#define MAX_SLOTS 2048
-#define NUM_COMMAND_BUFFERS_TO_DISPLAY 10
static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0};
@@ -246,35 +1038,13 @@ static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0};
// Then need to synchronize the accesses based on cmd buffer so that if I'm reading state on one cmd buffer, updates
// to that same cmd buffer by separate thread are not changing state from underneath us
// Track the last cmd buffer touched by this thread
-static VkCommandBuffer g_lastCommandBuffer[MAX_TID] = {NULL};
-// Track the last group of CBs touched for displaying to dot file
-static GLOBAL_CB_NODE* g_pLastTouchedCB[NUM_COMMAND_BUFFERS_TO_DISPLAY] = {NULL};
-static uint32_t g_lastTouchedCBIndex = 0;
+
// Track the last global DrawState of interest touched by any thread
-static GLOBAL_CB_NODE* g_lastGlobalCB = NULL;
static PIPELINE_NODE* g_lastBoundPipeline = NULL;
#define MAX_BINDING 0xFFFFFFFF // Default vtxBinding value in CB Node to identify if no vtxBinding set
// prototype
static GLOBAL_CB_NODE* getCBNode(layer_data*, const VkCommandBuffer);
-// Update global ptrs to reflect that specified commandBuffer has been used
-static void updateCBTracking(GLOBAL_CB_NODE* pCB)
-{
- g_lastCommandBuffer[getTIDIndex()] = pCB->commandBuffer;
- loader_platform_thread_lock_mutex(&globalLock);
- g_lastGlobalCB = pCB;
- // TODO : This is a dumb algorithm. Need smart LRU that drops off oldest
- for (uint32_t i = 0; i < NUM_COMMAND_BUFFERS_TO_DISPLAY; i++) {
- if (g_pLastTouchedCB[i] == pCB) {
- loader_platform_thread_unlock_mutex(&globalLock);
- return;
- }
- }
- g_pLastTouchedCB[g_lastTouchedCBIndex++] = pCB;
- g_lastTouchedCBIndex = g_lastTouchedCBIndex % NUM_COMMAND_BUFFERS_TO_DISPLAY;
- loader_platform_thread_unlock_mutex(&globalLock);
-}
-
static VkBool32 hasDrawCmd(GLOBAL_CB_NODE* pCB)
{
for (uint32_t i=0; i<NUM_DRAW_TYPES; i++) {
@@ -441,6 +1211,106 @@ static bool verify_set_layout_compatibility(layer_data* my_data, const SET_NODE*
// }
// return skipCall;
//}
+static bool
+validate_graphics_pipeline(layer_data *my_data, VkDevice dev, VkGraphicsPipelineCreateInfo const *pCreateInfo)
+{
+ /* We seem to allow pipeline stages to be specified out of order, so collect and identify them
+ * before trying to do anything more: */
+ int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
+ int geometry_stage = get_shader_stage_id(VK_SHADER_STAGE_GEOMETRY_BIT);
+ int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);
+
+ shader_module **shaders = new shader_module*[fragment_stage + 1]; /* exclude CS */
+ memset(shaders, 0, sizeof(shader_module *) * (fragment_stage +1));
+ render_pass const *rp = 0;
+ VkPipelineVertexInputStateCreateInfo const *vi = 0;
+ bool pass = true;
+
+ for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
+ VkPipelineShaderStageCreateInfo const *pStage = &pCreateInfo->pStages[i];
+ if (pStage->sType == VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO) {
+
+ if ((pStage->stage & (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT
+ | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) == 0) {
+ if (log_msg(my_data->report_data, VK_DBG_REPORT_WARN_BIT, VK_OBJECT_TYPE_DEVICE, /*dev*/0, 0, SHADER_CHECKER_UNKNOWN_STAGE, "SC",
+ "Unknown shader stage %d", pStage->stage)) {
+ pass = false;
+ }
+ }
+ else {
+ shader_module *module = my_data->shader_module_map[pStage->module];
+ shaders[get_shader_stage_id(pStage->stage)] = module;
+
+ /* validate descriptor set layout against what the spirv module actually uses */
+ std::map<std::pair<unsigned, unsigned>, interface_var> descriptor_uses;
+ collect_interface_by_descriptor_slot(my_data, dev, module, spv::StorageClassUniform,
+ descriptor_uses);
+
+ auto layout = pCreateInfo->layout != VK_NULL_HANDLE ?
+ my_data->pipeline_layout_map[pCreateInfo->layout] : nullptr;
+
+ for (auto it = descriptor_uses.begin(); it != descriptor_uses.end(); it++) {
+
+ /* find the matching binding */
+ auto found = has_descriptor_binding(layout, it->first);
+
+ if (!found) {
+ char type_name[1024];
+ describe_type(type_name, module, it->second.type_id);
+ if (log_msg(my_data->report_data, VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_DEVICE, /*dev*/0, 0,
+ SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
+ "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
+ it->first.first, it->first.second, type_name)) {
+ pass = false;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if (pCreateInfo->renderPass != VK_NULL_HANDLE)
+ rp = my_data->render_pass_map[pCreateInfo->renderPass];
+
+ vi = pCreateInfo->pVertexInputState;
+
+ if (vi) {
+ pass = validate_vi_consistency(my_data, dev, vi) && pass;
+ }
+
+ if (shaders[vertex_stage]) {
+ pass = validate_vi_against_vs_inputs(my_data, dev, vi, shaders[vertex_stage]) && pass;
+ }
+
+ /* TODO: enforce rules about present combinations of shaders */
+ int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
+ int consumer = get_shader_stage_id(VK_SHADER_STAGE_GEOMETRY_BIT);
+
+ while (!shaders[producer] && producer != fragment_stage) {
+ producer++;
+ consumer++;
+ }
+
+ for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
+ assert(shaders[producer]);
+ if (shaders[consumer]) {
+ pass = validate_interface_between_stages(my_data, dev,
+ shaders[producer], shader_stage_attribs[producer].name,
+ shaders[consumer], shader_stage_attribs[consumer].name,
+ shader_stage_attribs[consumer].arrayed_input) && pass;
+
+ producer = consumer;
+ }
+ }
+
+ if (shaders[fragment_stage] && rp) {
+ pass = validate_fs_outputs_against_render_pass(my_data, dev, shaders[fragment_stage], rp, pCreateInfo->subpass) && pass;
+ }
+
+ delete shaders;
+
+ return pass;
+}
// Validate overall state at the time of a draw call
static VkBool32 validate_draw_state(layer_data* my_data, GLOBAL_CB_NODE* pCB, VkBool32 indexedDraw) {
@@ -1763,7 +2633,6 @@ static void init_draw_state(layer_data *my_data)
if (!globalLockInitialized)
{
- // This mutex may be deleted by vkDestroyInstance of last instance.
loader_platform_thread_create_mutex(&globalLock);
globalLockInitialized = 1;
}
@@ -1933,11 +2802,11 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionPropert
uint32_t* pCount,
VkExtensionProperties* pProperties)
{
+ // DrawState does not have any physical device extensions
if (pLayerName == NULL) {
dispatch_key key = get_dispatch_key(physicalDevice);
layer_data *my_data = get_my_data_ptr(key, layer_data_map);
- VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
- return pTable->EnumerateDeviceExtensionProperties(
+ return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(
physicalDevice,
NULL,
pCount,
@@ -1954,7 +2823,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(
uint32_t* pCount,
VkLayerProperties* pProperties)
{
- /* Mem tracker's physical device layers are the same as global */
+ /* DrawState physical device layers are the same as global */
return util_GetLayerProperties(ARRAY_SIZE(ds_device_layers), ds_device_layers,
pCount, pProperties);
}
@@ -2288,14 +3157,17 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateGraphicsPipelines(
uint32_t i=0;
loader_platform_thread_lock_mutex(&globalLock);
+ bool pass = true;
for (i=0; i<count; i++) {
pPipeNode[i] = initGraphicsPipeline(dev_data, &pCreateInfos[i], NULL);
- // TODOSC : merge in validate_graphics_pipeline() from ShaderChecker
+ // TODOSC : Merge SC validate* func w/ verifyPipelineCS func
+ pass = validate_graphics_pipeline(dev_data, device, &pCreateInfos[i]) && pass;
skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode[i]);
}
loader_platform_thread_unlock_mutex(&globalLock);
- if (VK_FALSE == skipCall) {
+
+ if ((VK_FALSE == skipCall) && pass) {
result = dev_data->device_dispatch_table->CreateGraphicsPipelines(device,
pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
loader_platform_thread_lock_mutex(&globalLock);
@@ -2440,6 +3312,13 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorSetLayout(VkDev
// Put new node at Head of global Layer list
loader_platform_thread_lock_mutex(&globalLock);
dev_data->layoutMap[*pSetLayout] = pNewNode;
+ // TODOSC : Currently duplicating data struct here, need to unify
+ auto& bindings = dev_data->descriptor_set_layout_map[*pSetLayout];
+ bindings = new std::unordered_set<uint32_t>();
+ bindings->reserve(pCreateInfo->bindingCount);
+ for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++)
+ bindings->insert(pCreateInfo->pBinding[i].binding);
+
loader_platform_thread_unlock_mutex(&globalLock);
}
return result;
@@ -2461,6 +3340,15 @@ VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout(VkDevice device, const VkP
for (i=0; i<pCreateInfo->pushConstantRangeCount; ++i) {
plNode.pushConstantRanges[i] = pCreateInfo->pPushConstantRanges[i];
}
+ // TODOSC : Code merged from SC duplicates data structs, need to unify
+ loader_platform_thread_lock_mutex(&globalLock);
+ auto& layouts = dev_data->pipeline_layout_map[*pPipelineLayout];
+ layouts = new vector<unordered_set<uint32_t>*>();
+ layouts->reserve(pCreateInfo->setLayoutCount);
+ for (unsigned i = 0; i < pCreateInfo->setLayoutCount; i++) {
+ layouts->push_back(dev_data->descriptor_set_layout_map[pCreateInfo->pSetLayouts[i]]);
+ }
+ loader_platform_thread_unlock_mutex(&globalLock);
}
return result;
}
@@ -2619,7 +3507,6 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateCommandBuffers(VkDevice
resetCB(dev_data, pCommandBuffer[i]);
pCB->commandBuffer = pCommandBuffer[i];
pCB->createInfo = *pCreateInfo;
- updateCBTracking(pCB);
}
}
}
@@ -2661,7 +3548,6 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkBeginCommandBuffer(VkCommandBuf
resetCB(dev_data, commandBuffer);
}
pCB->state = CB_UPDATE_ACTIVE;
- updateCBTracking(pCB);
}
return result;
}
@@ -2681,7 +3567,6 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer(VkCommandBuffe
if (VK_FALSE == skipCall) {
result = dev_data->device_dispatch_table->EndCommandBuffer(commandBuffer);
if (VK_SUCCESS == result) {
- updateCBTracking(pCB);
pCB->state = CB_UPDATE_COMPLETE;
// Reset CB status flags
pCB->status = 0;
@@ -2699,7 +3584,6 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandBuffer(VkCommandBuf
VkResult result = dev_data->device_dispatch_table->ResetCommandBuffer(commandBuffer, flags);
if (VK_SUCCESS == result) {
resetCB(dev_data, commandBuffer);
- updateCBTracking(getCBNode(dev_data, commandBuffer));
}
return result;
}
@@ -2711,7 +3595,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindPipeline(VkCommandBuffer com
GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->state == CB_UPDATE_ACTIVE) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_BINDPIPELINE);
if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (pCB->activeRenderPass)) {
skipCall |= log_msg(dev_data->report_data, VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_PIPELINE, (uint64_t) pipeline,
@@ -2755,7 +3638,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetViewport(
GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->state == CB_UPDATE_ACTIVE) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE);
loader_platform_thread_lock_mutex(&globalLock);
pCB->status |= CBSTATUS_VIEWPORT_SET;
@@ -2780,7 +3662,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetScissor(
GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->state == CB_UPDATE_ACTIVE) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_SETSCISSORSTATE);
loader_platform_thread_lock_mutex(&globalLock);
pCB->status |= CBSTATUS_SCISSOR_SET;
@@ -2802,7 +3683,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth(VkCommandBuffer com
GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->state == CB_UPDATE_ACTIVE) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE);
/* TODO: Do we still need this lock? */
loader_platform_thread_lock_mutex(&globalLock);
@@ -2828,7 +3708,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBias(
GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->state == CB_UPDATE_ACTIVE) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE);
pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
pCB->depthBiasConstantFactor = depthBiasConstantFactor;
@@ -2849,7 +3728,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants(VkCommandBuffe
GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->state == CB_UPDATE_ACTIVE) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_SETBLENDSTATE);
pCB->status |= CBSTATUS_BLEND_SET;
memcpy(pCB->blendConstants, blendConstants, 4 * sizeof(float));
@@ -2871,7 +3749,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBounds(
GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->state == CB_UPDATE_ACTIVE) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE);
pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
pCB->minDepthBounds = minDepthBounds;
@@ -2894,7 +3771,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilCompareMask(
GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->state == CB_UPDATE_ACTIVE) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE);
if (faceMask & VK_STENCIL_FACE_FRONT_BIT) {
pCB->front.compareMask = compareMask;
@@ -2923,7 +3799,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilWriteMask(
GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->state == CB_UPDATE_ACTIVE) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE);
if (faceMask & VK_STENCIL_FACE_FRONT_BIT) {
pCB->front.writeMask = writeMask;
@@ -2950,7 +3825,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilReference(
GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->state == CB_UPDATE_ACTIVE) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE);
if (faceMask & VK_STENCIL_FACE_FRONT_BIT) {
pCB->front.reference = reference;
@@ -3013,7 +3887,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindDescriptorSets(VkCommandBuff
"Attempt to bind DS %#" PRIxLEAST64 " that doesn't exist!", (uint64_t) pDescriptorSets[i]);
}
}
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS);
// For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
if (firstSet > 0) { // Check set #s below the first bound set
@@ -3074,7 +3947,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindIndexBuffer(VkCommandBuffer
skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindIndexBuffer()");
}
pCB->status |= CBSTATUS_INDEX_BUFFER_BOUND;
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_BINDINDEXBUFFER);
}
if (VK_FALSE == skipCall)
@@ -3095,7 +3967,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers(
if (pCB->state == CB_UPDATE_ACTIVE) {
/* TODO: Need to track all the vertex buffers, not just last one */
pCB->lastVtxBinding = startBinding + bindingCount -1;
- updateCBTracking(pCB);
addCmd(dev_data, pCB, CMD_BINDVERTEXBUFFER);
} else {
skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffer()");
@@ -3119,7 +3990,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDraw(VkCommandBuffer commandBuff
"vkCmdDraw() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW]++);
skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
if (VK_FALSE == skipCall) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_DRAW);
}
} else {
@@ -3145,7 +4015,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed(VkCommandBuffer comm
"vkCmdDrawIndexed() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDEXED]++);
skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
if (VK_FALSE == skipCall) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXED);
}
} else {
@@ -3171,7 +4040,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirect(VkCommandBuffer com
"vkCmdDrawIndirect() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDIRECT]++);
skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
if (VK_FALSE == skipCall) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDIRECT);
}
} else {
@@ -3197,7 +4065,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirect(VkCommandBuf
"vkCmdDrawIndexedIndirect() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDEXED_INDIRECT]++);
skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
if (VK_FALSE == skipCall) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXEDINDIRECT);
}
} else {
@@ -3216,7 +4083,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDispatch(VkCommandBuffer command
GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->state == CB_UPDATE_ACTIVE) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_DISPATCH);
} else {
skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdDispatch()");
@@ -3234,7 +4100,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDispatchIndirect(VkCommandBuffer
GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->state == CB_UPDATE_ACTIVE) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_DISPATCHINDIRECT);
} else {
skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdDispatchIndirect()");
@@ -3252,7 +4117,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer(VkCommandBuffer comma
GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->state == CB_UPDATE_ACTIVE) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_COPYBUFFER);
} else {
skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyBuffer()");
@@ -3339,7 +4203,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyImage(VkCommandBuffer comman
GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->state == CB_UPDATE_ACTIVE) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_COPYIMAGE);
} else {
skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyImage()");
@@ -3363,7 +4226,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBlitImage(VkCommandBuffer comman
GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->state == CB_UPDATE_ACTIVE) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_BLITIMAGE);
} else {
skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBlitImage()");
@@ -3384,7 +4246,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage(VkCommandBuffe
GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->state == CB_UPDATE_ACTIVE) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_COPYBUFFERTOIMAGE);
} else {
skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyBufferToImage()");
@@ -3406,7 +4267,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer(VkCommandBuffe
GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->state == CB_UPDATE_ACTIVE) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_COPYIMAGETOBUFFER);
} else {
skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyImageToBuffer()");
@@ -3425,7 +4285,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(VkCommandBuffer com
GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->state == CB_UPDATE_ACTIVE) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_UPDATEBUFFER);
} else {
skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdUpdateBuffer()");
@@ -3443,7 +4302,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdFillBuffer(VkCommandBuffer comma
GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->state == CB_UPDATE_ACTIVE) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_FILLBUFFER);
} else {
skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdFillBuffer()");
@@ -3473,7 +4331,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(
"vkCmdClearAttachments() issued on CB object 0x%" PRIxLEAST64 " prior to any Draw Cmds."
" It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.", reinterpret_cast<uint64_t>(commandBuffer));
}
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_CLEARATTACHMENTS);
} else {
skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdClearAttachments()");
@@ -3531,7 +4388,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(
GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->state == CB_UPDATE_ACTIVE) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_CLEARCOLORIMAGE);
} else {
skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdClearColorImage()");
@@ -3554,7 +4410,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearDepthStencilImage(
GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->state == CB_UPDATE_ACTIVE) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_CLEARDEPTHSTENCILIMAGE);
} else {
skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdClearDepthStencilImage()");
@@ -3575,7 +4430,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdResolveImage(VkCommandBuffer com
GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->state == CB_UPDATE_ACTIVE) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_RESOLVEIMAGE);
} else {
skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResolveImage()");
@@ -3593,7 +4447,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetEvent(VkCommandBuffer command
GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->state == CB_UPDATE_ACTIVE) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_SETEVENT);
} else {
skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdSetEvent()");
@@ -3611,7 +4464,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdResetEvent(VkCommandBuffer comma
GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->state == CB_UPDATE_ACTIVE) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_RESETEVENT);
} else {
skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetEvent()");
@@ -3759,7 +4611,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdWaitEvents(VkCommandBuffer comma
GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->state == CB_UPDATE_ACTIVE) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_WAITEVENTS);
} else {
skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
@@ -3778,7 +4629,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdPipelineBarrier(VkCommandBuffer
GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->state == CB_UPDATE_ACTIVE) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_PIPELINEBARRIER);
} else {
skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPipelineBarrier()");
@@ -3797,7 +4647,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBeginQuery(VkCommandBuffer comma
GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->state == CB_UPDATE_ACTIVE) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_BEGINQUERY);
} else {
skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBeginQuery()");
@@ -3814,7 +4663,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery(VkCommandBuffer command
GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->state == CB_UPDATE_ACTIVE) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_ENDQUERY);
} else {
skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
@@ -3831,7 +4679,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdResetQueryPool(VkCommandBuffer c
GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->state == CB_UPDATE_ACTIVE) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_RESETQUERYPOOL);
} else {
skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
@@ -3851,7 +4698,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyQueryPoolResults(VkCommandBu
GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->state == CB_UPDATE_ACTIVE) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_COPYQUERYPOOLRESULTS);
} else {
skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
@@ -3870,7 +4716,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdWriteTimestamp(VkCommandBuffer c
GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->state == CB_UPDATE_ACTIVE) {
- updateCBTracking(pCB);
skipCall |= addCmd(dev_data, pCB, CMD_WRITETIMESTAMP);
} else {
skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
@@ -4118,6 +4963,33 @@ bool CreatePassDAG(const layer_data* my_data, VkDevice device, const VkRenderPas
}
// TODOSC : Add intercept of vkCreateShaderModule
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule(
+ VkDevice device,
+ const VkShaderModuleCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkShaderModule *pShaderModule)
+{
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ bool skip_call = false;
+ if (!shader_is_spirv(pCreateInfo)) {
+ skip_call |= log_msg(my_data->report_data, VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_DEVICE,
+ /* dev */ 0, 0, SHADER_CHECKER_NON_SPIRV_SHADER, "SC",
+ "Shader is not SPIR-V");
+ }
+
+ if (skip_call)
+ return VK_ERROR_VALIDATION_FAILED;
+
+ VkResult res = my_data->device_dispatch_table->CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
+
+ if (res == VK_SUCCESS) {
+ loader_platform_thread_lock_mutex(&globalLock);
+ my_data->shader_module_map[*pShaderModule] = new shader_module(pCreateInfo);
+ loader_platform_thread_unlock_mutex(&globalLock);
+ }
+ return res;
+}
+
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass)
{
bool skip_call = false;
@@ -4185,9 +5057,13 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(VkDevice devic
localRPCI->pDependencies = new VkSubpassDependency[localRPCI->dependencyCount];
memcpy((void*)localRPCI->pDependencies, pCreateInfo->pDependencies, localRPCI->dependencyCount*sizeof(VkSubpassDependency));
}
+ loader_platform_thread_lock_mutex(&globalLock);
dev_data->renderPassMap[*pRenderPass] = new RENDER_PASS_NODE();
dev_data->renderPassMap[*pRenderPass]->hasSelfDependency = has_self_dependency;
dev_data->renderPassMap[*pRenderPass]->createInfo = localRPCI;
+ // TODOSC : Duplicate data struct here, need to unify
+ dev_data->render_pass_map[*pRenderPass] = new render_pass(pCreateInfo);
+ loader_platform_thread_unlock_mutex(&globalLock);
}
return result;
}
@@ -4340,7 +5216,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass(VkCommandBuffer
if (pRenderPassBegin && pRenderPassBegin->renderPass) {
skipCall |= VerifyFramebufferAndRenderPassLayouts(commandBuffer, pRenderPassBegin);
skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBeginRenderPass");
- updateCBTracking(pCB);
skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdBeginRenderPass");
skipCall |= addCmd(dev_data, pCB, CMD_BEGINRENDERPASS);
pCB->activeRenderPass = pRenderPassBegin->renderPass;
@@ -4371,7 +5246,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass(VkCommandBuffer comm
GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
TransitionSubpassLayouts(commandBuffer, &dev_data->renderPassBeginInfo, ++dev_data->currentSubpass);
if (pCB) {
- updateCBTracking(pCB);
skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass");
skipCall |= addCmd(dev_data, pCB, CMD_NEXTSUBPASS);
pCB->activeSubpass++;
@@ -4393,7 +5267,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass(VkCommandBuffer co
TransitionFinalSubpassLayouts(commandBuffer, &dev_data->renderPassBeginInfo);
if (pCB) {
skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderpass");
- updateCBTracking(pCB);
skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass");
skipCall |= addCmd(dev_data, pCB, CMD_ENDRENDERPASS);
TransitionFinalSubpassLayouts(commandBuffer, &pCB->activeRenderPassBeginInfo);
@@ -4421,7 +5294,6 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdExecuteCommands(VkCommandBuffer
"vkCmdExecuteCommands() called w/ Primary Cmd Buffer %p in element %u of pCommandBuffers array. All cmd buffers in pCommandBuffers array must be secondary.", (void*)pCommandBuffers[i], i);
}
}
- updateCBTracking(pCB);
skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteComands");
skipCall |= addCmd(dev_data, pCB, CMD_EXECUTECOMMANDS);
}
@@ -4610,7 +5482,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDbgMarkerBegin(VkCommandBuffer c
"Attempt to use CmdDbgMarkerBegin but extension disabled!");
return;
} else if (pCB) {
- updateCBTracking(pCB);
+
skipCall |= addCmd(dev_data, pCB, CMD_DBGMARKERBEGIN);
}
if (VK_FALSE == skipCall)
@@ -4627,7 +5499,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDbgMarkerEnd(VkCommandBuffer com
"Attempt to use CmdDbgMarkerEnd but extension disabled!");
return;
} else if (pCB) {
- updateCBTracking(pCB);
+
skipCall |= addCmd(dev_data, pCB, CMD_DBGMARKEREND);
}
if (VK_FALSE == skipCall)
@@ -4817,6 +5689,8 @@ VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkD
return (PFN_vkVoidFunction) vkCmdWriteTimestamp;
if (!strcmp(funcName, "vkCreateFramebuffer"))
return (PFN_vkVoidFunction) vkCreateFramebuffer;
+ if (!strcmp(funcName, "vkCreateShaderModule"))
+ return (PFN_vkVoidFunction) vkCreateShaderModule;
if (!strcmp(funcName, "vkCreateRenderPass"))
return (PFN_vkVoidFunction) vkCreateRenderPass;
if (!strcmp(funcName, "vkCmdBeginRenderPass"))
@@ -4872,7 +5746,6 @@ VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(V
layer_init_instance_dispatch_table(my_data->instance_dispatch_table, wrapped_inst);
return (PFN_vkVoidFunction) vkGetInstanceProcAddr;
}
- my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
if (!strcmp(funcName, "vkCreateInstance"))
return (PFN_vkVoidFunction) vkCreateInstance;
if (!strcmp(funcName, "vkDestroyInstance"))
@@ -4886,6 +5759,7 @@ VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(V
if (!strcmp(funcName, "vkEnumerateDeviceExtensionProperties"))
return (PFN_vkVoidFunction) vkEnumerateDeviceExtensionProperties;
+ my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
fptr = debug_report_get_instance_proc_addr(my_data->report_data, funcName);
if (fptr)
return fptr;
diff --git a/layers/draw_state.h b/layers/draw_state.h
index ce77bea6..7d9d6b9f 100755
--- a/layers/draw_state.h
+++ b/layers/draw_state.h
@@ -1,6 +1,7 @@
/*
*
* Copyright (C) 2015 Valve Corporation
+ * Copyright (C) 2015 Google, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -20,15 +21,31 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
- * Author: Courtney Goeltzenleuchter <courtney@LunarG.com>
- * Author: Tobin Ehlis <tobin@lunarg.com>
+ * Author: Courtney Goeltzenleuchter <courtneygo@google.com>
+ * Author: Tobin Ehlis <tobine@google.com>
+ * Author: Chris Forbes <chrisf@ijw.co.nz>
*/
#include "vulkan/vk_layer.h"
+#include "vulkan/vk_lunarg_debug_report.h"
#include <vector>
#include <memory>
-#include "vulkan/vk_lunarg_debug_report.h"
-using namespace std;
+using std::vector;
+
+/* Shader checker error codes */
+typedef enum _SHADER_CHECKER_ERROR
+{
+ SHADER_CHECKER_NONE,
+ SHADER_CHECKER_FS_MIXED_BROADCAST, /* FS writes broadcast output AND custom outputs */
+ SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, /* Type mismatch between shader stages or shader and pipeline */
+ SHADER_CHECKER_OUTPUT_NOT_CONSUMED, /* Entry appears in output interface, but missing in input */
+ SHADER_CHECKER_INPUT_NOT_PRODUCED, /* Entry appears in input interface, but missing in output */
+ SHADER_CHECKER_NON_SPIRV_SHADER, /* Shader image is not SPIR-V */
+ SHADER_CHECKER_INCONSISTENT_SPIRV, /* General inconsistency within a SPIR-V module */
+ SHADER_CHECKER_UNKNOWN_STAGE, /* Stage is not supported by analysis */
+ SHADER_CHECKER_INCONSISTENT_VI, /* VI state contains conflicting binding or attrib descriptions */
+ SHADER_CHECKER_MISSING_DESCRIPTOR, /* Shader attempts to use a descriptor binding not declared in the layout */
+} SHADER_CHECKER_ERROR;
// Draw State ERROR codes
typedef enum _DRAW_STATE_ERROR
diff --git a/layers/linux/shader_checker.json b/layers/linux/shader_checker.json
deleted file mode 100644
index 946dbde4..00000000
--- a/layers/linux/shader_checker.json
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "file_format_version" : "1.0.0",
- "layer" : {
- "name": "VK_LAYER_LUNARG_ShaderChecker",
- "type": "GLOBAL",
- "library_path": "./libVKLayerShaderChecker.so",
- "api_version": "0.210.0",
- "implementation_version": "1",
- "description": "LunarG Validation Layer"
- }
-}
diff --git a/layers/shader_checker.cpp b/layers/shader_checker.cpp
deleted file mode 100644
index fe4d4740..00000000
--- a/layers/shader_checker.cpp
+++ /dev/null
@@ -1,1340 +0,0 @@
-/*
- *
- * Copyright (C) 2015 Valve Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Author: Chia-I Wu <olv@lunarg.com>
- * Author: Chris Forbes <chrisf@ijw.co.nz>
- */
-#include <string.h>
-#include <stdlib.h>
-#include <assert.h>
-#include <map>
-#include <unordered_map>
-#include <unordered_set>
-#include <map>
-#include <vector>
-#include <string>
-#include <iostream>
-#include "vk_loader_platform.h"
-#include "vk_dispatch_table_helper.h"
-#include "vulkan/vk_layer.h"
-#include "vk_layer_utils.h"
-#include "vk_layer_config.h"
-#include "vk_layer_table.h"
-#include "vk_enum_string_helper.h"
-#include "shader_checker.h"
-#include "vk_layer_extension_utils.h"
-
-#include <spirv.hpp>
-
-// fwd decls
-struct shader_module;
-struct render_pass;
-
-struct layer_data {
- debug_report_data *report_data;
- std::vector<VkDbgMsgCallback> logging_callback;
- VkLayerDispatchTable* device_dispatch_table;
- VkLayerInstanceDispatchTable* instance_dispatch_table;
-
- std::unordered_map<VkShaderModule, shader_module *> shader_module_map;
- std::unordered_map<VkDescriptorSetLayout, std::unordered_set<uint32_t>*> descriptor_set_layout_map;
- std::unordered_map<VkPipelineLayout, std::vector<std::unordered_set<uint32_t>*>*> pipeline_layout_map;
- std::unordered_map<VkRenderPass, render_pass *> render_pass_map;
-
- layer_data() :
- report_data(nullptr),
- device_dispatch_table(nullptr),
- instance_dispatch_table(nullptr)
- {};
-};
-
-static void
-build_type_def_index(std::vector<unsigned> const &words, std::unordered_map<unsigned, unsigned> &type_def_index);
-
-struct shader_module {
- /* the spirv image itself */
- std::vector<uint32_t> words;
- /* a mapping of <id> to the first word of its def. this is useful because walking type
- * trees requires jumping all over the instruction stream.
- */
- std::unordered_map<unsigned, unsigned> type_def_index;
-
- shader_module(VkShaderModuleCreateInfo const *pCreateInfo) :
- words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
- type_def_index() {
-
- build_type_def_index(words, type_def_index);
- }
-};
-
-struct render_pass {
- std::vector<std::vector<VkFormat>> subpass_color_formats;
-
- render_pass(VkRenderPassCreateInfo const *pCreateInfo)
- {
- uint32_t i;
-
- subpass_color_formats.reserve(pCreateInfo->subpassCount);
- for (i = 0; i < pCreateInfo->subpassCount; i++) {
- const VkSubpassDescription *subpass = &pCreateInfo->pSubpasses[i];
- std::vector<VkFormat> color_formats;
- uint32_t j;
-
- color_formats.reserve(subpass->colorAttachmentCount);
- for (j = 0; j < subpass->colorAttachmentCount; j++) {
- const uint32_t att = subpass->pColorAttachments[j].attachment;
- const VkFormat format = pCreateInfo->pAttachments[att].format;
-
- color_formats.push_back(pCreateInfo->pAttachments[att].format);
- }
-
- subpass_color_formats.push_back(color_formats);
- }
- }
-};
-
-static std::unordered_map<void *, layer_data *> layer_data_map;
-
-template layer_data *get_my_data_ptr<layer_data>(
- void *data_key,
- std::unordered_map<void *, layer_data *> &data_map);
-
-static LOADER_PLATFORM_THREAD_ONCE_DECLARATION(g_initOnce);
-// TODO : This can be much smarter, using separate locks for separate global data
-static int globalLockInitialized = 0;
-static loader_platform_thread_mutex globalLock;
-
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorSetLayout(
- VkDevice device,
- const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkDescriptorSetLayout* pSetLayout)
-{
- layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
- /* stash a copy of the layout bindings */
- VkResult result = my_data->device_dispatch_table->CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
-
- if (VK_SUCCESS == result) {
- loader_platform_thread_lock_mutex(&globalLock);
- auto& bindings = my_data->descriptor_set_layout_map[*pSetLayout];
- bindings = new std::unordered_set<uint32_t>();
- bindings->reserve(pCreateInfo->bindingCount);
- for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++)
- bindings->insert(pCreateInfo->pBinding[i].binding);
-
- loader_platform_thread_unlock_mutex(&globalLock);
- }
-
- return result;
-}
-
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout(
- VkDevice device,
- const VkPipelineLayoutCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkPipelineLayout* pPipelineLayout)
-{
- layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
- VkResult result = my_data->device_dispatch_table->CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
-
- if (VK_SUCCESS == result) {
- loader_platform_thread_lock_mutex(&globalLock);
- auto& layouts = my_data->pipeline_layout_map[*pPipelineLayout];
- layouts = new std::vector<std::unordered_set<uint32_t>*>();
- layouts->reserve(pCreateInfo->setLayoutCount);
- for (unsigned i = 0; i < pCreateInfo->setLayoutCount; i++) {
- layouts->push_back(my_data->descriptor_set_layout_map[pCreateInfo->pSetLayouts[i]]);
- }
- loader_platform_thread_unlock_mutex(&globalLock);
- }
-
- return result;
-}
-
-static void
-build_type_def_index(std::vector<unsigned> const &words, std::unordered_map<unsigned, unsigned> &type_def_index)
-{
- unsigned int const *code = (unsigned int const *)&words[0];
- size_t size = words.size();
-
- unsigned word = 5;
- while (word < size) {
- unsigned opcode = code[word] & 0x0ffffu;
- unsigned oplen = (code[word] & 0xffff0000u) >> 16;
-
- switch (opcode) {
- case spv::OpTypeVoid:
- case spv::OpTypeBool:
- case spv::OpTypeInt:
- case spv::OpTypeFloat:
- case spv::OpTypeVector:
- case spv::OpTypeMatrix:
- case spv::OpTypeImage:
- case spv::OpTypeSampler:
- case spv::OpTypeSampledImage:
- case spv::OpTypeArray:
- case spv::OpTypeRuntimeArray:
- case spv::OpTypeStruct:
- case spv::OpTypeOpaque:
- case spv::OpTypePointer:
- case spv::OpTypeFunction:
- case spv::OpTypeEvent:
- case spv::OpTypeDeviceEvent:
- case spv::OpTypeReserveId:
- case spv::OpTypeQueue:
- case spv::OpTypePipe:
- type_def_index[code[word+1]] = word;
- break;
-
- default:
- /* We only care about type definitions */
- break;
- }
-
- word += oplen;
- }
-}
-
-bool
-shader_is_spirv(VkShaderModuleCreateInfo const *pCreateInfo)
-{
- uint32_t *words = (uint32_t *)pCreateInfo->pCode;
- size_t sizeInWords = pCreateInfo->codeSize / sizeof(uint32_t);
-
- /* Just validate that the header makes sense. */
- return sizeInWords >= 5 && words[0] == spv::MagicNumber && words[1] == spv::Version;
-}
-
-static void
-init_shader_checker(layer_data *my_data)
-{
- uint32_t report_flags = 0;
- uint32_t debug_action = 0;
- FILE *log_output = NULL;
- const char *option_str;
- VkDbgMsgCallback callback;
- // initialize ShaderChecker options
- report_flags = getLayerOptionFlags("ShaderCheckerReportFlags", 0);
- getLayerOptionEnum("ShaderCheckerDebugAction", (uint32_t *) &debug_action);
-
- if (debug_action & VK_DBG_LAYER_ACTION_LOG_MSG)
- {
- option_str = getLayerOption("ShaderCheckerLogFilename");
- log_output = getLayerLogOutput(option_str, "ShaderChecker");
- layer_create_msg_callback(my_data->report_data, report_flags, log_callback, (void *) log_output, &callback);
- my_data->logging_callback.push_back(callback);
- }
-
- if (debug_action & VK_DBG_LAYER_ACTION_DEBUG_OUTPUT) {
- layer_create_msg_callback(my_data->report_data, report_flags, win32_debug_output_msg, NULL, &callback);
- my_data->logging_callback.push_back(callback);
- }
-
- if (!globalLockInitialized)
- {
- loader_platform_thread_create_mutex(&globalLock);
- globalLockInitialized = 1;
- }
-}
-
-static const VkLayerProperties shader_checker_global_layers[] = {
- {
- "ShaderChecker",
- VK_API_VERSION,
- VK_MAKE_VERSION(0, 1, 0),
- "Validation layer: ShaderChecker",
- }
-};
-
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(
- const char *pLayerName,
- uint32_t *pCount,
- VkExtensionProperties* pProperties)
-{
- /* shader checker does not have any global extensions */
- return util_GetExtensionProperties(0, NULL, pCount, pProperties);
-}
-
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(
- uint32_t *pCount,
- VkLayerProperties* pProperties)
-{
- return util_GetLayerProperties(ARRAY_SIZE(shader_checker_global_layers),
- shader_checker_global_layers,
- pCount, pProperties);
-}
-
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(
- VkPhysicalDevice physicalDevice,
- const char* pLayerName,
- uint32_t* pCount,
- VkExtensionProperties* pProperties)
-{
- /* Shader checker does not have any physical device extensions */
- if (pLayerName == NULL) {
- dispatch_key key = get_dispatch_key(physicalDevice);
- layer_data *my_data = get_my_data_ptr(key, layer_data_map);
- return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(
- physicalDevice,
- NULL,
- pCount,
- pProperties);
- } else {
- return util_GetExtensionProperties(0, NULL, pCount, pProperties);
- }
-}
-
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(
- VkPhysicalDevice physicalDevice,
- uint32_t* pCount,
- VkLayerProperties* pProperties)
-{
- /* Shader checker physical device layers are the same as global */
- return util_GetLayerProperties(ARRAY_SIZE(shader_checker_global_layers),
- shader_checker_global_layers,
- pCount, pProperties);
-}
-
-static char const *
-storage_class_name(unsigned sc)
-{
- switch (sc) {
- case spv::StorageClassInput: return "input";
- case spv::StorageClassOutput: return "output";
- case spv::StorageClassUniformConstant: return "const uniform";
- case spv::StorageClassUniform: return "uniform";
- case spv::StorageClassWorkgroup: return "workgroup local";
- case spv::StorageClassCrossWorkgroup: return "workgroup global";
- case spv::StorageClassPrivate: return "private global";
- case spv::StorageClassFunction: return "function";
- case spv::StorageClassGeneric: return "generic";
- case spv::StorageClassAtomicCounter: return "atomic counter";
- case spv::StorageClassImage: return "image";
- default: return "unknown";
- }
-}
-
-/* returns ptr to null terminator */
-static char *
-describe_type(char *dst, shader_module const *src, unsigned type)
-{
- auto type_def_it = src->type_def_index.find(type);
-
- if (type_def_it == src->type_def_index.end()) {
- return dst + sprintf(dst, "undef");
- }
-
- unsigned int const *code = (unsigned int const *)&src->words[type_def_it->second];
- unsigned opcode = code[0] & 0x0ffffu;
- switch (opcode) {
- case spv::OpTypeBool:
- return dst + sprintf(dst, "bool");
- case spv::OpTypeInt:
- return dst + sprintf(dst, "%cint%d", code[3] ? 's' : 'u', code[2]);
- case spv::OpTypeFloat:
- return dst + sprintf(dst, "float%d", code[2]);
- case spv::OpTypeVector:
- dst += sprintf(dst, "vec%d of ", code[3]);
- return describe_type(dst, src, code[2]);
- case spv::OpTypeMatrix:
- dst += sprintf(dst, "mat%d of ", code[3]);
- return describe_type(dst, src, code[2]);
- case spv::OpTypeArray:
- dst += sprintf(dst, "arr[%d] of ", code[3]);
- return describe_type(dst, src, code[2]);
- case spv::OpTypePointer:
- dst += sprintf(dst, "ptr to %s ", storage_class_name(code[2]));
- return describe_type(dst, src, code[3]);
- case spv::OpTypeStruct:
- {
- unsigned oplen = code[0] >> 16;
- dst += sprintf(dst, "struct of (");
- for (unsigned i = 2; i < oplen; i++) {
- dst = describe_type(dst, src, code[i]);
- dst += sprintf(dst, i == oplen-1 ? ")" : ", ");
- }
- return dst;
- }
- case spv::OpTypeSampler:
- return dst + sprintf(dst, "sampler");
- default:
- return dst + sprintf(dst, "oddtype");
- }
-}
-
-static bool
-types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool b_arrayed)
-{
- auto a_type_def_it = a->type_def_index.find(a_type);
- auto b_type_def_it = b->type_def_index.find(b_type);
-
- if (a_type_def_it == a->type_def_index.end()) {
- return false;
- }
-
- if (b_type_def_it == b->type_def_index.end()) {
- return false;
- }
-
- /* walk two type trees together, and complain about differences */
- unsigned int const *a_code = (unsigned int const *)&a->words[a_type_def_it->second];
- unsigned int const *b_code = (unsigned int const *)&b->words[b_type_def_it->second];
-
- unsigned a_opcode = a_code[0] & 0x0ffffu;
- unsigned b_opcode = b_code[0] & 0x0ffffu;
-
- if (b_arrayed && b_opcode == spv::OpTypeArray) {
- /* we probably just found the extra level of arrayness in b_type: compare the type inside it to a_type */
- return types_match(a, b, a_type, b_code[2], false);
- }
-
- if (a_opcode != b_opcode) {
- return false;
- }
-
- switch (a_opcode) {
- /* if b_arrayed and we hit a leaf type, then we can't match -- there's nowhere for the extra OpTypeArray to be! */
- case spv::OpTypeBool:
- return true && !b_arrayed;
- case spv::OpTypeInt:
- /* match on width, signedness */
- return a_code[2] == b_code[2] && a_code[3] == b_code[3] && !b_arrayed;
- case spv::OpTypeFloat:
- /* match on width */
- return a_code[2] == b_code[2] && !b_arrayed;
- case spv::OpTypeVector:
- case spv::OpTypeMatrix:
- case spv::OpTypeArray:
- /* match on element type, count. these all have the same layout. we don't get here if
- * b_arrayed -- that is handled above. */
- return !b_arrayed && types_match(a, b, a_code[2], b_code[2], b_arrayed) && a_code[3] == b_code[3];
- case spv::OpTypeStruct:
- /* match on all element types */
- {
- if (b_arrayed) {
- /* for the purposes of matching different levels of arrayness, structs are leaves. */
- return false;
- }
-
- unsigned a_len = a_code[0] >> 16;
- unsigned b_len = b_code[0] >> 16;
-
- if (a_len != b_len) {
- return false; /* structs cannot match if member counts differ */
- }
-
- for (unsigned i = 2; i < a_len; i++) {
- if (!types_match(a, b, a_code[i], b_code[i], b_arrayed)) {
- return false;
- }
- }
-
- return true;
- }
- case spv::OpTypePointer:
- /* match on pointee type. storage class is expected to differ */
- return types_match(a, b, a_code[3], b_code[3], b_arrayed);
-
- default:
- /* remaining types are CLisms, or may not appear in the interfaces we
- * are interested in. Just claim no match.
- */
- return false;
-
- }
-}
-
-static int
-value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def)
-{
- auto it = map.find(id);
- if (it == map.end())
- return def;
- else
- return it->second;
-}
-
-
-static unsigned
-get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level)
-{
- auto type_def_it = src->type_def_index.find(type);
-
- if (type_def_it == src->type_def_index.end()) {
- return 1; /* This is actually broken SPIR-V... */
- }
-
- unsigned int const *code = (unsigned int const *)&src->words[type_def_it->second];
- unsigned opcode = code[0] & 0x0ffffu;
-
- switch (opcode) {
- case spv::OpTypePointer:
- /* see through the ptr -- this is only ever at the toplevel for graphics shaders;
- * we're never actually passing pointers around. */
- return get_locations_consumed_by_type(src, code[3], strip_array_level);
- case spv::OpTypeArray:
- if (strip_array_level) {
- return get_locations_consumed_by_type(src, code[2], false);
- }
- else {
- return code[3] * get_locations_consumed_by_type(src, code[2], false);
- }
- case spv::OpTypeMatrix:
- /* num locations is the dimension * element size */
- return code[3] * get_locations_consumed_by_type(src, code[2], false);
- default:
- /* everything else is just 1. */
- return 1;
-
- /* TODO: extend to handle 64bit scalar types, whose vectors may need
- * multiple locations. */
- }
-}
-
-
-struct interface_var {
- uint32_t id;
- uint32_t type_id;
- uint32_t offset;
- /* TODO: collect the name, too? Isn't required to be present. */
-};
-
-static void
-collect_interface_by_location(layer_data *my_data, VkDevice dev,
- shader_module const *src, spv::StorageClass sinterface,
- std::map<uint32_t, interface_var> &out,
- std::map<uint32_t, interface_var> &builtins_out,
- bool is_array_of_verts)
-{
- unsigned int const *code = (unsigned int const *)&src->words[0];
- size_t size = src->words.size();
-
- std::unordered_map<unsigned, unsigned> var_locations;
- std::unordered_map<unsigned, unsigned> var_builtins;
-
- unsigned word = 5;
- while (word < size) {
-
- unsigned opcode = code[word] & 0x0ffffu;
- unsigned oplen = (code[word] & 0xffff0000u) >> 16;
-
- /* We consider two interface models: SSO rendezvous-by-location, and
- * builtins. Complain about anything that fits neither model.
- */
- if (opcode == spv::OpDecorate) {
- if (code[word+2] == spv::DecorationLocation) {
- var_locations[code[word+1]] = code[word+3];
- }
-
- if (code[word+2] == spv::DecorationBuiltIn) {
- var_builtins[code[word+1]] = code[word+3];
- }
- }
-
- /* TODO: handle grouped decorations */
- /* TODO: handle index=1 dual source outputs from FS -- two vars will
- * have the same location, and we DONT want to clobber. */
-
- if (opcode == spv::OpVariable && code[word+3] == sinterface) {
- unsigned id = code[word+2];
- unsigned type = code[word+1];
-
- int location = value_or_default(var_locations, code[word+2], -1);
- int builtin = value_or_default(var_builtins, code[word+2], -1);
-
- if (location == -1 && builtin == -1) {
- /* No location defined, and not bound to an API builtin.
- * The spec says nothing about how this case works (or doesn't)
- * for interface matching.
- */
- log_msg(my_data->report_data, VK_DBG_REPORT_WARN_BIT, VK_OBJECT_TYPE_DEVICE, /*dev*/0, 0, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
- "var %d (type %d) in %s interface has no Location or Builtin decoration",
- code[word+2], code[word+1], storage_class_name(sinterface));
- }
- else if (location != -1) {
- /* A user-defined interface variable, with a location. Where a variable
- * occupied multiple locations, emit one result for each. */
- unsigned num_locations = get_locations_consumed_by_type(src, type,
- is_array_of_verts);
- for (int offset = 0; offset < num_locations; offset++) {
- interface_var v;
- v.id = id;
- v.type_id = type;
- v.offset = offset;
- out[location + offset] = v;
- }
- }
- else {
- /* A builtin interface variable */
- /* Note that since builtin interface variables do not consume numbered
- * locations, there is no larger-than-vec4 consideration as above
- */
- interface_var v;
- v.id = id;
- v.type_id = type;
- v.offset = 0;
- builtins_out[builtin] = v;
- }
- }
-
- word += oplen;
- }
-}
-
-static void
-collect_interface_by_descriptor_slot(layer_data *my_data, VkDevice dev,
- shader_module const *src, spv::StorageClass sinterface,
- std::map<std::pair<unsigned, unsigned>, interface_var> &out)
-{
- unsigned int const *code = (unsigned int const *)&src->words[0];
- size_t size = src->words.size();
-
- std::unordered_map<unsigned, unsigned> var_sets;
- std::unordered_map<unsigned, unsigned> var_bindings;
-
- unsigned word = 5;
- while (word < size) {
-
- unsigned opcode = code[word] & 0x0ffffu;
- unsigned oplen = (code[word] & 0xffff0000u) >> 16;
-
- /* All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
- * DecorationDescriptorSet and DecorationBinding.
- */
- if (opcode == spv::OpDecorate) {
- if (code[word+2] == spv::DecorationDescriptorSet) {
- var_sets[code[word+1]] = code[word+3];
- }
-
- if (code[word+2] == spv::DecorationBinding) {
- var_bindings[code[word+1]] = code[word+3];
- }
- }
-
- if (opcode == spv::OpVariable && (code[word+3] == spv::StorageClassUniform ||
- code[word+3] == spv::StorageClassUniformConstant)) {
- unsigned set = value_or_default(var_sets, code[word+2], 0);
- unsigned binding = value_or_default(var_bindings, code[word+2], 0);
-
- auto existing_it = out.find(std::make_pair(set, binding));
- if (existing_it != out.end()) {
- /* conflict within spv image */
- log_msg(my_data->report_data, VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_DEVICE, /*dev*/0, 0,
- SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
- "var %d (type %d) in %s interface in descriptor slot (%u,%u) conflicts with existing definition",
- code[word+2], code[word+1], storage_class_name(sinterface),
- existing_it->first.first, existing_it->first.second);
- }
-
- interface_var v;
- v.id = code[word+2];
- v.type_id = code[word+1];
- out[std::make_pair(set, binding)] = v;
- }
-
- word += oplen;
- }
-}
-
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule(
- VkDevice device,
- const VkShaderModuleCreateInfo *pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkShaderModule *pShaderModule)
-{
- layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
- bool skip_call = false;
- if (!shader_is_spirv(pCreateInfo)) {
- skip_call |= log_msg(my_data->report_data, VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_DEVICE,
- /* dev */ 0, 0, SHADER_CHECKER_NON_SPIRV_SHADER, "SC",
- "Shader is not SPIR-V");
- }
-
- if (skip_call)
- return VK_ERROR_VALIDATION_FAILED;
-
- VkResult res = my_data->device_dispatch_table->CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
-
- if (res == VK_SUCCESS) {
- loader_platform_thread_lock_mutex(&globalLock);
- my_data->shader_module_map[*pShaderModule] = new shader_module(pCreateInfo);
- loader_platform_thread_unlock_mutex(&globalLock);
- }
- return res;
-}
-
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(
- VkDevice device,
- const VkRenderPassCreateInfo *pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkRenderPass *pRenderPass)
-{
- layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
- VkResult res = my_data->device_dispatch_table->CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
-
- loader_platform_thread_lock_mutex(&globalLock);
- my_data->render_pass_map[*pRenderPass] = new render_pass(pCreateInfo);
- loader_platform_thread_unlock_mutex(&globalLock);
- return res;
-}
-
-static bool
-validate_interface_between_stages(layer_data *my_data, VkDevice dev,
- shader_module const *producer, char const *producer_name,
- shader_module const *consumer, char const *consumer_name,
- bool consumer_arrayed_input)
-{
- std::map<uint32_t, interface_var> outputs;
- std::map<uint32_t, interface_var> inputs;
-
- std::map<uint32_t, interface_var> builtin_outputs;
- std::map<uint32_t, interface_var> builtin_inputs;
-
- bool pass = true;
-
- collect_interface_by_location(my_data, dev, producer, spv::StorageClassOutput, outputs, builtin_outputs, false);
- collect_interface_by_location(my_data, dev, consumer, spv::StorageClassInput, inputs, builtin_inputs,
- consumer_arrayed_input);
-
- auto a_it = outputs.begin();
- auto b_it = inputs.begin();
-
- /* maps sorted by key (location); walk them together to find mismatches */
- while ((outputs.size() > 0 && a_it != outputs.end()) || ( inputs.size() && b_it != inputs.end())) {
- bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
- bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
- auto a_first = a_at_end ? 0 : a_it->first;
- auto b_first = b_at_end ? 0 : b_it->first;
-
- if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
- if (log_msg(my_data->report_data, VK_DBG_REPORT_PERF_WARN_BIT, VK_OBJECT_TYPE_DEVICE, /*dev*/0, 0, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
- "%s writes to output location %d which is not consumed by %s", producer_name, a_first, consumer_name)) {
- pass = false;
- }
- a_it++;
- }
- else if (a_at_end || a_first > b_first) {
- if (log_msg(my_data->report_data, VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_DEVICE, /*dev*/0, 0, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
- "%s consumes input location %d which is not written by %s", consumer_name, b_first, producer_name)) {
- pass = false;
- }
- b_it++;
- }
- else {
- if (types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id, consumer_arrayed_input)) {
- /* OK! */
- }
- else {
- char producer_type[1024];
- char consumer_type[1024];
- describe_type(producer_type, producer, a_it->second.type_id);
- describe_type(consumer_type, consumer, b_it->second.type_id);
-
- if (log_msg(my_data->report_data, VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_DEVICE, /*dev*/0, 0, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
- "Type mismatch on location %d: '%s' vs '%s'", a_it->first, producer_type, consumer_type)) {
- pass = false;
- }
- }
- a_it++;
- b_it++;
- }
- }
-
- return pass;
-}
-
-
-enum FORMAT_TYPE {
- FORMAT_TYPE_UNDEFINED,
- FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */
- FORMAT_TYPE_SINT,
- FORMAT_TYPE_UINT,
-};
-
-
-static unsigned
-get_format_type(VkFormat fmt) {
- switch (fmt) {
- case VK_FORMAT_UNDEFINED:
- return FORMAT_TYPE_UNDEFINED;
- case VK_FORMAT_R8_SINT:
- case VK_FORMAT_R8G8_SINT:
- case VK_FORMAT_R8G8B8_SINT:
- case VK_FORMAT_R8G8B8A8_SINT:
- case VK_FORMAT_R16_SINT:
- case VK_FORMAT_R16G16_SINT:
- case VK_FORMAT_R16G16B16_SINT:
- case VK_FORMAT_R16G16B16A16_SINT:
- case VK_FORMAT_R32_SINT:
- case VK_FORMAT_R32G32_SINT:
- case VK_FORMAT_R32G32B32_SINT:
- case VK_FORMAT_R32G32B32A32_SINT:
- case VK_FORMAT_B8G8R8_SINT:
- case VK_FORMAT_B8G8R8A8_SINT:
- case VK_FORMAT_A2B10G10R10_SINT_PACK32:
- case VK_FORMAT_A2R10G10B10_SINT_PACK32:
- return FORMAT_TYPE_SINT;
- case VK_FORMAT_R8_UINT:
- case VK_FORMAT_R8G8_UINT:
- case VK_FORMAT_R8G8B8_UINT:
- case VK_FORMAT_R8G8B8A8_UINT:
- case VK_FORMAT_R16_UINT:
- case VK_FORMAT_R16G16_UINT:
- case VK_FORMAT_R16G16B16_UINT:
- case VK_FORMAT_R16G16B16A16_UINT:
- case VK_FORMAT_R32_UINT:
- case VK_FORMAT_R32G32_UINT:
- case VK_FORMAT_R32G32B32_UINT:
- case VK_FORMAT_R32G32B32A32_UINT:
- case VK_FORMAT_B8G8R8_UINT:
- case VK_FORMAT_B8G8R8A8_UINT:
- case VK_FORMAT_A2B10G10R10_UINT_PACK32:
- case VK_FORMAT_A2R10G10B10_UINT_PACK32:
- return FORMAT_TYPE_UINT;
- default:
- return FORMAT_TYPE_FLOAT;
- }
-}
-
-
-/* characterizes a SPIR-V type appearing in an interface to a FF stage,
- * for comparison to a VkFormat's characterization above. */
-static unsigned
-get_fundamental_type(shader_module const *src, unsigned type)
-{
- auto type_def_it = src->type_def_index.find(type);
-
- if (type_def_it == src->type_def_index.end()) {
- return FORMAT_TYPE_UNDEFINED;
- }
-
- unsigned int const *code = (unsigned int const *)&src->words[type_def_it->second];
- unsigned opcode = code[0] & 0x0ffffu;
- switch (opcode) {
- case spv::OpTypeInt:
- return code[3] ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
- case spv::OpTypeFloat:
- return FORMAT_TYPE_FLOAT;
- case spv::OpTypeVector:
- return get_fundamental_type(src, code[2]);
- case spv::OpTypeMatrix:
- return get_fundamental_type(src, code[2]);
- case spv::OpTypeArray:
- return get_fundamental_type(src, code[2]);
- case spv::OpTypePointer:
- return get_fundamental_type(src, code[3]);
- default:
- return FORMAT_TYPE_UNDEFINED;
- }
-}
-
-
-static bool
-validate_vi_consistency(layer_data *my_data, VkDevice dev, VkPipelineVertexInputStateCreateInfo const *vi)
-{
- /* walk the binding descriptions, which describe the step rate and stride of each vertex buffer.
- * each binding should be specified only once.
- */
- std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
- bool pass = true;
-
- for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
- auto desc = &vi->pVertexBindingDescriptions[i];
- auto & binding = bindings[desc->binding];
- if (binding) {
- if (log_msg(my_data->report_data, VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_DEVICE, /*dev*/0, 0, SHADER_CHECKER_INCONSISTENT_VI, "SC",
- "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
- pass = false;
- }
- }
- else {
- binding = desc;
- }
- }
-
- return pass;
-}
-
-
-static bool
-validate_vi_against_vs_inputs(layer_data *my_data, VkDevice dev, VkPipelineVertexInputStateCreateInfo const *vi, shader_module const *vs)
-{
- std::map<uint32_t, interface_var> inputs;
- /* we collect builtin inputs, but they will never appear in the VI state --
- * the vs builtin inputs are generated in the pipeline, not sourced from buffers (VertexID, etc)
- */
- std::map<uint32_t, interface_var> builtin_inputs;
- bool pass = true;
-
- collect_interface_by_location(my_data, dev, vs, spv::StorageClassInput, inputs, builtin_inputs, false);
-
- /* Build index by location */
- std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
- if (vi) {
- for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++)
- attribs[vi->pVertexAttributeDescriptions[i].location] = &vi->pVertexAttributeDescriptions[i];
- }
-
- auto it_a = attribs.begin();
- auto it_b = inputs.begin();
-
- while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
- bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
- bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
- auto a_first = a_at_end ? 0 : it_a->first;
- auto b_first = b_at_end ? 0 : it_b->first;
- if (b_at_end || a_first < b_first) {
- if (log_msg(my_data->report_data, VK_DBG_REPORT_PERF_WARN_BIT, VK_OBJECT_TYPE_DEVICE, /*dev*/0, 0, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
- "Vertex attribute at location %d not consumed by VS", a_first)) {
- pass = false;
- }
- it_a++;
- }
- else if (a_at_end || b_first < a_first) {
- if (log_msg(my_data->report_data, VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_DEVICE, /*dev*/0, 0, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
- "VS consumes input at location %d but not provided", b_first)) {
- pass = false;
- }
- it_b++;
- }
- else {
- unsigned attrib_type = get_format_type(it_a->second->format);
- unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);
-
- /* type checking */
- if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
- char vs_type[1024];
- describe_type(vs_type, vs, it_b->second.type_id);
- if (log_msg(my_data->report_data, VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_DEVICE, /*dev*/0, 0, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
- "Attribute type of `%s` at location %d does not match VS input type of `%s`",
- string_VkFormat(it_a->second->format), a_first, vs_type)) {
- pass = false;
- }
- }
-
- /* OK! */
- it_a++;
- it_b++;
- }
- }
-
- return pass;
-}
-
-
-static bool
-validate_fs_outputs_against_render_pass(layer_data *my_data, VkDevice dev, shader_module const *fs, render_pass const *rp, uint32_t subpass)
-{
- const std::vector<VkFormat> &color_formats = rp->subpass_color_formats[subpass];
- std::map<uint32_t, interface_var> outputs;
- std::map<uint32_t, interface_var> builtin_outputs;
- bool pass = true;
-
- /* TODO: dual source blend index (spv::DecIndex, zero if not provided) */
-
- collect_interface_by_location(my_data, dev, fs, spv::StorageClassOutput, outputs, builtin_outputs, false);
-
- auto it = outputs.begin();
- uint32_t attachment = 0;
-
- /* Walk attachment list and outputs together -- this is a little overpowered since attachments
- * are currently dense, but the parallel with matching between shader stages is nice.
- */
-
- /* TODO: Figure out compile error with cb->attachmentCount */
- while ((outputs.size() > 0 && it != outputs.end()) || attachment < color_formats.size()) {
- if (attachment == color_formats.size() || ( it != outputs.end() && it->first < attachment)) {
- if (log_msg(my_data->report_data, VK_DBG_REPORT_WARN_BIT, VK_OBJECT_TYPE_DEVICE, /*dev*/0, 0, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
- "FS writes to output location %d with no matching attachment", it->first)) {
- pass = false;
- }
- it++;
- }
- else if (it == outputs.end() || it->first > attachment) {
- if (log_msg(my_data->report_data, VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_DEVICE, /*dev*/0, 0, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
- "Attachment %d not written by FS", attachment)) {
- pass = false;
- }
- attachment++;
- }
- else {
- unsigned output_type = get_fundamental_type(fs, it->second.type_id);
- unsigned att_type = get_format_type(color_formats[attachment]);
-
- /* type checking */
- if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
- char fs_type[1024];
- describe_type(fs_type, fs, it->second.type_id);
- if (log_msg(my_data->report_data, VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_DEVICE, /*dev*/0, 0, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
- "Attachment %d of type `%s` does not match FS output type of `%s`",
- attachment, string_VkFormat(color_formats[attachment]), fs_type)) {
- pass = false;
- }
- }
-
- /* OK! */
- it++;
- attachment++;
- }
- }
-
- return pass;
-}
-
-
-struct shader_stage_attributes {
- char const * const name;
- bool arrayed_input;
-};
-
-
-static shader_stage_attributes
-shader_stage_attribs[] = {
- { "vertex shader", false },
- { "tessellation control shader", true },
- { "tessellation evaluation shader", false },
- { "geometry shader", true },
- { "fragment shader", false },
-};
-
-
-static bool
-has_descriptor_binding(std::vector<std::unordered_set<uint32_t>*>* layout,
- std::pair<unsigned, unsigned> slot)
-{
- if (!layout)
- return false;
-
- if (slot.first >= layout->size())
- return false;
-
- auto set = (*layout)[slot.first];
-
- return (set->find(slot.second) != set->end());
-}
-
-static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage)
-{
- uint32_t bit_pos = u_ffs(stage);
- return bit_pos-1;
-}
-
-static bool
-validate_graphics_pipeline(layer_data *my_data, VkDevice dev, VkGraphicsPipelineCreateInfo const *pCreateInfo)
-{
- /* We seem to allow pipeline stages to be specified out of order, so collect and identify them
- * before trying to do anything more: */
- int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
- int geometry_stage = get_shader_stage_id(VK_SHADER_STAGE_GEOMETRY_BIT);
- int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);
-
- shader_module **shaders = new shader_module*[fragment_stage + 1]; /* exclude CS */
- memset(shaders, 0, sizeof(shader_module *) * (fragment_stage +1));
- render_pass const *rp = 0;
- VkPipelineVertexInputStateCreateInfo const *vi = 0;
- bool pass = true;
-
- loader_platform_thread_lock_mutex(&globalLock);
-
- for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
- VkPipelineShaderStageCreateInfo const *pStage = &pCreateInfo->pStages[i];
- if (pStage->sType == VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO) {
-
- if ((pStage->stage & (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT
- | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) == 0) {
- if (log_msg(my_data->report_data, VK_DBG_REPORT_WARN_BIT, VK_OBJECT_TYPE_DEVICE, /*dev*/0, 0, SHADER_CHECKER_UNKNOWN_STAGE, "SC",
- "Unknown shader stage %d", pStage->stage)) {
- pass = false;
- }
- }
- else {
- shader_module *module = my_data->shader_module_map[pStage->module];
- shaders[get_shader_stage_id(pStage->stage)] = module;
-
- /* validate descriptor set layout against what the spirv module actually uses */
- std::map<std::pair<unsigned, unsigned>, interface_var> descriptor_uses;
- collect_interface_by_descriptor_slot(my_data, dev, module, spv::StorageClassUniform,
- descriptor_uses);
-
- auto layout = pCreateInfo->layout != VK_NULL_HANDLE ?
- my_data->pipeline_layout_map[pCreateInfo->layout] : nullptr;
-
- for (auto it = descriptor_uses.begin(); it != descriptor_uses.end(); it++) {
-
- /* find the matching binding */
- auto found = has_descriptor_binding(layout, it->first);
-
- if (!found) {
- char type_name[1024];
- describe_type(type_name, module, it->second.type_id);
- if (log_msg(my_data->report_data, VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_DEVICE, /*dev*/0, 0,
- SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
- "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
- it->first.first, it->first.second, type_name)) {
- pass = false;
- }
- }
- }
- }
- }
- }
-
- if (pCreateInfo->renderPass != VK_NULL_HANDLE)
- rp = my_data->render_pass_map[pCreateInfo->renderPass];
-
- vi = pCreateInfo->pVertexInputState;
-
- if (vi) {
- pass = validate_vi_consistency(my_data, dev, vi) && pass;
- }
-
- if (shaders[vertex_stage]) {
- pass = validate_vi_against_vs_inputs(my_data, dev, vi, shaders[vertex_stage]) && pass;
- }
-
- /* TODO: enforce rules about present combinations of shaders */
- int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
- int consumer = get_shader_stage_id(VK_SHADER_STAGE_GEOMETRY_BIT);
-
- while (!shaders[producer] && producer != fragment_stage) {
- producer++;
- consumer++;
- }
-
- for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
- assert(shaders[producer]);
- if (shaders[consumer]) {
- pass = validate_interface_between_stages(my_data, dev,
- shaders[producer], shader_stage_attribs[producer].name,
- shaders[consumer], shader_stage_attribs[consumer].name,
- shader_stage_attribs[consumer].arrayed_input) && pass;
-
- producer = consumer;
- }
- }
-
- if (shaders[fragment_stage] && rp) {
- pass = validate_fs_outputs_against_render_pass(my_data, dev, shaders[fragment_stage], rp, pCreateInfo->subpass) && pass;
- }
-
- delete shaders;
-
- loader_platform_thread_unlock_mutex(&globalLock);
- return pass;
-}
-
-//TODO handle pipelineCache entry points
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
-vkCreateGraphicsPipelines(VkDevice device,
- VkPipelineCache pipelineCache,
- uint32_t count,
- const VkGraphicsPipelineCreateInfo *pCreateInfos,
- const VkAllocationCallbacks* pAllocator,
- VkPipeline *pPipelines)
-{
- layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
- bool pass = true;
- for (uint32_t i = 0; i < count; i++) {
- pass = validate_graphics_pipeline(my_data, device, &pCreateInfos[i]) && pass;
- }
-
- if (pass) {
- /* The driver is allowed to crash if passed junk. Only actually create the
- * pipeline if we didn't run into any showstoppers above.
- */
- return my_data->device_dispatch_table->CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
- }
- else {
- return VK_ERROR_VALIDATION_FAILED;
- }
-}
-
-
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDevice* pDevice)
-{
- layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
- VkResult result = my_device_data->device_dispatch_table->CreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
- if (result == VK_SUCCESS) {
- layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
- my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
- }
- return result;
-}
-
-/* hook DextroyDevice to remove tableMap entry */
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, const VkAllocationCallbacks* pAllocator)
-{
- dispatch_key key = get_dispatch_key(device);
- layer_data *my_device_data = get_my_data_ptr(key, layer_data_map);
- my_device_data->device_dispatch_table->DestroyDevice(device, pAllocator);
- delete my_device_data->device_dispatch_table;
- layer_data_map.erase(key);
-}
-
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance(
- const VkInstanceCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkInstance* pInstance)
-{
- layer_data *my_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
- VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
- VkResult result = pTable->CreateInstance(pCreateInfo, pAllocator, pInstance);
-
- if (result == VK_SUCCESS) {
- layer_data *my_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
- my_data->report_data = debug_report_create_instance(
- pTable,
- *pInstance,
- pCreateInfo->enabledExtensionNameCount,
- pCreateInfo->ppEnabledExtensionNames);
-
- init_shader_checker(my_data);
- }
- return result;
-}
-
-/* hook DestroyInstance to remove tableInstanceMap entry */
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance, const VkAllocationCallbacks* pAllocator)
-{
- dispatch_key key = get_dispatch_key(instance);
- layer_data *my_data = get_my_data_ptr(key, layer_data_map);
- my_data->instance_dispatch_table->DestroyInstance(instance, pAllocator);
-
- // Clean up logging callback, if any
- while (my_data->logging_callback.size() > 0) {
- VkDbgMsgCallback callback = my_data->logging_callback.back();
- layer_destroy_msg_callback(my_data->report_data, callback);
- my_data->logging_callback.pop_back();
- }
-
- layer_debug_report_destroy_instance(my_data->report_data);
- delete my_data->instance_dispatch_table;
- layer_data_map.erase(key);
- if (layer_data_map.empty()) {
- // Release mutex when destroying last instance.
- loader_platform_thread_delete_mutex(&globalLock);
- globalLockInitialized = 0;
- }
-}
-
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkDbgCreateMsgCallback(
- VkInstance instance,
- VkFlags msgFlags,
- const PFN_vkDbgMsgCallback pfnMsgCallback,
- void* pUserData,
- VkDbgMsgCallback* pMsgCallback)
-{
- layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
- VkResult res = my_data->instance_dispatch_table->DbgCreateMsgCallback(instance, msgFlags, pfnMsgCallback, pUserData, pMsgCallback);
- if (VK_SUCCESS == res) {
- res = layer_create_msg_callback(my_data->report_data, msgFlags, pfnMsgCallback, pUserData, pMsgCallback);
- }
- return res;
-}
-
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkDbgDestroyMsgCallback(
- VkInstance instance,
- VkDbgMsgCallback msgCallback)
-{
- layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
- VkResult res = my_data->instance_dispatch_table->DbgDestroyMsgCallback(instance, msgCallback);
- layer_destroy_msg_callback(my_data->report_data, msgCallback);
- return res;
-}
-
-VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char* funcName)
-{
- if (dev == NULL)
- return NULL;
-
- layer_data *my_data;
- /* loader uses this to force layer initialization; device object is wrapped */
- if (!strcmp("vkGetDeviceProcAddr", funcName)) {
- VkBaseLayerObject* wrapped_dev = (VkBaseLayerObject*) dev;
- my_data = get_my_data_ptr(get_dispatch_key(wrapped_dev->baseObject), layer_data_map);
- my_data->device_dispatch_table = new VkLayerDispatchTable;
- layer_initialize_dispatch_table(my_data->device_dispatch_table, wrapped_dev);
- return (PFN_vkVoidFunction) vkGetDeviceProcAddr;
- }
- my_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
-
-#define ADD_HOOK(fn) \
- if (!strncmp(#fn, funcName, sizeof(#fn))) \
- return (PFN_vkVoidFunction) fn
-
- ADD_HOOK(vkCreateDevice);
- ADD_HOOK(vkCreateShaderModule);
- ADD_HOOK(vkCreateRenderPass);
- ADD_HOOK(vkDestroyDevice);
- ADD_HOOK(vkCreateGraphicsPipelines);
- ADD_HOOK(vkCreateDescriptorSetLayout);
- ADD_HOOK(vkCreatePipelineLayout);
-#undef ADD_HOOK
-
- VkLayerDispatchTable* pTable = my_data->device_dispatch_table;
- {
- if (pTable->GetDeviceProcAddr == NULL)
- return NULL;
- return pTable->GetDeviceProcAddr(dev, funcName);
- }
-}
-
-VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char* funcName)
-{
- PFN_vkVoidFunction fptr;
-
- if (instance == NULL)
- return NULL;
-
- layer_data *my_data;
- if (!strcmp("vkGetInstanceProcAddr", funcName)) {
- VkBaseLayerObject* wrapped_inst = (VkBaseLayerObject*) instance;
- my_data = get_my_data_ptr(get_dispatch_key(wrapped_inst->baseObject), layer_data_map);
- my_data->instance_dispatch_table = new VkLayerInstanceDispatchTable;
- layer_init_instance_dispatch_table(my_data->instance_dispatch_table, wrapped_inst);
- return (PFN_vkVoidFunction) vkGetInstanceProcAddr;
- }
-#define ADD_HOOK(fn) \
- if (!strncmp(#fn, funcName, sizeof(#fn))) \
- return (PFN_vkVoidFunction) fn
-
- ADD_HOOK(vkCreateInstance);
- ADD_HOOK(vkDestroyInstance);
- ADD_HOOK(vkEnumerateInstanceExtensionProperties);
- ADD_HOOK(vkEnumerateDeviceExtensionProperties);
- ADD_HOOK(vkEnumerateInstanceLayerProperties);
- ADD_HOOK(vkEnumerateDeviceLayerProperties);
-#undef ADD_HOOK
-
-
- my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
- fptr = debug_report_get_instance_proc_addr(my_data->report_data, funcName);
- if (fptr)
- return fptr;
-
- {
- VkLayerInstanceDispatchTable* pTable = my_data->instance_dispatch_table;
- if (pTable->GetInstanceProcAddr == NULL)
- return NULL;
- return pTable->GetInstanceProcAddr(instance, funcName);
- }
-}
diff --git a/layers/shader_checker.h b/layers/shader_checker.h
deleted file mode 100644
index 47806f44..00000000
--- a/layers/shader_checker.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- *
- * Copyright (C) 2015 Valve Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Author: Chris Forbes <chrisf@ijw.co.nz>
- */
-#include "vulkan/vk_layer.h"
-#include "vulkan/vk_lunarg_debug_report.h"
-#include "vk_layer_logging.h"
-
-/* Shader checker error codes */
-typedef enum _SHADER_CHECKER_ERROR
-{
- SHADER_CHECKER_NONE,
- SHADER_CHECKER_FS_MIXED_BROADCAST, /* FS writes broadcast output AND custom outputs */
- SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, /* Type mismatch between shader stages or shader and pipeline */
- SHADER_CHECKER_OUTPUT_NOT_CONSUMED, /* Entry appears in output interface, but missing in input */
- SHADER_CHECKER_INPUT_NOT_PRODUCED, /* Entry appears in input interface, but missing in output */
- SHADER_CHECKER_NON_SPIRV_SHADER, /* Shader image is not SPIR-V */
- SHADER_CHECKER_INCONSISTENT_SPIRV, /* General inconsistency within a SPIR-V module */
- SHADER_CHECKER_UNKNOWN_STAGE, /* Stage is not supported by analysis */
- SHADER_CHECKER_INCONSISTENT_VI, /* VI state contains conflicting binding or attrib descriptions */
- SHADER_CHECKER_MISSING_DESCRIPTOR, /* Shader attempts to use a descriptor binding not declared in the layout */
-} SHADER_CHECKER_ERROR;
diff --git a/layers/vk_layer_settings.txt b/layers/vk_layer_settings.txt
index 5c753419..1aee14f5 100644
--- a/layers/vk_layer_settings.txt
+++ b/layers/vk_layer_settings.txt
@@ -63,11 +63,6 @@ ParamCheckerDebugAction = VK_DBG_LAYER_ACTION_LOG_MSG
ParamCheckerReportFlags = error,warn,perf
ParamCheckerLogFilename = stdout
-# VK_LUNARG_LAYER_ShaderChecker Settings
-ShaderCheckerDebugAction = VK_DBG_LAYER_ACTION_LOG_MSG
-ShaderCheckerReportFlags = error,warn,perf
-ShaderCheckerLogFilename = stdout
-
# VK_LUNARG_LAYER_Swapchain Settings
SwapchainDebugAction = VK_DBG_LAYER_ACTION_LOG_MSG
SwapchainReportFlags = error,warn,perf
diff --git a/layers/vk_validation_layer_details.md b/layers/vk_validation_layer_details.md
index 3b271750..27e6023a 100644
--- a/layers/vk_validation_layer_details.md
+++ b/layers/vk_validation_layer_details.md
@@ -6,7 +6,7 @@
### DrawState Overview
-The DrawState layer tracks state leading into Draw cmds. This includes the Pipeline state, dynamic state, and descriptor set state. DrawState validates the consistency and correctness between and within these states.
+The DrawState layer tracks state leading into Draw cmds. This includes the Pipeline state, dynamic state, shaders, and descriptor set state. DrawState validates the consistency and correctness between and within these states. DrawState also includes SPIR-V validation which functionality is recorded under the ShaderChecker section below.
### DrawState Details Table
@@ -91,6 +91,34 @@ Additional checks to be added to DrawState
32. Update Gfx Pipe Create Info shadowing to remove new/delete and instead use unique_ptrs for auto clean-up
33. Add validation for Pipeline Derivatives (see Pipeline Derivatives) section of the spec
+## ShaderChecker
+
+### ShaderChecker Overview
+
+The ShaderChecker functionality is part of DrawState layer and it inspects the SPIR-V shader images and fixed function pipeline stages at PSO creation time.
+It flags errors when inconsistencies are found across interfaces between shader stages. The exact behavior of the checks
+depends on the pair of pipeline stages involved.
+
+### ShaderChecker Details Table
+
+| Check | Overview | ENUM SHADER_CHECKER_* | Relevant API | Testname | Notes/TODO |
+| ----- | -------- | ---------------- | ------------ | -------- | ---------- |
+| Not consumed | Flag warning if a location is not consumed (useless work) | OUTPUT_NOT_CONSUMED | vkCreateGraphicsPipelines | CreatePipeline*NotConsumed | NA |
+| Not produced | Flag error if a location is not produced (consumer reads garbage) | INPUT_NOT_PRODUCED | vkCreateGraphicsPipelines | CreatePipeline*NotProvided | NA |
+| Type mismatch | Flag error if a location has inconsistent types | INTERFACE_TYPE_MISMATCH | vkCreateGraphicsPipelines | CreatePipeline*TypeMismatch | Between shader stages, an exact structural type match is required. Between VI and VS, or between FS and CB, only the basic component type must match (float for UNORM/SNORM/FLOAT, int for SINT, uint for UINT) as the VI and CB stages perform conversions to the exact format. |
+| Inconsistent shader | Flag error if an inconsistent SPIR-V image is detected. Possible cases include broken type definitions which the layer fails to walk. | INCONSISTENT_SPIRV | vkCreateGraphicsPipelines | TODO | All current tests use the reference compiler to produce valid SPIRV images from GLSL. |
+| Non-SPIRV shader | Flag warning if a non-SPIR-V shader image is detected. This can occur if early drivers are ingesting GLSL. ShaderChecker cannot analyze non-SPIRV shaders, so this suppresses most other checks. | NON_SPIRV_SHADER | vkCreateGraphicsPipelines | TODO | NA |
+| FS mixed broadcast | Flag error if the fragment shader writes both the legacy gl_FragCoord (which broadcasts to all CBs) and custom FS outputs. | FS_MIXED_BROADCAST | vkCreateGraphicsPipelines | TODO | Reference compiler refuses to compile shaders which do this |
+| VI Binding Descriptions | Validate that there is a single vertex input binding description for each binding | INCONSISTENT_VI | vkCreateGraphicsPipelines | CreatePipelineAttribBindingConflict | NA |
+| Shader Stage Check | Warns if shader stage is unsupported | UNKNOWN_STAGE | vkCreateGraphicsPipelines | TBD | NA |
+| Missing Descriptor | Flags error if shader attempts to use a descriptor binding not declared in the layout | MISSING_DESCRIPTOR | vkCreateGraphicsPipelines | CreatePipelineUniformBlockNotProvided | NA |
+| NA | Enum used for informational messages | NONE | | NA | None |
+
+### ShaderChecker Pending Work
+- Additional test cases for variously broken SPIRV images
+- Validation of a single SPIRV image in isolation (the spec describes many constraints)
+- Validation of SPIRV use of descriptors against the declared descriptor set layout
+
## ParamChecker
### ParamChecker Overview
@@ -191,35 +219,6 @@ The MemTracker layer tracks memory objects and references and validates that the
12. Modify INVALID_FENCE_STATE to be WARNINGs instead of ERROR
13. Report destroy or modify of resources in use on queues and not cleared by fence or WaitIdle. Could be fence, semaphore, or objects used by submitted CommandBuffers.
-
-## ShaderChecker
-
-### ShaderChecker Overview
-
-The ShaderChecker layer inspects the SPIR-V shader images and fixed function pipeline stages at PSO creation time.
-It flags errors when inconsistencies are found across interfaces between shader stages. The exact behavior of the checks
-depends on the pair of pipeline stages involved.
-
-### ShaderChecker Details Table
-
-| Check | Overview | ENUM SHADER_CHECKER_* | Relevant API | Testname | Notes/TODO |
-| ----- | -------- | ---------------- | ------------ | -------- | ---------- |
-| Not consumed | Flag warning if a location is not consumed (useless work) | OUTPUT_NOT_CONSUMED | vkCreateGraphicsPipelines | CreatePipeline*NotConsumed | NA |
-| Not produced | Flag error if a location is not produced (consumer reads garbage) | INPUT_NOT_PRODUCED | vkCreateGraphicsPipelines | CreatePipeline*NotProvided | NA |
-| Type mismatch | Flag error if a location has inconsistent types | INTERFACE_TYPE_MISMATCH | vkCreateGraphicsPipelines | CreatePipeline*TypeMismatch | Between shader stages, an exact structural type match is required. Between VI and VS, or between FS and CB, only the basic component type must match (float for UNORM/SNORM/FLOAT, int for SINT, uint for UINT) as the VI and CB stages perform conversions to the exact format. |
-| Inconsistent shader | Flag error if an inconsistent SPIR-V image is detected. Possible cases include broken type definitions which the layer fails to walk. | INCONSISTENT_SPIRV | vkCreateGraphicsPipelines | TODO | All current tests use the reference compiler to produce valid SPIRV images from GLSL. |
-| Non-SPIRV shader | Flag warning if a non-SPIR-V shader image is detected. This can occur if early drivers are ingesting GLSL. ShaderChecker cannot analyze non-SPIRV shaders, so this suppresses most other checks. | NON_SPIRV_SHADER | vkCreateGraphicsPipelines | TODO | NA |
-| FS mixed broadcast | Flag error if the fragment shader writes both the legacy gl_FragCoord (which broadcasts to all CBs) and custom FS outputs. | FS_MIXED_BROADCAST | vkCreateGraphicsPipelines | TODO | Reference compiler refuses to compile shaders which do this |
-| VI Binding Descriptions | Validate that there is a single vertex input binding description for each binding | INCONSISTENT_VI | vkCreateGraphicsPipelines | CreatePipelineAttribBindingConflict | NA |
-| Shader Stage Check | Warns if shader stage is unsupported | UNKNOWN_STAGE | vkCreateGraphicsPipelines | TBD | NA |
-| Missing Descriptor | Flags error if shader attempts to use a descriptor binding not declared in the layout | MISSING_DESCRIPTOR | vkCreateGraphicsPipelines | CreatePipelineUniformBlockNotProvided | NA |
-| NA | Enum used for informational messages | NONE | | NA | None |
-
-### ShaderChecker Pending Work
-- Additional test cases for variously broken SPIRV images
-- Validation of a single SPIRV image in isolation (the spec describes many constraints)
-- Validation of SPIRV use of descriptors against the declared descriptor set layout
-
## ObjectTracker
### ObjectTracker Overview
diff --git a/layers/windows/shader_checker.json b/layers/windows/shader_checker.json
deleted file mode 100644
index 57975d10..00000000
--- a/layers/windows/shader_checker.json
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "file_format_version" : "1.0.0",
- "layer" : {
- "name": "VK_LAYER_LUNARG_ShaderChecker",
- "type": "GLOBAL",
- "library_path": ".\\VKLayerShaderChecker.dll",
- "api_version": "0.210.0",
- "implementation_version": "1",
- "description": "LunarG Validation Layer"
- }
-}
diff --git a/vk_layer_documentation_generate.py b/vk_layer_documentation_generate.py
index 497edc34..ccc0a8d8 100755
--- a/vk_layer_documentation_generate.py
+++ b/vk_layer_documentation_generate.py
@@ -61,8 +61,8 @@ layer_inputs = { 'draw_state' : {'header' : 'layers/draw_state.h',
'source' : 'layers/mem_tracker.cpp',
'generated' : False,
'error_enum' : 'MEM_TRACK_ERROR'},
- 'shader_checker' : {'header' : 'layers/shader_checker.h',
- 'source' : 'layers/shader_checker.cpp',
+ 'shader_checker' : {'header' : 'layers/draw_state.h',
+ 'source' : 'layers/draw_state.cpp',
'generated' : False,
'error_enum' : 'SHADER_CHECKER_ERROR'},
'threading' : {'header' : 'layers/threading.h',
@@ -137,8 +137,8 @@ class LayerParser:
# For each header file, parse details into dicts
# TODO : Should have a global dict element to track overall list of checks
store_enum = False
- for hf in self.header_files:
- layer_name = os.path.basename(hf).split('.')[0]
+ for layer_name in layer_inputs:
+ hf = layer_inputs[layer_name]['header']
self.layer_dict[layer_name] = {} # initialize a new dict for this layer
self.layer_dict[layer_name]['CHECKS'] = [] # enum of checks is stored in a list
#print('Parsing header file %s as layer name %s' % (hf, layer_name))