aboutsummaryrefslogtreecommitdiff
path: root/loader
diff options
context:
space:
mode:
author    Jon Ashburn <jon@lunarg.com>    2015-06-09 11:27:20 -0600
committer Courtney Goeltzenleuchter <courtney@LunarG.com>    2015-06-18 10:22:55 -0600
commitded9bfa150e0ee35b58e458f42274befae62c0f5 (patch)
treec732cc2d43fc980a63e8f4586b6054ea71b61137 /loader
parent7f28f3e284c82b8f3fdf036a6bad4335c54683ee (diff)
downloadusermoji-ded9bfa150e0ee35b58e458f42274befae62c0f5.tar.xz
loader: Fix device layer activation to properly filter out repeat layers
Diffstat (limited to 'loader')
-rw-r--r--    loader/loader.c | 29
-rw-r--r--    loader/loader.h |  1
2 files changed, 16 insertions(+), 14 deletions(-)
diff --git a/loader/loader.c b/loader/loader.c
index 56df3524..5279246d 100644
--- a/loader/loader.c
+++ b/loader/loader.c
@@ -1329,9 +1329,6 @@ extern uint32_t loader_activate_device_layers(
uint32_t ext_count,
const VkExtensionProperties *ext_props)
{
- uint32_t count;
- uint32_t layer_idx;
-
if (!icd)
return 0;
assert(gpu_index < MAX_GPUS_FOR_LAYER);
@@ -1343,31 +1340,36 @@ extern uint32_t loader_activate_device_layers(
VkBaseLayerObject *nextGpuObj;
PFN_vkGetDeviceProcAddr nextGPA = icd->GetDeviceProcAddr;
VkBaseLayerObject *wrappedGpus;
- count = 0;
+ /*
+ * Figure out how many actual layers will need to be wrapped.
+ */
for (uint32_t i = 0; i < icd->enabled_device_extensions[gpu_index].count; i++) {
struct loader_extension_property *ext_prop = &icd->enabled_device_extensions[gpu_index].list[i];
- if (ext_prop->origin == VK_EXTENSION_ORIGIN_LAYER) {
- count++;
+ if (ext_prop->alias) {
+ ext_prop = ext_prop->alias;
+ }
+ if (ext_prop->origin != VK_EXTENSION_ORIGIN_LAYER) {
+ continue;
}
+ loader_add_to_ext_list(&icd->activated_layer_list[gpu_index], 1, ext_prop);
}
- if (!count)
+
+ if (!icd->activated_layer_list[gpu_index].count)
return 0;
- icd->layer_count[gpu_index] = count;
+ icd->layer_count[gpu_index] = icd->activated_layer_list[gpu_index].count;
wrappedGpus = malloc(sizeof(VkBaseLayerObject) * icd->layer_count[gpu_index]);
if (! wrappedGpus) {
loader_log(VK_DBG_REPORT_ERROR_BIT, 0, "Failed to malloc Gpu objects for layer");
return 0;
}
- layer_idx = count - 1;
for (int32_t i = icd->layer_count[gpu_index] - 1; i >= 0; i--) {
- struct loader_extension_property *ext_prop = &icd->enabled_device_extensions[gpu_index].list[i];
+
+ struct loader_extension_property *ext_prop = &icd->activated_layer_list[gpu_index].list[i];
loader_platform_dl_handle lib_handle;
- if (ext_prop->origin != VK_EXTENSION_ORIGIN_LAYER) {
- continue;
- }
+ assert(ext_prop->origin == VK_EXTENSION_ORIGIN_LAYER);
nextGpuObj = (wrappedGpus + i);
nextGpuObj->pGPA = nextGPA;
@@ -1389,7 +1391,6 @@ extern uint32_t loader_activate_device_layers(
"Insert device layer library %s for extension: %s",
ext_prop->lib_name, ext_prop->info.name);
- layer_idx--;
}
loader_init_device_dispatch_table(icd->loader_dispatch + gpu_index, nextGPA,
diff --git a/loader/loader.h b/loader/loader.h
index 74478fc5..44ded0ba 100644
--- a/loader/loader.h
+++ b/loader/loader.h
@@ -122,6 +122,7 @@ struct loader_icd {
struct loader_extension_list device_extension_cache[MAX_GPUS_FOR_LAYER];
struct loader_extension_list enabled_device_extensions[MAX_GPUS_FOR_LAYER];
+ struct loader_extension_list activated_layer_list[MAX_GPUS_FOR_LAYER];
};
struct loader_instance {