Vulkan Compute Shader :: Shader Storage Solution

Hello guys,

Back in the community; I took a look around, but I still have problems…

Here is my Compute Shader…

#version 310 es
layout (local_size_x = 1024) in;
layout (set = 0, binding = 0) buffer Test {
    float f[];
} test;
void main() {
    test.f[0] = 3.0f;
}

It compiles fine; I've made it through the days when my GPU wasn't proud of it.

My task is to turn test.f[0] from 1.0f to 3.0f.

Before presenting the pipeline that builds and executes the compute shader, you should know that the program crashes when I call vkBeginCommandBuffer.

With other pipelines I lined up, similar to the one below, there was no crash and no errors showed anywhere, yet the compute shader still would not turn the 1.0 into a 3.0.

The buffer itself is in perfect condition on the GPU, as expected; I've verified this previously by mapping the buffer, writing to it, flushing, and reading the result back. I first made the mistake of binding the buffer with vkCmdBindVertexBuffers, but I switched to descriptor sets as per the API spec, so maybe the error is in my implementation of a descriptor set.
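The check looked roughly like this (a sketch from memory; it assumes memTag is the buffer's host-visible device memory, and VkResult checks are omitted):

float *mapped;
vkMapMemory(lDevice, memTag, 0, VK_WHOLE_SIZE, 0, (void **)&mapped);
mapped[0] = 1.0f; // write the starting value from the host

VkMappedMemoryRange range{}; // zero-initialized so pNext is null
range.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
range.memory = memTag;
range.offset = 0;
range.size = VK_WHOLE_SIZE;
vkFlushMappedMemoryRanges(lDevice, 1, &range);      // push the host write to the device
vkInvalidateMappedMemoryRanges(lDevice, 1, &range); // pull any device writes back to the host
LOGI("Value :: %f", mapped[0]);                     // reads back 1.0 as expected
vkUnmapMemory(lDevice, memTag);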

Anyways, the pipeline:

vkCreateShaderModule(lDevice, &shaderInfo, nullptr, &shaderTag);
delete[] shaderInfo.pCode;
shaderStageInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
shaderStageInfo.module = shaderTag;
shaderStageInfo.pName = "main";
shaderStageInfo.stage = VK_SHADER_STAGE_COMPUTE_BIT;

VkDescriptorPoolSize size;
size.descriptorCount = 1;
size.type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;

VkDescriptorPoolCreateInfo descriptorPoolInfo;
descriptorPoolInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
descriptorPoolInfo.poolSizeCount = 1;
descriptorPoolInfo.pPoolSizes = &size;
descriptorPoolInfo.maxSets = 1;
vkCreateDescriptorPool(lDevice, &descriptorPoolInfo, nullptr, &descriptorPool);

VkDescriptorSetLayoutBinding descriptorSetLayoutBinding;
descriptorSetLayoutBinding.binding = 0;
descriptorSetLayoutBinding.descriptorCount = 1;
descriptorSetLayoutBinding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
descriptorSetLayoutBinding.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
VkDescriptorSetLayoutCreateInfo descriptorInfo;
descriptorInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
descriptorInfo.bindingCount = 1;
descriptorInfo.pBindings = &descriptorSetLayoutBinding;

vkCreateDescriptorSetLayout(lDevice, &descriptorInfo, nullptr, &descriptorTag);

VkDescriptorSetAllocateInfo descriptorSetCiiInfo;
descriptorSetCiiInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
descriptorSetCiiInfo.descriptorPool = descriptorPool;
descriptorSetCiiInfo.descriptorSetCount = 1;
descriptorSetCiiInfo.pSetLayouts = &descriptorTag;
vkAllocateDescriptorSets(lDevice, &descriptorSetCiiInfo, &descriptorSet);
VkDescriptorBufferInfo bufferInfo;
bufferInfo.buffer = bufferTag;
bufferInfo.offset = 0;
bufferInfo.range = VK_WHOLE_SIZE;

VkWriteDescriptorSet writeDescriptorSetInfo;
writeDescriptorSetInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
writeDescriptorSetInfo.dstSet = descriptorSet;
writeDescriptorSetInfo.dstBinding = 0;
writeDescriptorSetInfo.dstArrayElement = 0;
writeDescriptorSetInfo.descriptorCount = 1;
writeDescriptorSetInfo.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
writeDescriptorSetInfo.pBufferInfo = &bufferInfo;
vkUpdateDescriptorSets(lDevice, 1, &writeDescriptorSetInfo, 0, nullptr);

VkPipelineLayoutCreateInfo layoutInfo;
layoutInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
layoutInfo.setLayoutCount = 0;
layoutInfo.pSetLayouts = &descriptorTag;
layoutInfo.pushConstantRangeCount = 0;
layoutInfo.pPushConstantRanges = nullptr;
vkCreatePipelineLayout(lDevice, &layoutInfo, nullptr, &layoutTag);

VkComputePipeline tag;
VkComputePipelineCreateInfo pipeLineInfo;
pipeLineInfo.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
pipeLineInfo.stage = shaderStageInfo;
pipeLineInfo.layout = layoutTag;
vkCreateComputePipelines(lDevice, VK_NULL_HANDLE, 1, &pipeLineInfo, nullptr, &tag);

VkCommandPool poolTag;
VkCommandPoolCreateInfo poolInfo;
poolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
poolInfo.queueFamilyIndex = computeKyuIndex;
vkCreateCommandPool(lDevice, &poolInfo, nullptr, &poolTag);

VkCommandBuffer commandBuffer;
VkCommandBufferAllocateInfo commandBuffersCiiInfo;
commandBuffersCiiInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
commandBuffersCiiInfo.commandBufferCount = 1;
commandBuffersCiiInfo.commandPool = poolTag;
commandBuffersCiiInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
vkAllocateCommandBuffers(lDevice, &commandBuffersCiiInfo, &commandBuffer);

VkCommandBufferBeginInfo commandBufferBeginInfo;
commandBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
vkBeginCommandBuffer(commandBuffer, &commandBufferBeginInfo);
vkCmdBindDescriptorSets(commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, layoutTag, 0, 1, &descriptorSet, 0, nullptr);
vkCmdBindPipeline(commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, tag);
vkCmdDispatch(commandBuffer, 1, 1, 1);
vkEndCommandBuffer(commandBuffer);

VkSubmitInfo submitInfo;
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submitInfo.commandBufferCount = 1;
submitInfo.pCommandBuffers = &commandBuffer;

float *buffer;
vkMapMemory(lDevice, memTag, 0, 4, 0, (void **)&buffer);

VkMappedMemoryRange range;
range.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
range.offset = 0;
range.size = 4;
range.memory = memTag;

while (true){
    vkInvalidateMappedMemoryRanges(lDevice, 1, &range);
    LOGI("Value :: %d", (int)buffer[0]);
    vkQueueWaitIdle(computeKyuTag);
    vkQueueSubmit(computeKyuTag, 1, &submitInfo, VK_NULL_HANDLE);
}

I've tried to make this as simple as possible and made no progress. Now I open it up to the community. aha

Please let me know of anything I’m missing…

A crash at (a second call to) vkBeginCommandBuffer often indicates usage outside of the Vulkan spec. If you run your application with validation layers enabled, I'm pretty sure you'll see several warnings and errors that should help you find the cause of your problems.
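For reference, enabling them is just a few lines at instance creation (a sketch; on older SDKs and Android NDKs the layer may instead be named VK_LAYER_LUNARG_standard_validation):

const char *layers[] = { "VK_LAYER_KHRONOS_validation" };

VkInstanceCreateInfo instanceInfo{}; // zero-initialize so pNext and flags hold valid values
instanceInfo.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
instanceInfo.enabledLayerCount = 1;
instanceInfo.ppEnabledLayerNames = layers;
// Optionally call vkEnumerateInstanceLayerProperties first to confirm the layer is installed.

VkInstance instance;
vkCreateInstance(&instanceInfo, nullptr, &instance);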

And looking at your code snippet I’m kinda sure your command order is a bit off. It looks like you’re trying to get the buffer value back before actually submitting the command buffer to the queue?
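With your identifiers, the usual order would be (a sketch; vkQueueWaitIdle works here, but a fence is the cleaner choice):

vkQueueSubmit(computeKyuTag, 1, &submitInfo, VK_NULL_HANDLE); // run the dispatch first
vkQueueWaitIdle(computeKyuTag);                               // wait for the GPU to finish
vkInvalidateMappedMemoryRanges(lDevice, 1, &range);           // then pull the result to the host
LOGI("Value :: %f", buffer[0]);                               // expected to print 3.0 once the rest is correct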

Now I have 2 problems… :frowning:

1: I only see one call to vkBeginCommandBuffer.

2: The crash is no longer in the same place. I took your advice and activated validation layers to full capacity! That cleared the errors, but not the crash. It's now detected at vkUpdateDescriptorSets: I get a fault addr error, even though all previous Vulkan calls succeeded.

No more errors are to be found; I've tweaked struct values trying to locate the crash, still no progress.

I'll continue to research, since it seems this isn't a problem the community has run into.

I found the trigger for the crash: it appears whenever I do not create a render pass.

Why does this dependency exist?

I've read that Vulkan, unlike OpenGL, does not require a draw call each frame and can be used strictly for compute-intensive workloads. http://www.duskborn.com/posts/a-simple-vulkan-compute-example/

Is this wrong? Or is this the result of a Vulkan implementation bug from the device manufacturer?

No, there is no need for a render pass in compute-only samples. I have tested my own headless compute sample on dozens of implementations, and all of them worked fine. So I'm sure it's an application bug and not a device bug. But that's hard to judge without the code and the actual error message you get.
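For what it's worth, the whole execution side of a headless dispatch needs nothing more than this (a sketch reusing your identifiers, with a fence instead of vkQueueWaitIdle):

VkFenceCreateInfo fenceInfo{}; // zero-initialized, unsignaled fence
fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
VkFence fence;
vkCreateFence(lDevice, &fenceInfo, nullptr, &fence);

vkQueueSubmit(computeKyuTag, 1, &submitInfo, fence);
vkWaitForFences(lDevice, 1, &fence, VK_TRUE, UINT64_MAX); // block until the dispatch completes
vkDestroyFence(lDevice, fence, nullptr);
// No render pass, framebuffer, or swapchain appears anywhere in this path.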

Okay…

I completely rebuilt the environment to just create a buffer, but now it crashes every time I try to allocate memory.

The code I am using is completely in line with existing tutorials, and I do not know where I am going wrong with any of this.

(This code simply creates a buffer, nothing else, but it returns:
Fatal signal 11 (SIGSEGV), code 1 (SEGV_MAPERR), fault addr 0x6 at vkAllocateMemory)

Here's my code:

  // Memory
  // :: Buffer Creation
  VkBuffer buffer;
  VkBufferCreateInfo bufferCreateInfo;
  bufferCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
  bufferCreateInfo.size = 4;
  bufferCreateInfo.queueFamilyIndexCount = 1;
  bufferCreateInfo.pQueueFamilyIndices = &device.queueFamilyIndex_;
  bufferCreateInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
  bufferCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
  vkCreateBuffer(device.device_, &bufferCreateInfo, nullptr, &buffer);

  // :: Buffer Memory Requirements
  VkMemoryRequirements memoryRequirements;
  vkGetBufferMemoryRequirements(device.device_, buffer, &memoryRequirements);

  // :: Physical Device Memory Properties
  VkPhysicalDeviceMemoryProperties physicalDeviceMemoryProperties;
  vkGetPhysicalDeviceMemoryProperties(device.gpuDevice_, &physicalDeviceMemoryProperties);

  // :: Get Memory Index
  VkMemoryPropertyFlagBits memoryPropertyFlagBits = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
  uint32_t memoryIndex = 0;
  while (memoryIndex < physicalDeviceMemoryProperties.memoryTypeCount){
    if (memoryRequirements.memoryTypeBits & (1 << memoryIndex) && (physicalDeviceMemoryProperties.memoryTypes[memoryIndex].propertyFlags & memoryPropertyFlagBits) == memoryPropertyFlagBits)
      break;
    ++memoryIndex;
  }

  VkDeviceMemory deviceMemory;
  VkMemoryAllocateInfo memoryAllocateInfo;
  memoryAllocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
  memoryAllocateInfo.allocationSize = 4;
  memoryAllocateInfo.memoryTypeIndex = memoryIndex;
  vkAllocateMemory(device.device_, &memoryAllocateInfo, nullptr, &deviceMemory);
  vkBindBufferMemory(device.device_, buffer, deviceMemory, 0);

This is running in the same environment as a working tutorial code sample, but the moment I add this block of code, it crashes.
