0

I have problem with creating diffuse irradiance map. As reference I am using https://learnopengl.com/PBR/IBL/Diffuse-irradiance and https://github.com/TheEvilBanana/PhysicallyBasedRendering/blob/master/Game.cpp#L490.

My map convolution looks not as it is supposed. I have black dot on every face. I use 2048x2048 which I render to 256x256 Texture2D and save each face which I then assemble. Below debug image. Up is +Y, Down is -Y, Middle is from the left -X, -Z, +X, +Z. What can I do to fix it? Also it looks like I am rendering same face twice somehow because +(plus) faces looks same like their -(minus) faces.

Skybox / irradiance map

Render to texture method:

bool GraphicsClass::ConvoluteShader(ID3D11ShaderResourceView * srcTex, RenderTextureClass * dstTex)
{
ifstream skyboxFile;
skyboxFile.open("Skyboxes/conv_cubemap.dds");
if (skyboxFile.fail() == false)
    return true;

XMMATRIX worldMatrix, viewMatrix, projectionMatrix;
bool result;

XMFLOAT3 position = XMFLOAT3(0, 0, 0);
XMFLOAT3 up[] = { XMFLOAT3(0, 1, 0), XMFLOAT3(0, 0, 1), XMFLOAT3(1, 0, 0), XMFLOAT3(0, -1, 0), XMFLOAT3(0, 0, -1), XMFLOAT3(-1, 0, 0) };
wchar_t* filenames[] = { L"Skyboxes/conv_posx.dds", L"Skyboxes/conv_posy.dds" , L"Skyboxes/conv_posz.dds", L"Skyboxes/conv_negx.dds", L"Skyboxes/conv_negy.dds", L"Skyboxes/conv_negz.dds" };
for (int i = 0; i < 6; i++)
{
    dstTex->SetRenderTarget(m_D3D->GetDeviceContext(), m_D3D->GetDepthStencilView());
    dstTex->ClearRenderTarget(m_D3D->GetDeviceContext(), m_D3D->GetDepthStencilView(), 1.0f, 0.0f, 0.0f, 1.0f);

    m_Camera->Render();
    m_Camera->GetViewMatrix(viewMatrix);
    m_D3D->GetWorldMatrix(worldMatrix);
    m_D3D->GetProjectionMatrix(projectionMatrix);

    m_convoluteShader->SetUpVector(up[i]);

    m_D3D->ChangeRasterizerCulling(D3D11_CULL_BACK);
    m_D3D->ChangeDepthStencilComparison(D3D11_COMPARISON_LESS_EQUAL);

    m_quadModel->Render(m_D3D->GetDeviceContext());
    m_convoluteShader->Render(m_D3D->GetDeviceContext(), m_quadModel->GetIndexCount(), worldMatrix, viewMatrix, projectionMatrix);

    m_D3D->ChangeRasterizerCulling(D3D11_CULL_BACK);
    m_D3D->ChangeDepthStencilComparison(D3D11_COMPARISON_LESS);

    m_D3D->SetBackBufferRenderTarget();
    m_D3D->ResetViewport();

    m_renderTexturePreview->BindTexture(dstTex->GetShaderResourceView());

    SaveDDSTextureToFile(m_D3D->GetDeviceContext(), dstTex->GetShaderResource(), filenames[i]);
}

system("texassemble cube -w 256 -h 256 -f R8G8B8A8_UNORM -o Skyboxes/conv_cubemap.dds Skyboxes/conv_posx.dds Skyboxes/conv_negx.dds Skyboxes/conv_posy.dds Skyboxes/conv_negy.dds Skyboxes/conv_posz.dds Skyboxes/conv_negz.dds");

Vertex shader:

cbuffer MatrixBuffer
{
    matrix worldMatrix;
    matrix viewMatrix;
    matrix projectionMatrix;
};

struct VertexInputType
{
    float4 position : POSITION;    
};

struct PixelInputType
{
    float4 positionSV : SV_POSITION;
    float4 position : POSITION;
};


PixelInputType ColorVertexShader(VertexInputType input)
{
    PixelInputType output;

    output.position = input.position;
    input.position.w = 1.0f;

    output.positionSV = mul(input.position, worldMatrix).xyww;
    output.positionSV = mul(output.positionSV, viewMatrix).xyww;
    output.positionSV = mul(output.positionSV, projectionMatrix).xyww;

    return output;
}

Pixel Shader:

TextureCube shaderTexture;
SamplerState SampleType;

cbuffer UpVectorBuffer
{
    float3 g_upVectorVal;
    float g_upVectorPadding;
};

struct PixelInputType
{
    float4 positionSV : SV_POSITION;
    float4 position : POSITION;
};

static const float PI = 3.14159265359f;

float4 ColorPixelShader(PixelInputType input) : SV_TARGET
{
    float3 normal = normalize(input.position.xyz);

    float3 irradiance = float3(0.0f, 0.0f, 0.0f);

    float3 up = g_upVectorVal;
    float3 right = cross(up, normal);
    up = cross(normal, right);

    float sampleDelta = 0.025f;
    float nrSamples = 0.0f;
    for (float phi = 0.0f; phi < 2.0 * PI; phi += sampleDelta)
    {
        for (float theta = 0.0f; theta < 0.5 * PI; theta += sampleDelta)
        {
            float3 tangentSample = float3(sin(theta) * cos(phi), sin(theta) * sin(phi), cos(theta));
            float3 sampleVec = (tangentSample.x * right) + (tangentSample.y * up) + (tangentSample.z * normal);

            irradiance += shaderTexture.Sample(SampleType, sampleVec).rgb * cos(theta) * sin(theta);
            nrSamples++;
        }
    }
    irradiance = PI * irradiance * (1.0f / nrSamples);
    return float4(irradiance, 1.0f);
}
  • output.positionSV = mul(input.position, worldMatrix).xyww; << Should this not be xyzw ? – PaulHK Feb 20 '19 at 12:23
  • Unfortunatelly it isn't helpful. I believe that xyww component is needed because we are forcing 'z position' to be equal to 1 to make sure that object is a indefinetely far skybox which isn't moving. – DirectX_Programmer Feb 20 '19 at 19:14
  • These are 3d coordinates and I am guessing you are projecting 6 sides of a cube, so Z=1 is only true for one of the faces ? Or am I misunderstanding the vertex inputs ? One thing you could try is to output the world space coordinates of each pixel as a RGB colour (e.g. R=X, G=Y, B=Z) to test at least the 3d inputs are correct – PaulHK Feb 21 '19 at 02:00
  • 1
    @PaulHK is right, you are discarding `z` far too early. View and projection matrix don't apply correctly due to that. Also, you have a numerical issue, `up` and `normal` are parallel for the center of two of the surfaces. – Ext3h Feb 25 '19 at 08:16

1 Answers1

1

I've actually managed to find problem thanks to @PaulHK, thanks! I decided to pass which face I am currently working on and setting then coordinates manually per face. It is awful but it works rather good and since it is prefiltered and not done in runtime I found it good enough:

[branch] if (g_upVectorVal.z == 1.0f)
{
    input.position.z = -1.0f;
}
[branch] if (g_upVectorVal.z == -1.0f)
{
    input.position.z = 1.0f;
}
[branch] if (g_upVectorVal.x == 1.0f)
{
    input.position.z = input.position.x;
    input.position.x = -1.0f;
}
[branch] if (g_upVectorVal.x == -1.0f)
{
    input.position.z = -input.position.x;
    input.position.x = 1.0f;
}
[branch] if (g_upVectorVal.y == 1.0f)
{
    input.position.z = -input.position.y;
    input.position.y = -1.0f;
}
[branch] if (g_upVectorVal.y == -1.0f)
{
    input.position.z = -input.position.y;
    input.position.y = 1.0f;
}

About discarding z coordinate too early - since I decided to pass my data through g_upVectorVal variable I has no need for view/projection space so I discarded them fully. I am just using code below for my vertex shader:

PixelInputType ColorVertexShader(VertexInputType input)
{
    PixelInputType output;

    output.position = input.position;
    input.position.w = 1.0f;

    output.positionSV = mul(input.position, worldMatrix).xyww;

    return output;
}

IBL Diffuse