glsl: Address more feedback. Implement indexed texture reads
parent 7d89a82a48
commit e81c73a874
@@ -559,53 +559,45 @@ std::string EmitContext::DefineGlobalMemoryFunctions() {
 }
 
 void EmitContext::SetupImages(Bindings& bindings) {
-    image_buffer_bindings.reserve(info.image_buffer_descriptors.size());
+    image_buffers.reserve(info.image_buffer_descriptors.size());
     for (const auto& desc : info.image_buffer_descriptors) {
-        image_buffer_bindings.push_back(bindings.image);
-        const auto indices{bindings.image + desc.count};
+        image_buffers.push_back({bindings.image, desc.count});
         const auto format{ImageFormatString(desc.format)};
-        for (u32 index = bindings.image; index < indices; ++index) {
-            header += fmt::format("layout(binding={}{}) uniform uimageBuffer img{};",
-                                  bindings.image, format, index);
-        }
+        const auto array_decorator{desc.count > 1 ? fmt::format("[{}]", desc.count) : ""};
+        header += fmt::format("layout(binding={}{}) uniform uimageBuffer img{}{};", bindings.image,
+                              format, bindings.image, array_decorator);
         bindings.image += desc.count;
     }
-    image_bindings.reserve(info.image_descriptors.size());
+    images.reserve(info.image_descriptors.size());
     for (const auto& desc : info.image_descriptors) {
-        image_bindings.push_back(bindings.image);
+        images.push_back({bindings.image, desc.count});
         const auto format{ImageFormatString(desc.format)};
         const auto image_type{ImageType(desc.type)};
         const auto qualifier{desc.is_written ? "" : "readonly "};
-        const auto indices{bindings.image + desc.count};
-        for (u32 index = bindings.image; index < indices; ++index) {
-            header += fmt::format("layout(binding={}{})uniform {}{} img{};", bindings.image, format,
-                                  qualifier, image_type, index);
-        }
+        const auto array_decorator{desc.count > 1 ? fmt::format("[{}]", desc.count) : ""};
+        header += fmt::format("layout(binding={}{})uniform {}{} img{}{};", bindings.image, format,
+                              qualifier, image_type, bindings.image, array_decorator);
         bindings.image += desc.count;
     }
 }
 
 void EmitContext::SetupTextures(Bindings& bindings) {
-    texture_buffer_bindings.reserve(info.texture_buffer_descriptors.size());
+    texture_buffers.reserve(info.texture_buffer_descriptors.size());
     for (const auto& desc : info.texture_buffer_descriptors) {
-        texture_buffer_bindings.push_back(bindings.texture);
+        texture_buffers.push_back({bindings.texture, desc.count});
         const auto sampler_type{SamplerType(TextureType::Buffer, false)};
-        const auto indices{bindings.texture + desc.count};
-        for (u32 index = bindings.texture; index < indices; ++index) {
-            header += fmt::format("layout(binding={}) uniform {} tex{};", bindings.texture,
-                                  sampler_type, index);
-        }
+        const auto array_decorator{desc.count > 1 ? fmt::format("[{}]", desc.count) : ""};
+        header += fmt::format("layout(binding={}) uniform {} tex{}{};", bindings.texture,
+                              sampler_type, bindings.texture, array_decorator);
         bindings.texture += desc.count;
     }
-    texture_bindings.reserve(info.texture_descriptors.size());
+    textures.reserve(info.texture_descriptors.size());
     for (const auto& desc : info.texture_descriptors) {
+        textures.push_back({bindings.texture, desc.count});
         const auto sampler_type{SamplerType(desc.type, desc.is_depth)};
-        texture_bindings.push_back(bindings.texture);
-        const auto indices{bindings.texture + desc.count};
-        for (u32 index = bindings.texture; index < indices; ++index) {
-            header += fmt::format("layout(binding={}) uniform {} tex{};", bindings.texture,
-                                  sampler_type, index);
-        }
+        const auto array_decorator{desc.count > 1 ? fmt::format("[{}]", desc.count) : ""};
+        header += fmt::format("layout(binding={}) uniform {} tex{}{};", bindings.texture,
+                              sampler_type, bindings.texture, array_decorator);
         bindings.texture += desc.count;
     }
 }
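Note: for a descriptor with count > 1, the setup path above now emits a single arrayed uniform instead of one uniquely named uniform per element. A minimal standalone sketch of the declaration string the new format call produces, assuming a hypothetical sampler2D descriptor at binding 4 with a count of 3 (binding, count, and sampler type are illustrative values, not taken from the commit):

#include <fmt/format.h>
#include <string>

int main() {
    const unsigned binding = 4;  // hypothetical first binding of the descriptor
    const unsigned count = 3;    // hypothetical descriptor element count
    const std::string sampler_type = "sampler2D";
    const std::string array_decorator = count > 1 ? fmt::format("[{}]", count) : "";
    // Prints: layout(binding=4) uniform sampler2D tex4[3];
    fmt::print("layout(binding={}) uniform {} tex{}{};\n", binding, sampler_type, binding,
               array_decorator);
    return 0;
}

Later consumers then subscript into tex4 rather than naming tex5 or tex6 directly, which is what the reworked Texture/Image helpers further down rely on.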
@@ -36,6 +36,11 @@ struct GenericElementInfo {
     u32 num_components{};
 };
 
+struct TextureImageDefinition {
+    u32 binding;
+    u32 count;
+};
+
 class EmitContext {
 public:
     explicit EmitContext(IR::Program& program, Bindings& bindings, const Profile& profile_,
@@ -142,10 +147,10 @@ public:
     std::string_view stage_name = "invalid";
     std::string_view position_name = "gl_Position";
 
-    std::vector<u32> texture_buffer_bindings;
-    std::vector<u32> image_buffer_bindings;
-    std::vector<u32> texture_bindings;
-    std::vector<u32> image_bindings;
+    std::vector<TextureImageDefinition> texture_buffers;
+    std::vector<TextureImageDefinition> image_buffers;
+    std::vector<TextureImageDefinition> textures;
+    std::vector<TextureImageDefinition> images;
     std::array<std::array<GenericElementInfo, 4>, 32> output_generics{};
 
     bool uses_y_direction{};
@@ -12,20 +12,18 @@
 
 namespace Shader::Backend::GLSL {
 namespace {
-std::string Texture(EmitContext& ctx, const IR::TextureInstInfo& info) {
-    if (info.type == TextureType::Buffer) {
-        return fmt::format("tex{}", ctx.texture_buffer_bindings.at(info.descriptor_index));
-    } else {
-        return fmt::format("tex{}", ctx.texture_bindings.at(info.descriptor_index));
-    }
+std::string Texture(EmitContext& ctx, const IR::TextureInstInfo& info, const IR::Value& index) {
+    const auto def{info.type == TextureType::Buffer ? ctx.texture_buffers.at(info.descriptor_index)
+                                                    : ctx.textures.at(info.descriptor_index)};
+    const auto index_offset{def.count > 1 ? fmt::format("[{}]", ctx.var_alloc.Consume(index)) : ""};
+    return fmt::format("tex{}{}", def.binding, index_offset);
 }
 
-std::string Image(EmitContext& ctx, const IR::TextureInstInfo& info) {
-    if (info.type == TextureType::Buffer) {
-        return fmt::format("img{}", ctx.image_buffer_bindings.at(info.descriptor_index));
-    } else {
-        return fmt::format("img{}", ctx.image_bindings.at(info.descriptor_index));
-    }
+std::string Image(EmitContext& ctx, const IR::TextureInstInfo& info, const IR::Value& index) {
+    const auto def{info.type == TextureType::Buffer ? ctx.image_buffers.at(info.descriptor_index)
+                                                    : ctx.images.at(info.descriptor_index)};
+    const auto index_offset{def.count > 1 ? fmt::format("[{}]", ctx.var_alloc.Consume(index)) : ""};
+    return fmt::format("img{}{}", def.binding, index_offset);
 }
 
 std::string CastToIntVec(std::string_view value, const IR::TextureInstInfo& info) {
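A rough self-contained sketch of the access strings the reworked helper can return, using a hypothetical stand-in for TextureImageDefinition, a binding of 2, and a consumed index variable named idx (all three are made-up example values, not from the commit):

#include <fmt/format.h>
#include <string>

// Hypothetical stand-in for the commit's TextureImageDefinition.
struct Def {
    unsigned binding;
    unsigned count;
};

std::string TextureName(const Def& def, const std::string& index_var) {
    // Mirrors the new helper: only arrayed descriptors get a subscript.
    const std::string index_offset = def.count > 1 ? fmt::format("[{}]", index_var) : "";
    return fmt::format("tex{}{}", def.binding, index_offset);
}

int main() {
    fmt::print("{}\n", TextureName({2, 1}, "idx")); // tex2
    fmt::print("{}\n", TextureName({2, 4}, "idx")); // tex2[idx]
    return 0;
}

The single-element case keeps the old tex<binding> spelling, so shaders without indexed reads should emit the same GLSL as before.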
@@ -137,14 +135,14 @@ IR::Inst* PrepareSparse(IR::Inst& inst) {
 }
 } // Anonymous namespace
 
-void EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst& inst,
-                                [[maybe_unused]] const IR::Value& index, std::string_view coords,
-                                std::string_view bias_lc, const IR::Value& offset) {
+void EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
+                                std::string_view coords, std::string_view bias_lc,
+                                const IR::Value& offset) {
     const auto info{inst.Flags<IR::TextureInstInfo>()};
     if (info.has_lod_clamp) {
         throw NotImplementedException("EmitImageSampleImplicitLod Lod clamp samples");
     }
-    const auto texture{Texture(ctx, info)};
+    const auto texture{Texture(ctx, info, index)};
     const auto bias{info.has_bias ? fmt::format(",{}", bias_lc) : ""};
     const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)};
     const auto sparse_inst{PrepareSparse(inst)};
@@ -175,9 +173,9 @@ void EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst& inst,
     }
 }
 
-void EmitImageSampleExplicitLod(EmitContext& ctx, IR::Inst& inst,
-                                [[maybe_unused]] const IR::Value& index, std::string_view coords,
-                                std::string_view lod_lc, const IR::Value& offset) {
+void EmitImageSampleExplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
+                                std::string_view coords, std::string_view lod_lc,
+                                const IR::Value& offset) {
     const auto info{inst.Flags<IR::TextureInstInfo>()};
     if (info.has_bias) {
         throw NotImplementedException("EmitImageSampleExplicitLod Bias texture samples");
@@ -185,7 +183,7 @@ void EmitImageSampleExplicitLod(EmitContext& ctx, IR::Inst& inst,
     if (info.has_lod_clamp) {
         throw NotImplementedException("EmitImageSampleExplicitLod Lod clamp samples");
     }
-    const auto texture{Texture(ctx, info)};
+    const auto texture{Texture(ctx, info, index)};
     const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)};
     const auto sparse_inst{PrepareSparse(inst)};
     if (!sparse_inst) {
@@ -208,8 +206,7 @@ void EmitImageSampleExplicitLod(EmitContext& ctx, IR::Inst& inst,
     }
 }
 
-void EmitImageSampleDrefImplicitLod(EmitContext& ctx, IR::Inst& inst,
-                                    [[maybe_unused]] const IR::Value& index,
+void EmitImageSampleDrefImplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                                     std::string_view coords, std::string_view dref,
                                     std::string_view bias_lc, const IR::Value& offset) {
     const auto info{inst.Flags<IR::TextureInstInfo>()};
@@ -223,7 +220,7 @@ void EmitImageSampleDrefImplicitLod(EmitContext& ctx, IR::Inst& inst,
     if (info.has_lod_clamp) {
         throw NotImplementedException("EmitImageSampleDrefImplicitLod Lod clamp samples");
     }
-    const auto texture{Texture(ctx, info)};
+    const auto texture{Texture(ctx, info, index)};
     const auto bias{info.has_bias ? fmt::format(",{}", bias_lc) : ""};
     const bool needs_shadow_ext{NeedsShadowLodExt(info.type)};
     const auto cast{needs_shadow_ext ? "vec4" : "vec3"};
@@ -263,8 +260,7 @@ void EmitImageSampleDrefImplicitLod(EmitContext& ctx, IR::Inst& inst,
     }
 }
 
-void EmitImageSampleDrefExplicitLod(EmitContext& ctx, IR::Inst& inst,
-                                    [[maybe_unused]] const IR::Value& index,
+void EmitImageSampleDrefExplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                                     std::string_view coords, std::string_view dref,
                                     std::string_view lod_lc, const IR::Value& offset) {
     const auto info{inst.Flags<IR::TextureInstInfo>()};
@@ -278,7 +274,7 @@ void EmitImageSampleDrefExplicitLod(EmitContext& ctx, IR::Inst& inst,
     if (info.has_lod_clamp) {
         throw NotImplementedException("EmitImageSampleDrefExplicitLod Lod clamp samples");
     }
-    const auto texture{Texture(ctx, info)};
+    const auto texture{Texture(ctx, info, index)};
     const bool needs_shadow_ext{NeedsShadowLodExt(info.type)};
     const bool use_grad{!ctx.profile.support_gl_texture_shadow_lod && needs_shadow_ext};
     const auto cast{needs_shadow_ext ? "vec4" : "vec3"};
@@ -313,10 +309,10 @@ void EmitImageSampleDrefExplicitLod(EmitContext& ctx, IR::Inst& inst,
     }
 }
 
-void EmitImageGather(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index,
+void EmitImageGather(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                      std::string_view coords, const IR::Value& offset, const IR::Value& offset2) {
     const auto info{inst.Flags<IR::TextureInstInfo>()};
-    const auto texture{Texture(ctx, info)};
+    const auto texture{Texture(ctx, info, index)};
     const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)};
     const auto sparse_inst{PrepareSparse(inst)};
     if (!sparse_inst) {
@@ -355,11 +351,11 @@ void EmitImageGather(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR
                 info.gather_component);
     }
 }
 
-void EmitImageGatherDref(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index,
+void EmitImageGatherDref(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                          std::string_view coords, const IR::Value& offset, const IR::Value& offset2,
                          std::string_view dref) {
     const auto info{inst.Flags<IR::TextureInstInfo>()};
-    const auto texture{Texture(ctx, info)};
+    const auto texture{Texture(ctx, info, index)};
     const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)};
     const auto sparse_inst{PrepareSparse(inst)};
     if (!sparse_inst) {
@@ -395,7 +391,7 @@ void EmitImageGatherDref(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] cons
              *sparse_inst, texture, CastToIntVec(coords, info), dref, offsets, texel);
 }
 
-void EmitImageFetch(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index,
+void EmitImageFetch(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                     std::string_view coords, std::string_view offset, std::string_view lod,
                     [[maybe_unused]] std::string_view ms) {
     const auto info{inst.Flags<IR::TextureInstInfo>()};
@@ -405,7 +401,7 @@ void EmitImageFetch(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR:
     if (info.has_lod_clamp) {
         throw NotImplementedException("EmitImageFetch Lod clamp samples");
     }
-    const auto texture{Texture(ctx, info)};
+    const auto texture{Texture(ctx, info, index)};
     const auto sparse_inst{PrepareSparse(inst)};
     const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)};
     if (!sparse_inst) {
@@ -433,10 +429,10 @@ void EmitImageFetch(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR:
     }
 }
 
-void EmitImageQueryDimensions(EmitContext& ctx, IR::Inst& inst,
-                              [[maybe_unused]] const IR::Value& index, std::string_view lod) {
+void EmitImageQueryDimensions(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
+                              std::string_view lod) {
     const auto info{inst.Flags<IR::TextureInstInfo>()};
-    const auto texture{Texture(ctx, info)};
+    const auto texture{Texture(ctx, info, index)};
     switch (info.type) {
     case TextureType::Color1D:
         return ctx.AddU32x4(
@@ -460,14 +456,14 @@ void EmitImageQueryDimensions(EmitContext& ctx, IR::Inst& inst,
     throw LogicError("Unspecified image type {}", info.type.Value());
 }
 
-void EmitImageQueryLod(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index,
+void EmitImageQueryLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                        std::string_view coords) {
     const auto info{inst.Flags<IR::TextureInstInfo>()};
-    const auto texture{Texture(ctx, info)};
+    const auto texture{Texture(ctx, info, index)};
     return ctx.AddF32x4("{}=vec4(textureQueryLod({},{}),0.0,0.0);", inst, texture, coords);
 }
 
-void EmitImageGradient(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index,
+void EmitImageGradient(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                        std::string_view coords, const IR::Value& derivatives,
                        const IR::Value& offset, [[maybe_unused]] const IR::Value& lod_clamp) {
     const auto info{inst.Flags<IR::TextureInstInfo>()};
@@ -481,7 +477,7 @@ void EmitImageGradient(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const
     if (!offset.IsEmpty()) {
         throw NotImplementedException("EmitImageGradient offset");
     }
-    const auto texture{Texture(ctx, info)};
+    const auto texture{Texture(ctx, info, index)};
     const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)};
     const bool multi_component{info.num_derivates > 1 || info.has_lod_clamp};
     const auto derivatives_vec{ctx.var_alloc.Consume(derivatives)};
@@ -494,65 +490,60 @@ void EmitImageGradient(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const
     }
 }
 
-void EmitImageRead(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index,
+void EmitImageRead(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                    std::string_view coords) {
     const auto info{inst.Flags<IR::TextureInstInfo>()};
     const auto sparse_inst{PrepareSparse(inst)};
     if (sparse_inst) {
         throw NotImplementedException("EmitImageRead Sparse");
     }
-    const auto image{Image(ctx, info)};
+    const auto image{Image(ctx, info, index)};
     ctx.AddU32x4("{}=uvec4(imageLoad({},{}));", inst, image, TexelFetchCastToInt(coords, info));
 }
 
-void EmitImageWrite(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index,
+void EmitImageWrite(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                     std::string_view coords, std::string_view color) {
     const auto info{inst.Flags<IR::TextureInstInfo>()};
-    const auto image{Image(ctx, info)};
+    const auto image{Image(ctx, info, index)};
     ctx.Add("imageStore({},{},{});", image, TexelFetchCastToInt(coords, info), color);
 }
 
-void EmitImageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst,
-                           [[maybe_unused]] const IR::Value& index, std::string_view coords,
-                           std::string_view value) {
+void EmitImageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
+                           std::string_view coords, std::string_view value) {
     const auto info{inst.Flags<IR::TextureInstInfo>()};
-    const auto image{Image(ctx, info)};
+    const auto image{Image(ctx, info, index)};
     ctx.AddU32("{}=imageAtomicAdd({},{},{});", inst, image, TexelFetchCastToInt(coords, info),
                value);
 }
 
-void EmitImageAtomicSMin32(EmitContext& ctx, IR::Inst& inst,
-                           [[maybe_unused]] const IR::Value& index, std::string_view coords,
-                           std::string_view value) {
+void EmitImageAtomicSMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
+                           std::string_view coords, std::string_view value) {
     const auto info{inst.Flags<IR::TextureInstInfo>()};
-    const auto image{Image(ctx, info)};
+    const auto image{Image(ctx, info, index)};
     ctx.AddU32("{}=imageAtomicMin({},{},int({}));", inst, image, TexelFetchCastToInt(coords, info),
               value);
 }
 
-void EmitImageAtomicUMin32(EmitContext& ctx, IR::Inst& inst,
-                           [[maybe_unused]] const IR::Value& index, std::string_view coords,
-                           std::string_view value) {
+void EmitImageAtomicUMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
+                           std::string_view coords, std::string_view value) {
     const auto info{inst.Flags<IR::TextureInstInfo>()};
-    const auto image{Image(ctx, info)};
+    const auto image{Image(ctx, info, index)};
     ctx.AddU32("{}=imageAtomicMin({},{},uint({}));", inst, image, TexelFetchCastToInt(coords, info),
               value);
 }
 
-void EmitImageAtomicSMax32(EmitContext& ctx, IR::Inst& inst,
-                           [[maybe_unused]] const IR::Value& index, std::string_view coords,
-                           std::string_view value) {
+void EmitImageAtomicSMax32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
+                           std::string_view coords, std::string_view value) {
     const auto info{inst.Flags<IR::TextureInstInfo>()};
-    const auto image{Image(ctx, info)};
+    const auto image{Image(ctx, info, index)};
     ctx.AddU32("{}=imageAtomicMax({},{},int({}));", inst, image, TexelFetchCastToInt(coords, info),
               value);
 }
 
-void EmitImageAtomicUMax32(EmitContext& ctx, IR::Inst& inst,
-                           [[maybe_unused]] const IR::Value& index, std::string_view coords,
-                           std::string_view value) {
+void EmitImageAtomicUMax32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
+                           std::string_view coords, std::string_view value) {
     const auto info{inst.Flags<IR::TextureInstInfo>()};
-    const auto image{Image(ctx, info)};
+    const auto image{Image(ctx, info, index)};
     ctx.AddU32("{}=imageAtomicMax({},{},uint({}));", inst, image, TexelFetchCastToInt(coords, info),
               value);
 }
@@ -567,35 +558,34 @@ void EmitImageAtomicDec32(EmitContext&, IR::Inst&, const IR::Value&, std::string
     NotImplemented();
 }
 
-void EmitImageAtomicAnd32(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index,
+void EmitImageAtomicAnd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                           std::string_view coords, std::string_view value) {
     const auto info{inst.Flags<IR::TextureInstInfo>()};
-    const auto image{Image(ctx, info)};
+    const auto image{Image(ctx, info, index)};
     ctx.AddU32("{}=imageAtomicAnd({},{},{});", inst, image, TexelFetchCastToInt(coords, info),
               value);
 }
 
-void EmitImageAtomicOr32(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index,
+void EmitImageAtomicOr32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                          std::string_view coords, std::string_view value) {
     const auto info{inst.Flags<IR::TextureInstInfo>()};
-    const auto image{Image(ctx, info)};
+    const auto image{Image(ctx, info, index)};
     ctx.AddU32("{}=imageAtomicOr({},{},{});", inst, image, TexelFetchCastToInt(coords, info),
               value);
 }
 
-void EmitImageAtomicXor32(EmitContext& ctx, IR::Inst& inst, [[maybe_unused]] const IR::Value& index,
+void EmitImageAtomicXor32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
                           std::string_view coords, std::string_view value) {
     const auto info{inst.Flags<IR::TextureInstInfo>()};
-    const auto image{Image(ctx, info)};
+    const auto image{Image(ctx, info, index)};
     ctx.AddU32("{}=imageAtomicXor({},{},{});", inst, image, TexelFetchCastToInt(coords, info),
               value);
 }
 
-void EmitImageAtomicExchange32(EmitContext& ctx, IR::Inst& inst,
-                               [[maybe_unused]] const IR::Value& index, std::string_view coords,
-                               std::string_view value) {
+void EmitImageAtomicExchange32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
+                               std::string_view coords, std::string_view value) {
     const auto info{inst.Flags<IR::TextureInstInfo>()};
-    const auto image{Image(ctx, info)};
+    const auto image{Image(ctx, info, index)};
     ctx.AddU32("{}=imageAtomicExchange({},{},{});", inst, image, TexelFetchCastToInt(coords, info),
               value);
 }
@@ -28,12 +28,12 @@ void EmitSelectU16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] std::stri
 
 void EmitSelectU32(EmitContext& ctx, IR::Inst& inst, std::string_view cond,
                    std::string_view true_value, std::string_view false_value) {
-    ctx.AddU32("{}={}?uint({}):uint({});", inst, cond, true_value, false_value);
+    ctx.AddU32("{}={}?{}:{};", inst, cond, true_value, false_value);
 }
 
 void EmitSelectU64(EmitContext& ctx, IR::Inst& inst, std::string_view cond,
                    std::string_view true_value, std::string_view false_value) {
-    ctx.AddU64("{}={}?uint64_t({}):uint64_t({});", inst, cond, true_value, false_value);
+    ctx.AddU64("{}={}?{}:{};", inst, cond, true_value, false_value);
 }
 
 void EmitSelectF16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] std::string_view cond,
@@ -9,6 +9,17 @@
 #include "shader_recompiler/frontend/ir/value.h"
 
 namespace Shader::Backend::GLSL {
+namespace {
+constexpr char cas_loop[]{"for(;;){{uint old_value={};uint "
+                          "cas_result=atomicCompSwap({},old_value,bitfieldInsert({},{},{},{}));"
+                          "if(cas_result==old_value){{break;}}}}"};
+
+void SharedWriteCas(EmitContext& ctx, std::string_view offset, std::string_view value,
+                    std::string_view bit_offset, u32 num_bits) {
+    const auto smem{fmt::format("smem[{}>>2]", offset)};
+    ctx.Add(cas_loop, smem, smem, smem, value, bit_offset, num_bits);
+}
+} // Anonymous namespace
 void EmitLoadSharedU8(EmitContext& ctx, IR::Inst& inst, std::string_view offset) {
     ctx.AddU32("{}=bitfieldExtract(smem[{}>>2],int({}%4)*8,8);", inst, offset, offset);
 }
@@ -39,13 +50,13 @@ void EmitLoadSharedU128(EmitContext& ctx, IR::Inst& inst, std::string_view offse
 }
 
 void EmitWriteSharedU8(EmitContext& ctx, std::string_view offset, std::string_view value) {
-    ctx.Add("smem[{}>>2]=bitfieldInsert(smem[{}>>2],{},int({}%4)*8,8);", offset, offset, value,
-            offset);
+    const auto bit_offset{fmt::format("int({}%4)*8", offset)};
+    SharedWriteCas(ctx, offset, value, bit_offset, 8);
 }
 
 void EmitWriteSharedU16(EmitContext& ctx, std::string_view offset, std::string_view value) {
-    ctx.Add("smem[{}>>2]=bitfieldInsert(smem[{}>>2],{},int(({}>>1)%2)*16,16);", offset, offset,
-            value, offset);
+    const auto bit_offset{fmt::format("int(({}>>1)%2)*16", offset)};
+    SharedWriteCas(ctx, offset, value, bit_offset, 16);
 }
 
 void EmitWriteSharedU32(EmitContext& ctx, std::string_view offset, std::string_view value) {
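The byte and halfword shared-memory stores above previously rewrote the whole 32-bit word with a plain read-modify-write bitfieldInsert, which can drop a concurrent write to another byte of the same word; the cas_loop template instead retries an atomicCompSwap until the insert takes effect (the rationale is inferred here, not stated in the commit). A minimal sketch of the GLSL that the template expands to for an 8-bit store, with hypothetical operand names off and val:

#include <fmt/core.h>
#include <string>

int main() {
    // Hypothetical operands for an 8-bit shared-memory write at byte offset `off`.
    const std::string smem = "smem[off>>2]";
    const std::string value = "val";
    const std::string bit_offset = "int(off%4)*8";
    const int num_bits = 8;
    // Prints the same retry loop the commit's cas_loop template produces:
    // for(;;){uint old_value=smem[off>>2];uint cas_result=atomicCompSwap(smem[off>>2],old_value,
    //   bitfieldInsert(smem[off>>2],val,int(off%4)*8,8));if(cas_result==old_value){break;}}
    fmt::print("for(;;){{uint old_value={};uint cas_result=atomicCompSwap({},old_value,"
               "bitfieldInsert({},{},{},{}));if(cas_result==old_value){{break;}}}}\n",
               smem, smem, smem, value, bit_offset, num_bits);
    return 0;
}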
@@ -13,7 +13,7 @@
 
 namespace OpenGL {
 
-static void LogShader(GLuint shader, std::optional<std::string_view> code = {}) {
+static void LogShader(GLuint shader, std::string_view code = {}) {
     GLint shader_status{};
     glGetShaderiv(shader, GL_COMPILE_STATUS, &shader_status);
     if (shader_status == GL_FALSE) {
@@ -28,8 +28,8 @@ static void LogShader(GLuint shader, std::optional<std::string_view> code = {})
     glGetShaderInfoLog(shader, log_length, nullptr, log.data());
     if (shader_status == GL_FALSE) {
         LOG_ERROR(Render_OpenGL, "{}", log);
-        if (code.has_value()) {
-            LOG_INFO(Render_OpenGL, "\n{}", *code);
+        if (!code.empty()) {
+            LOG_INFO(Render_OpenGL, "\n{}", code);
         }
     } else {
         LOG_WARNING(Render_OpenGL, "{}", log);