Custom shader - Unity - Diffuse+Lightmap+normal+cubemap - c++

I've been working on a custom shader for a project using Unity 4.6, because Unity's built-in shaders offer a great variety of options, but not the one I'm looking for.
I've looked on Stack Overflow for my shader's issue, but every question seems to be about a tricky, highly technical shader problem. I think mine is quite simple (for an experienced developer) but hasn't been posted yet.
Here is the problem:
I want to merge two shaders to get a "Diffuse + Normal + Cubemap + Lightmap" shader.
So, on one side I have a "Diffuse + NormalMap + LightMap" shader which looks like this (it's the legacy Lightmapped/Bumped Specular shader with a little tweaking to expose the specular shininess):
Shader "Legacy Shaders/Lightmapped/Custom/BumpedSpec" {
Properties {
_Color ("Main Color", Color) = (1,1,1,1)
_SpecColor ("Specular Color", Color) = (0.5, 0.5, 0.5, 1)
_Shininess ("Shininess", Range (0.03, 1)) = 0.078125
_MainTex ("Base (RGB)", 2D) = "white" {}
_BumpMap ("Normalmap", 2D) = "bump" {}
_LightMap ("Lightmap (RGB)", 2D) = "black" {}
}
SubShader {
LOD 200
Tags { "RenderType" = "Opaque" }
CGPROGRAM
#pragma surface surf BlinnPhong
struct Input {
float2 uv_MainTex;
float2 uv_BumpMap;
float2 uv2_LightMap;
};
sampler2D _MainTex;
sampler2D _LightMap;
sampler2D _BumpMap;
float4 _Color;
float _Shininess;
void surf (Input IN, inout SurfaceOutput o)
{
half4 tex = tex2D (_MainTex, IN.uv_MainTex);
o.Albedo = tex.rgb * _Color;
half4 lm = tex2D (_LightMap, IN.uv2_LightMap);
o.Emission = lm.rgb*o.Albedo.rgb;
o.Gloss = tex.a;
o.Alpha = lm.a * _Color.a;
o.Specular = _Shininess;
o.Normal = UnpackNormal(tex2D(_BumpMap, IN.uv_BumpMap));
}
ENDCG
}
FallBack "Legacy Shaders/Lightmapped/VertexLit"
}
And on the other side, I've got a "Diffuse + Cubemap + Lightmap" shader which looks like this:
Shader "Custom/CubeLightmap" {
Properties {
_Color ("Main Color", Color) = (1,1,1,1)
_ReflectColor ("Reflection Color", Color) = (1,1,1,0.5)
_MainTex ("Base (RGB) RefStrength (A)", 2D) = "white" {}
_Cube ("Reflection Cubemap", Cube) = "_Skybox" { TexGen CubeReflect }
_LightMap ("Lightmap (RGB)", 2D) = "lightmap" { LightmapMode }
}
SubShader {
LOD 200
Tags { "RenderType"="Opaque" }
CGPROGRAM
#pragma surface surf Lambert
sampler2D _MainTex;
samplerCUBE _Cube;
sampler2D _LightMap;
fixed4 _Color;
fixed4 _ReflectColor;
struct Input {
float2 uv_MainTex;
float3 worldRefl;
float2 uv2_LightMap;
};
void surf (Input IN, inout SurfaceOutput o) {
fixed4 tex = tex2D(_MainTex, IN.uv_MainTex);
fixed4 c = tex * _Color;
o.Albedo = c.rgb;
half4 lm = tex2D(_LightMap,IN.uv2_LightMap);
fixed4 reflcol = texCUBE (_Cube, IN.worldRefl);
reflcol *= tex.a;
o.Emission = lm.rgb * reflcol.rgb * _ReflectColor.rgb;
o.Alpha = reflcol.a * _ReflectColor.a * lm.a;
}
ENDCG
}
FallBack "Reflective/VertexLit"
}
So I want to merge both of them (i.e. include the cubemap in the first one, or the normal map in the second one) and I can't figure it out for the moment.
So I'm in need of some advice or help to achieve it; a rough sketch of the direction I have in mind follows after the sign-off.
Thanks in advance,
Regards
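For reference, here is a minimal, untested sketch of the merged surface shader, which simply combines the Input members and surf bodies of the two shaders above under the BlinnPhong lighting model. Note the INTERNAL_DATA member and the WorldReflectionVector helper: a Unity surface shader needs these so the reflection vector can be recomputed against the bumped normal. How the lightmap and reflection terms are combined in Emission (added together here) is a judgment call, since the two source shaders treat it differently:

Shader "Custom/BumpedSpecCubeLightmap" {
    Properties {
        _Color ("Main Color", Color) = (1,1,1,1)
        _SpecColor ("Specular Color", Color) = (0.5, 0.5, 0.5, 1)
        _Shininess ("Shininess", Range (0.03, 1)) = 0.078125
        _ReflectColor ("Reflection Color", Color) = (1,1,1,0.5)
        _MainTex ("Base (RGB) RefStrength (A)", 2D) = "white" {}
        _BumpMap ("Normalmap", 2D) = "bump" {}
        _Cube ("Reflection Cubemap", Cube) = "_Skybox" {}
        _LightMap ("Lightmap (RGB)", 2D) = "black" {}
    }
    SubShader {
        LOD 200
        Tags { "RenderType" = "Opaque" }
        CGPROGRAM
        #pragma surface surf BlinnPhong
        sampler2D _MainTex;
        sampler2D _BumpMap;
        samplerCUBE _Cube;
        sampler2D _LightMap;
        fixed4 _Color;
        fixed4 _ReflectColor;
        half _Shininess;
        struct Input {
            float2 uv_MainTex;
            float2 uv_BumpMap;
            float2 uv2_LightMap;
            float3 worldRefl;
            INTERNAL_DATA   // required so worldRefl can be re-evaluated with o.Normal
        };
        void surf (Input IN, inout SurfaceOutput o)
        {
            half4 tex = tex2D (_MainTex, IN.uv_MainTex);
            o.Albedo = tex.rgb * _Color.rgb;
            o.Gloss = tex.a;
            o.Specular = _Shininess;
            // write the normal first, so the reflection lookup uses the bumped normal
            o.Normal = UnpackNormal (tex2D (_BumpMap, IN.uv_BumpMap));
            fixed4 reflcol = texCUBE (_Cube, WorldReflectionVector (IN, o.Normal));
            reflcol *= tex.a;
            half4 lm = tex2D (_LightMap, IN.uv2_LightMap);
            // combining lightmap and reflection is a design choice; they are added here
            o.Emission = lm.rgb * o.Albedo.rgb + reflcol.rgb * _ReflectColor.rgb;
            o.Alpha = lm.a * _Color.a;
        }
        ENDCG
    }
    FallBack "Legacy Shaders/Lightmapped/VertexLit"
}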

Sounds to me like you are looking to create a radiosity normal mapping shader. This will require you to at least know the basics of C++ and HLSL.
The biggest problem you will encounter is how to compute the radiosity normal maps themselves, as this requires a specially crafted lightmapper. The only software I know of that does this is Beast by Autodesk.
After that you will need some simple shaders. There are some well-documented explanations, and relevant code, located at:
Half Life 2 shading
Valve Software
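For a sense of what those shaders do, here is an untested Cg/HLSL-style sketch of the Half-Life 2 radiosity-normal-map combine described in the Valve material. It assumes three lightmaps baked along Valve's tangent-space basis directions; the function name and parameters are illustrative, not from the paper:

static const float3 bumpBasis[3] = {
    float3(-0.40824829, -0.70710678, 0.57735027),
    float3(-0.40824829,  0.70710678, 0.57735027),
    float3( 0.81649658,  0.0,        0.57735027)
};

float3 RadiosityNormalMap (float3 n, float3 lm0, float3 lm1, float3 lm2)
{
    // weight each directional lightmap by the squared, clamped cosine
    // between the tangent-space normal and that basis direction
    float3 dp;
    dp.x = saturate (dot (n, bumpBasis[0]));
    dp.y = saturate (dot (n, bumpBasis[1]));
    dp.z = saturate (dot (n, bumpBasis[2]));
    dp *= dp;
    // renormalize so the three weights sum to one
    return (dp.x * lm0 + dp.y * lm1 + dp.z * lm2) / max (dot (dp, float3(1, 1, 1)), 1e-4);
}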

Related

Custom Fragment Shaders with SkiaSharp

I'm evaluating SkiaSharp for use in a project which allows for rendering some vector elements on top of a photo. SkiaSharp looks well suited to rendering the vector elements, however I also want to apply transformations to the image -- tone-curve adjustments like contrast or exposure, for example.
The ideal scenario would be a way to execute custom GLSL fragment shader code when rendering the rectangle containing the image (eg an SKColorFilter subclass that just wraps GLSL code, to be applied when calling DrawBitmap, or similar).
It would probably also work to have my existing GLSL code render to a texture, and then have SkiaSharp draw a rectangle using that texture as its contents. However, I can't see a way to do this without a GPU read-back, which feels like it would be prohibitively slow.
What's the best way forward here? More precisely: either, how can I apply transformations to the pixel data of an image, on the GPU, in SkiaSharp; or, what else could I use that provides SkiaSharp-like vector rendering primitives and also allows GLSL-fragment-shader-like pixel transformations?
You could have a look at some really early previews of the v2.84 series. They have a new SKRuntimeEffect that you can make use of.
Check out the preview feed: https://aka.ms/skiasharp-eap/index.json
You'll want the builds for the https://github.com/mono/SkiaSharp/pull/1321 PR:
v2.84.0-pr.1321.*
See this example: https://github.com/mono/SkiaSharp/issues/1319#issuecomment-640529824
// input values
SKCanvas canvas = ...;
float threshold = 1.05f;
float exponent = 1.5f;

// shader (SkSL source; note the verbatim string literal)
var src = @"
    in fragmentProcessor color_map;
    uniform float scale;
    uniform half exp;
    uniform float3 in_colors0;

    void main(float2 p, inout half4 color) {
        half4 texColor = sample(color_map, p);
        if (length(abs(in_colors0 - pow(texColor.rgb, half3(exp)))) < scale)
            discard;
        color = texColor;
    }";
using var effect = SKRuntimeEffect.Create(src, out var errorText);

// input values
var inputs = new SKRuntimeEffectInputs(effect);
inputs.Set("scale", threshold);
inputs.Set("exp", exponent);
inputs.Set("in_colors0", new[] { 1f, 1f, 1f });

// shader values
using var blueShirt = SKImage.FromEncodedData(Path.Combine(PathToImages, "blue-shirt.jpg"));
using var textureShader = blueShirt.ToShader();
var children = new SKRuntimeEffectChildren(effect);
children.Set("color_map", textureShader);

// create the actual shader
using var shader = effect.ToShader(inputs, children, true);

// draw as normal
canvas.Clear(SKColors.Black);
using var paint = new SKPaint { Shader = shader };
canvas.DrawRect(SKRect.Create(400, 400), paint);

DirectX11 pixel shader in pipeline is missing

I'm writing a program which displays an MS3D model using DirectX and, unfortunately, the result shows nothing on the screen.
When I use the Graphics Debugger from Visual Studio 2013, I notice that the pixel shader is missing from the pipeline, as shown in the picture below.
This is my pixel shader source code:
cbuffer SkinningTransforms
{
    matrix WorldMatrix;
    matrix ViewProjMatrix;
};

//--------------------------------------------------------------------------------
// Inter-stage structures
//--------------------------------------------------------------------------------
struct VS_INPUT
{
    float3 position : POSITION;
    int4 bone : BONEID;
    float4 weights : BONEWEIGHT;
    float3 normal : NORMAL;
    float3 tangent : TANGENT;
    float2 tex : TEXCOORD;
};

//--------------------------------------------------------------------------------
struct VS_OUTPUT
{
    float4 position : SV_Position;
    float3 normal : NORMAL;
    float3 light : LIGHT;
    float2 tex : TEXCOORDS;
};

Texture2D ColorTexture : register( t0 );
SamplerState LinearSampler : register( s0 );

//--------------------------------------------------------------------------------
VS_OUTPUT VSMAIN( in VS_INPUT input )
{
    VS_OUTPUT output;
    // Transform the vertex and pass the results to the pixel shader
    return output;
}

//--------------------------------------------------------------------------------
float4 PSMAIN( in VS_OUTPUT input ) : SV_Target
{
    // Calculate the lighting
    float3 n = normalize( input.normal );
    float3 l = normalize( input.light );
    float4 texColor = ColorTexture.Sample( LinearSampler, input.tex );
    float4 color = texColor * ( max( dot( n, l ), 0 ) + 0.05f );
    return( color );
}
As far as I can tell from the Graphics Debugger, all of the graphics events are right. I've listed below the important ones, which might relate to the pixel shader:
106:(obj:4) ID3D11Device::CreateDepthStencilView(obj:24,NULL,obj:25)*
108:(obj:5) ID3D11DeviceContext::OMSetRenderTargets(8,{obj:1,NULL,NULL,NULL,NULL,NULL,NULL,NULL},obj:25)*
109:(obj:5) ID3D11DeviceContext::ClearRenderTargetView(obj:1,addr:21)*
111:(obj:5) ID3D11DeviceContext::ClearDepthStencilView(obj:25,1,1.000f,0)*
119:(obj:4) ID3D11Device::CreateSamplerState(addr:24,obj:27)*
134:(obj:4) ID3D11Device::CreatePixelShader(addr:27,21056,NULL,obj:30)*
135:CreateObject(D3D11 Pixel Shader,obj:30)
136:(obj:5) ID3D11DeviceContext::PSSetShader(obj:30,NULL,0)*
137:(obj:5) ID3D11DeviceContext::PSSetSamplers(0,1,{obj:27})*
139:(obj:4) ID3D11Device::CreateTexture2D(addr:28,addr:5,obj:31)*
140:CreateObject(D3D11 Texture2D,obj:31)
142:(obj:4) ID3D11Device::CreateShaderResourceView(obj:31,NULL,obj:32)*
143:CreateObject(D3D11 Shader Resource View,obj:32)
144:(obj:5) ID3D11DeviceContext::PSSetShaderResources(0,1,{obj:32})*
146:(obj:4) ID3D11Device::CreateRasterizerState(addr:29,obj:33)*
147:CreateObject(D3D11 Rasterizer State,obj:33)
152:(obj:5) ID3D11DeviceContext::RSSetState(obj:33)*
154:(obj:5) ID3D11DeviceContext::RSSetViewports(1,addr:30)*
156:(obj:4) ID3D11Device::CreateBlendState(addr:11,obj:34)*
157:CreateObject(D3D11 Blend State,obj:34)
159:(obj:5) ID3D11DeviceContext::OMSetBlendState(obj:34,addr:31,-1)*
162:(obj:4) ID3D11Device::CreateDepthStencilState(addr:32,obj:35)*
163:CreateObject(D3D11 Depth-Stencil State,obj:35)
165:(obj:5) ID3D11DeviceContext::OMSetDepthStencilState(obj:35,0)*
I debugged all of the functions in the above list, and all of them return OK. Nothing is wrong.
My question is: what could cause the pixel shader to be missing from the pipeline, which in turn may result in the empty screen?
Adding to the other answers, constant buffer organization can be the cause of this problem. In my case, the pixel shader was missing from the pipeline, but the vertex shader also wasn't transforming the vertices correctly. Upon inspection, it was revealed that the world matrix had incorrect values, because the boolean value at the top of the constant buffer was causing data misalignment. HLSL packs data into 16-byte boundaries, so-called vectors with four components each. A boolean is 4 bytes, the same size as a float.
cbuffer cbPerObject : register( b1 )
{
    bool gUseTexture ;
    row_major float4x4 gWorld ;
    row_major float4x4 gWorldInvTranspose ;
    row_major float4x4 gWorldViewProj ;
    row_major float4x4 gTexTransform ;
    Material gMaterial ;
} ;
So in the constant buffer above, the boolean plus the first three components of the first row of the world matrix get mapped to the first vector, and this shifts everything by three components, misaligning the world matrix (and the other matrices following, and possibly other data).
Two possible solutions:
Move the boolean to the end of the structure. I did this and it worked.
Add a 3-component-sized padding variable between the boolean and the world matrix. I tried this by adding an XMFLOAT3 in the C++ structure and a float3 in HLSL. This worked too.
Long story short, pay attention to HLSL packing.
EDIT: At the time I thought these methods worked, as all variables except the boolean had correct values. I didn't use the bool at the time, so I assumed that was fine too. Turns out it's not.
HLSL bools and C++ bools have different sizes. HLSL bools are 4 bytes, whereas C++ bools are implementation-defined (1 byte on my machine, for example). Anyway, they will most likely be different, and that causes problems.
Either use the Windows BOOL type or another appropriately sized value like an int or a uint.
Take a look at https://gamedev.stackexchange.com/a/22605.
Also, the second-to-last post here explains the situation clearly (this link is also referenced in the answer at the gamedev link above).
Beware, though, because packing is still an issue. Even if you use a BOOL or a uint or whatever, if you place it at the beginning of the above structure as before, you will get incorrect values in your constant buffer. So take both of these issues (data alignment and the boolean problem) into consideration when working with constant buffers; a sketch of a layout that avoids both follows.
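As an illustration only (the field names are taken from the cbuffer above, and the exact padding needed depends on the size of Material), one way to satisfy both constraints is to store the flag as a 4-byte uint at the end of the buffer and keep the padding explicit:

cbuffer cbPerObject : register( b1 )
{
    row_major float4x4 gWorld ;
    row_major float4x4 gWorldInvTranspose ;
    row_major float4x4 gWorldViewProj ;
    row_major float4x4 gTexTransform ;
    Material gMaterial ;
    uint gUseTexture ;   // matched by a 4-byte type (e.g. uint32_t or Windows BOOL) on the C++ side
    float3 padding ;     // close out the final 16-byte vector explicitly
} ;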
As I wrote in my comment, I had a similar problem.
In my case, the pixel shader was correctly bound (see http://msdn.microsoft.com/en-us/library/jj191650.aspx). Furthermore, I ensured by debugging the vertex shader that the result of the transformation should be visible and hence should generate visible fragments.
In this case (which seems to be the same one you describe), make sure that your rasterizer state is correct. You might want to check that it is actually set (using the graphics object view of the immediate context) and that it lets your geometry through. For debugging purposes, I found it helpful to disable backface culling. I use:
D3D11_RASTERIZER_DESC rasterDesc;
ZeroMemory(&rasterDesc, sizeof(rasterDesc));
rasterDesc.CullMode = D3D11_CULL_MODE::D3D11_CULL_NONE;
rasterDesc.FillMode = D3D11_FILL_MODE::D3D11_FILL_SOLID;

Precise Texture Overlay

I'm trying to set up a two-stage render of objects in a 3D engine I'm working on, written in C++ with DirectX9, to facilitate transparency (and other things). I thought it was all working nicely until I noticed some dodginess on the edges of objects rendered before objects using this two-stage method.
The two stage method is simple:
Draw model to off-screen ("side") texture of same size using same zbuffer (no MSAA is used anywhere)
Draw off-screen ("side") texture over the top of the main render target with a suitable blend and no alpha test or write
In the image below, the left view is with the two-stage render of the gray object (a lamppost), with the body in front of it rendered directly to the target texture. The right view is with the two-stage render disabled, so both are rendered directly onto the target surface.
On close inspection, it is as if the side texture is offset by exactly 1 pixel "down" and 1 pixel "right" when rendered over the target surface (but is rendered correctly in place). This can be seen in an overlay of the off-screen texture (which I get my program to write out to a bitmap file via D3DXSaveTextureToFile) over a screenshot below.
One last image so you can see where the edge in the side texture is coming from (it's because rendering to the side texture does use the z test). Left is the screenshot, right is the side texture (as overlaid above).
All this leads me to believe that my "overlaying" isn't very effective. The code that renders the side texture over the main render target is shown below (note that the same viewport is used for all scene rendering (on and off screen)). The "effect" object is an instance of a thin wrapper over LPD3DXEFFECT, with the "effect" field (sorry about shoddy naming) being a LPD3DXEFFECT itself.
void drawSideOver(LPDIRECT3DDEVICE9 dxDevice, drawData* ddat)
{   // "ddat" drawdata contains lots of render state information, but all we need
    // here is the handles for the targetSurface and sideSurface
    D3DXMATRIX idMat;
    D3DXMatrixIdentity(&idMat); // create identity matrix

    dxDevice->SetRenderTarget(0, ddat->targetSurface); // switch to targetSurface
    dxDevice->SetRenderState(D3DRS_ZENABLE, false); // disable z test and z write
    dxDevice->SetRenderState(D3DRS_ZWRITEENABLE, false);

    vertexOver overVerts[4]; // create square
    overVerts[0] = vertexOver(-1, -1, 0, 0, 1);
    overVerts[1] = vertexOver(-1, 1, 0, 0, 0);
    overVerts[2] = vertexOver(1, -1, 0, 1, 1);
    overVerts[3] = vertexOver(1, 1, 0, 1, 0);

    effect.setTexture(ddat->sideTex); // use side texture as shader texture ("tex")
    effect.effect->SetTechnique("over"); // change to "over" technique
    effect.setViewProj(&idMat); // set viewProj to identity matrix so 1/-1 map directly
    effect.effect->CommitChanges();

    setAlpha(dxDevice); // this sets up the alpha blending which works fine

    UINT numPasses, pass;
    effect.effect->Begin(&numPasses, 0);
    effect.effect->BeginPass(0);
    dxDevice->SetVertexDeclaration(vertexDecOver);
    dxDevice->DrawPrimitiveUP(D3DPT_TRIANGLESTRIP, 2, overVerts, sizeof(vertexOver));
    effect.effect->EndPass();
    effect.effect->End();

    dxDevice->SetRenderState(D3DRS_ZENABLE, true); // revert these so we don't mess everything up drawn after this
    dxDevice->SetRenderState(D3DRS_ZWRITEENABLE, true);
}
The C++-side definition of the vertexOver struct and its constructor (the HLSL side is shown further below):
struct vertexOver
{
public:
    float x;
    float y;
    float z;
    float w;
    float tu;
    float tv;

    vertexOver() { }
    vertexOver(float xN, float yN, float zN, float tuN, float tvN)
    {
        x = xN;
        y = yN;
        z = zN;
        w = 1.0;
        tu = tuN;
        tv = tvN;
    }
};
Inefficiency of re-creating and passing the vertices down to the GPU for each draw aside, what I really want to know is why this method doesn't quite work, and whether there are any better methods for overlaying textures like this with an alpha blend that won't exhibit this issue.
I figured that texture sampling might matter here, but messing about with the options didn't seem to help much (for example, using a LINEAR filter just makes it fuzzy, as you might expect, implying that the offset isn't as clear-cut as a 1-pixel discrepancy). Shader code:
struct VS_Input_Over
{
    float4 pos : POSITION0;
    float2 txc : TEXCOORD0;
};

struct VS_Output_Over
{
    float4 pos : POSITION0;
    float2 txc : TEXCOORD0;
    float4 altPos : TEXCOORD1;
};

struct PS_Output
{
    float4 col : COLOR0;
};

Texture tex;
sampler texSampler = sampler_state
{
    texture = <tex>;
    magfilter = NONE;
    minfilter = NONE;
    mipfilter = NONE;
    AddressU = mirror;
    AddressV = mirror;
};

// side/over shaders (these make up the "over" technique, pixel shader version 2.0)
VS_Output_Over VShade_Over(VS_Input_Over inp)
{
    VS_Output_Over outp = (VS_Output_Over)0;
    outp.pos = mul(inp.pos, viewProj);
    outp.altPos = outp.pos;
    outp.txc = inp.txc;
    return outp;
}

PS_Output PShade_Over(VS_Output_Over inp)
{
    PS_Output outp = (PS_Output)0;
    outp.col = tex2D(texSampler, inp.txc);
    return outp;
}
I've looked about for a "Blended Blit" or something but I can't find anything, and other related searches have only brought up forums implying that rendering a quad with an orthographic projection is the way to go about doing this.
Sorry if I've given far too much detail for this issue but it's both interesting and infuriating and any feedback would be greatly appreciated.
It looks to me like your problem is the mapping of texels to pixels. You must offset a screen-aligned quad by half a pixel to map the texels directly to the screen pixels. This issue is explained here: Directly Mapping Texels to Pixels (MSDN)
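For illustration (untested, and equivalent in spirit to the UV-space fix in the next answer), the half-pixel correction could also be applied to the positions in the vertex shader above. This sketch assumes the viewport dimensions are passed in as a new, hypothetical uniform, and uses D3D9's convention that clip-space y points up while screen-space y points down:

float2 viewportSize; // hypothetical uniform: (width, height) of the render target

VS_Output_Over VShade_Over(VS_Input_Over inp)
{
    VS_Output_Over outp = (VS_Output_Over)0;
    outp.pos = mul(inp.pos, viewProj);
    // one pixel spans 2/size in clip space, so half a pixel is 1/size;
    // shift the quad half a pixel left and up (w is 1 for this quad anyway)
    outp.pos.x -= outp.pos.w / viewportSize.x;
    outp.pos.y += outp.pos.w / viewportSize.y;
    outp.altPos = outp.pos;
    outp.txc = inp.txc;
    return outp;
}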
For anyone else hitting a similar wall: my specific problem was solved by adjusting the U and V values of the vertices sent to the GPU for the overlaid texture triangles, thus:
for (int i = 0; i < 4; i++)
{
    // ddat->targetVp is the viewport in use, and the viewport is the same size as the texture
    overVerts[i].tu += 0.5 / (float)ddat->targetVp->Width;
    overVerts[i].tv += 0.5 / (float)ddat->targetVp->Height;
}
See Directly Mapping Texels to Pixels as provided by Gnietschow's answer for an explanation as to why this makes sense.

detecting if a gl_LightSource is enabled in glsl compatibility profile

I am writing a GLSL program as part of a plugin running inside of Maya, a closed-source 3D application. Maya uses the fixed-function pipeline to define its lights, so my program has to get its light information from the gl_LightSource array using the compatibility profile. My light evaluation is working fine (thanks Nicol Bolas) except for one thing: I cannot figure out how to determine whether a particular light in the array is enabled or disabled. Here is what I have so far:
#version 410 compatibility
vec3 incidentLight (in gl_LightSourceParameters light, in vec3 position)
{
    if (light.position.w == 0) {
        return normalize (-light.position.xyz);
    } else {
        vec3 offset = position - light.position.xyz;
        float distance = length (offset);
        vec3 direction = normalize (offset);
        float intensity;
        if (light.spotCutoff <= 90.) {
            float spotCos = dot (direction, normalize (light.spotDirection));
            intensity = pow (spotCos, light.spotExponent) *
                step (light.spotCosCutoff, spotCos);
        } else {
            intensity = 1.;
        }
        intensity /= light.constantAttenuation +
            light.linearAttenuation * distance +
            light.quadraticAttenuation * distance * distance;
        return intensity * direction;
    }
}

void main ()
{
    for (int i = 0; i < gl_MaxLights; ++i) {
        if (/* ??? gl_LightSource[i] is enabled ??? */ 1) {
            vec3 incident = incidentLight (gl_LightSource[i], position);
            <snip>
        }
    }
    <snip>
}
When Maya enables new lights, my program works as expected. But when Maya disables a previously enabled light, presumably using glDisable (GL_LIGHTi), its parameters are not reset in the gl_LightSource array, and gl_MaxLights obviously does not change, so my program continues to use that stale light information in its shading computation. Although I am not showing it above, the light colors, for example gl_LightSource[i].diffuse, also continue to have stale non-zero values after the lights are disabled.
Maya draws all other geometry using the fixed-function pipeline (no GLSL), and those objects correctly ignore disabled lights. How can I mimic this behavior in GLSL?
const vec4 AMBIENT_BLACK = vec4(0.0, 0.0, 0.0, 1.0);
const vec4 DEFAULT_BLACK = vec4(0.0, 0.0, 0.0, 0.0);

bool isLightEnabled(in int i)
{
    // A separate variable is used to get
    // rid of a linker error.
    bool enabled = true;

    // If all the colors of the Light are set
    // to BLACK then we know we don't need to bother
    // doing a lighting calculation on it.
    if ((gl_LightSource[i].ambient == AMBIENT_BLACK) &&
        (gl_LightSource[i].diffuse == DEFAULT_BLACK) &&
        (gl_LightSource[i].specular == DEFAULT_BLACK))
        enabled = false;

    return(enabled);
}
Unfortunately I looked at the GLSL spec and I don't see anything that provides this information. I also saw another thread which seemed to come to the same conclusion.
Is there any way you can modify the light values in your plugin, or add an extra uniform that can be used as an enable/disable flag?

Troubles with Marching Cubes and Texture coordinates

I'm implementing the marching cubes (MC) algorithm in OpenGL.
Everything went fine until I reached the point with texture coordinates.
I can't figure out how to implement them!
My progress:
Edit:
What I want to achieve is to put some textures on my generated MC triangles.
As far as I understand, I need to tell OpenGL the UV coordinates, but I have no idea how to calculate them.
A typical texture-coordinate generation algorithm for marching cubes is environment mapping.
In short, you calculate the vertex normal at each vertex by averaging the face normals of all adjacent faces, then discard the z coordinate of the normal and use (x/2 + 0.5, y/2 + 0.5) as the (u, v) texture coordinates.
Set up a texture with a nice white spot in the middle and some structure filling the rest of the texture, and you get the Terminator 2 silver-robot kind of look.
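As a minimal Cg/HLSL-style sketch of that mapping (assuming n is the normalized vertex normal in eye space; the function name is illustrative):

float2 sphereMapUV (float3 n)
{
    // drop z and remap x and y from [-1, 1] to [0, 1]
    return n.xy * 0.5 + 0.5;
}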
I need to tell OpenGL uv coordinates, but no idea how to calculate them.
You're facing a big problem there: the topology of what comes out of MC can be anything. The topology of a texture in OpenGL is either a (hyper)torus (GL_TEXTURE_1D, GL_TEXTURE_2D, GL_TEXTURE_3D) or a sphere (GL_TEXTURE_CUBE_MAP).
So inevitably you have to cut your surface into so-called maps. This is a nontrivial task, but a good strategy is cutting along regions of high curvature. See the paper
“Least Squares Conformal Maps for Automatic Texture Atlas Generation”
Bruno Lévy, Sylvain Petitjean, Nicolas Ray and Jérome Maillot
http://alice.loria.fr/index.php/publications.html?Paper=lscm#2002
for the dirty details.
The first answer given is partly correct, except that you also need to check which plane is best to project from, instead of always projecting from the z plane, as in this C# Unity example:
Vector2[] getUVs(Vector3 a, Vector3 b, Vector3 c)
{
    Vector3 s1 = b - a;
    Vector3 s2 = c - a;
    Vector3 norm = Vector3.Cross(s1, s2).normalized; // the face normal
    norm.x = Mathf.Abs(norm.x);
    norm.y = Mathf.Abs(norm.y);
    norm.z = Mathf.Abs(norm.z);

    Vector2[] uvs = new Vector2[3];
    if (norm.x >= norm.z && norm.x >= norm.y) // x plane
    {
        uvs[0] = new Vector2(a.z, a.y);
        uvs[1] = new Vector2(b.z, b.y);
        uvs[2] = new Vector2(c.z, c.y);
    }
    else if (norm.z >= norm.x && norm.z >= norm.y) // z plane
    {
        uvs[0] = new Vector2(a.x, a.y);
        uvs[1] = new Vector2(b.x, b.y);
        uvs[2] = new Vector2(c.x, c.y);
    }
    else if (norm.y >= norm.x && norm.y >= norm.z) // y plane
    {
        uvs[0] = new Vector2(a.x, a.z);
        uvs[1] = new Vector2(b.x, b.z);
        uvs[2] = new Vector2(c.x, c.z);
    }
    return uvs;
}
Though it is better to do this on the GPU in a shader, especially if you are planning on having very dynamic voxels, such as an infinitely generated world that's constantly generating around the player, or a game with lots of digging and building involved. That way you don't have to calculate the UVs each time, and it's also less data to send to the GPU, so it is definitely faster. I modified a basic triplanar shader I found on the internet a while ago; unfortunately I wasn't able to find it again, but my modified version is basically a triplanar mapping shader with no blending that only samples once per pass, so it should be pretty much as fast as a basic unlit shader, and it looks exactly the same as the image above. I did this because the normal triplanar blending doesn't look good with textures like brick walls at 45-degree angles.
Shader "Triplanar (no blending)"
{
Properties
{
_DiffuseMap("Diffuse Map ", 2D) = "white" {}
_TextureScale("Texture Scale",float) = 1
}
SubShader
{
Tags { "RenderType" = "Opaque" }
LOD 200
CGPROGRAM
#pragma target 3.0
#pragma surface surf Lambert
sampler2D _DiffuseMap;
float _TextureScale;
struct Input
{
float3 worldPos;
float3 worldNormal;
};
void surf(Input IN, inout SurfaceOutput o)
{
IN.worldNormal.x = abs(IN.worldNormal.x);
IN.worldNormal.y = abs(IN.worldNormal.y);
IN.worldNormal.z = abs(IN.worldNormal.z);
if (IN.worldNormal.x >= IN.worldNormal.z && IN.worldNormal.x >= IN.worldNormal.y) // x plane
{
o.Albedo = tex2D(_DiffuseMap, IN.worldPos.zy / _TextureScale);
}
else if (IN.worldNormal.y >= IN.worldNormal.x && IN.worldNormal.y >= IN.worldNormal.z) // y plane
{
o.Albedo = tex2D(_DiffuseMap, IN.worldPos.xz / _TextureScale);
}
else if (IN.worldNormal.z >= IN.worldNormal.x && IN.worldNormal.z >= IN.worldNormal.y) // z plane
{
o.Albedo = tex2D(_DiffuseMap, IN.worldPos.xy / _TextureScale);
}
}
ENDCG
}
}
It ends up looking a lot like a cubemap, though I don't think it technically is one, as we only use three faces, not six.
EDIT: I later realized that you may want it in the fragment shader like that, but for my purposes it works exactly the same and would theoretically be faster in the vertex shader:
Shader "NewUnlitShader"
{
Properties
{
_MainTex ("Texture", 2D) = "white" {}
}
SubShader
{
Tags { "RenderType"="Opaque" }
LOD 100
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
// make fog work
#pragma multi_compile_fog
#include "UnityCG.cginc"
struct appdata
{
float4 vertex : POSITION;
float3 normal : NORMAL;
};
struct v2f
{
float2 uv : TEXCOORD0;
UNITY_FOG_COORDS(1)
float4 vertex : SV_POSITION;
};
sampler2D _MainTex;
float4 _MainTex_ST;
v2f vert (appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
v.normal.x = abs(v.normal.x);
v.normal.y = abs(v.normal.y);
v.normal.z = abs(v.normal.z);
if (v.normal.x >= v.normal.z && v.normal.x >= v.normal.y) // x plane
{
o.uv = v.vertex.zy;
}
else if (v.normal.y >= v.normal.x && v.normal.y >= v.normal.z) // y plane
{
o.uv = v.vertex.xz;
}
else if (v.normal.z >= v.normal.x && v.normal.z >= v.normal.y) // z plane
{
o.uv = v.vertex.xy;
}
UNITY_TRANSFER_FOG(o, o.vertex);
return o;
}
fixed4 frag (v2f i) : SV_Target
{
// sample the texture
fixed4 col = tex2D(_MainTex, i.uv);
// apply fog
UNITY_APPLY_FOG(i.fogCoord, col);
return col;
}
ENDCG
}
}
}