I use a shader that has a rotation-over-time option, and it worked great for years. But after updating Unity (2017.2 to 2018.2) I get this error:
"Shader error in 'Custom/NewSurfaceShader': Too many texture interpolators would be used for ForwardBase pass (11 out of max 10)"
and the material using this shader turns white. I don't know what to do; I looked online, but everyone has a different problem.
Here is my code:
Shader "Custom/NewSurfaceShader" {
Properties{
//Tint
_Color("Color", Color) = (1,1,1,1)
//Textures and Alphas
_TexOne("Texture One (RGB)", 2D) = "white" {}
_TexTwo("Texture Two (RGB)", 2D) = "white" {}
_AlphaTexOne("Alpha One (A)", 2D) = "white" {}
_AlphaTexTwo("Alpha Two (A)", 2D) = "white" {}
_AlphaTexThree("Alpha Three (A)", 2D) = "white" {}
_Brightness("Brightness", Range(0,5)) = 1
_AlphaWeakness("Alpha Weakness", Range(0,10)) = 1
_ScrollSpeed1X("Scroll Speed Texture One X", Range(-10,10)) = 0
_ScrollSpeed1Y("Scroll Speed Texture One Y", Range(-10,10)) = 0
_ScrollSpeed2X("Scroll Speed Texture Two X", Range(-10,10)) = 0
_ScrollSpeed2Y("Scroll Speed Texture Two Y", Range(-10,10)) = 0
_ScrollSpeedAlpha1X("Scroll Speed Alpha One X", Range(-10,10)) = 0
_ScrollSpeedAlpha1Y("Scroll Speed Alpha One Y", Range(-10,10)) = 0
_ScrollSpeedAlpha2X("Scroll Speed Alpha Two X", Range(-10,10)) = 0
_ScrollSpeedAlpha2Y("Scroll Speed Alpha Two Y", Range(-10,10)) = 0
_RotationSpeed1("Rotation Speed Texture 1", Float) = 0.0
_RotationCenter1("Rotation Center Texture 1", Range(0,1)) = 0.5
_RotationSpeed2("Rotation Speed Texture 2", Float) = 0.0
_RotationCenter2("Rotation Center Texture 2", Range(0,1)) = 0.5
_Speed("Wave Speed", Range(-80, 80)) = 5
_Freq("Frequency", Range(0, 5)) = 2
_Amp("Amplitude", Range(-1, 1)) = 1
}
SubShader{
//Default Queues - Background, Geometry, AlphaTest, Transparent, and Overlay
Tags{ "Queue" = "Transparent" "IgnoreProjector" = "True" "RenderType" = "Transparent" }
LOD 200
CGPROGRAM
#pragma surface surf Lambert alpha:fade vertex:vert
//sampler2D _Color;
sampler2D _TexOne;
sampler2D _TexTwo;
sampler2D _AlphaTexOne;
sampler2D _AlphaTexTwo;
sampler2D _AlphaTexThree;
fixed4 _Color;
float _ScrollSpeed1X;
float _ScrollSpeed1Y;
float _ScrollSpeed2X;
float _ScrollSpeed2Y;
float _ScrollSpeedAlpha1X;
float _ScrollSpeedAlpha1Y;
float _ScrollSpeedAlpha2X;
float _ScrollSpeedAlpha2Y;
float _RotationSpeed1;
float _RotationCenter1;
float _RotationSpeed2;
float _RotationCenter2;
float _Brightness;
float _AlphaWeakness;
float _RotationSpeed;
float _Speed;
float _Freq;
float _Amp;
float _OffsetVal;
struct Input {
float2 uv_TexOne;
float2 uv_TexTwo;
float2 uv_AlphaTexOne;
float2 uv_AlphaTexTwo;
float2 uv_AlphaTexThree;
};
void vert(inout appdata_full v) {
float time = _Time * _Speed;
// float waveValueA = sin(time + v.vertex.x * _Freq) * _Amp;
// v.vertex.xyz = float3(v.vertex.x, v.vertex.y + waveValueA, v.vertex.z);
// v.normal = normalize(float3(v.normal.x + waveValueA, v.normal.y, v.normal.z));
}
// This is the only code you need to touch
void surf(Input IN, inout SurfaceOutput o) {
//Rotation
float sinX, cosX, sinY;
float2x2 rotationMatrix;
sinX = sin(_RotationSpeed1 * _Time);
cosX = cos(_RotationSpeed1 * _Time);
sinY = sin(_RotationSpeed1 * _Time);
rotationMatrix = float2x2(cosX, -sinX, sinY, cosX);
//Center the rotation point and apply rotation
IN.uv_TexOne.xy -= _RotationCenter1;
IN.uv_TexOne.xy = mul(IN.uv_TexOne.xy, rotationMatrix);
IN.uv_TexOne.xy += _RotationCenter1;
sinX = sin(_RotationSpeed2 * _Time);
cosX = cos(_RotationSpeed2 * _Time);
sinY = sin(_RotationSpeed2 * _Time);
rotationMatrix = float2x2(cosX, -sinX, sinY, cosX);
//Center the rotation point and apply rotation
IN.uv_TexTwo.xy -= _RotationCenter2;
IN.uv_TexTwo.xy = mul(IN.uv_TexTwo.xy, rotationMatrix);
IN.uv_TexTwo.xy += _RotationCenter2;
//Scrolling
IN.uv_TexOne.x -= _ScrollSpeed1X * _Time;
IN.uv_TexOne.y -= _ScrollSpeed1Y * _Time;
IN.uv_TexTwo.x -= _ScrollSpeed2X * _Time;
IN.uv_TexTwo.y -= _ScrollSpeed2Y * _Time;
IN.uv_AlphaTexOne.x -= _ScrollSpeedAlpha1X * _Time;
IN.uv_AlphaTexOne.y -= _ScrollSpeedAlpha1Y * _Time;
IN.uv_AlphaTexTwo.x -= _ScrollSpeedAlpha2X * _Time;
IN.uv_AlphaTexTwo.y -= _ScrollSpeedAlpha2Y * _Time;
//Textures
fixed4 c1 = tex2D(_TexOne, IN.uv_TexOne) * (_Color * _Brightness); // This is your color texture
fixed4 c2 = tex2D(_TexTwo, IN.uv_TexTwo) * (_Color * _Brightness); // This is your color texture
//Alphas
fixed4 a1 = tex2D(_AlphaTexOne, IN.uv_AlphaTexOne); // This is your alpha texture
fixed4 a2 = tex2D(_AlphaTexTwo, IN.uv_AlphaTexTwo); // This is your alpha texture
fixed4 a3 = tex2D(_AlphaTexThree, IN.uv_AlphaTexThree); // This is your alpha texture
//Assignment
o.Albedo = (c1.rgb * c2.rgb * 2); // Setting your color from the one texture
o.Alpha = ((a1.a * a2.a * 2) * a3.a * 2) *_AlphaWeakness; // Setting your alpha from the other texture
}
ENDCG
}
}
Straightforward solution: target your shader at a newer platform (3.5 or higher) by adding #pragma target 3.5 after CGPROGRAM:
CGPROGRAM
#pragma surface surf Lambert alpha:fade vertex:vert
#pragma target 3.5
This is because shader model 3.0 provides at most 10 interpolators, i.e. your Input structure may use at most 10 float fields. Your structure has exactly 10 (each float2 counts as 2), but don't forget that the engine may add internal interpolators behind the scenes that do not come from your input data. That is what happens here, and as a result you end up with 11 interpolators.
If you target older platforms, you will need to think about how to optimize your shader, as there are too many fields in the Input structure. For example, do you really need three alpha textures? Do you use them all? Maybe remove uv_AlphaTexThree?
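If you do need to stay on shader model 3.0, here is a minimal sketch of that kind of optimization, assuming the alpha textures can all share the first UV set: their dedicated interpolators disappear and the alpha scrolling moves into surf. Note it uses _Time.y (Unity's time in seconds) and gives up per-alpha-texture tiling/offset in exchange for the freed interpolators:
// Input shrinks from 10 interpolated floats to 4 by dropping the
// dedicated alpha UV sets and reusing uv_TexOne for the alpha textures.
struct Input {
float2 uv_TexOne;
float2 uv_TexTwo;
};
void surf(Input IN, inout SurfaceOutput o) {
// ... rotation and scrolling of uv_TexOne / uv_TexTwo as before ...
// Scroll the shared UV set for each alpha texture:
float2 uvA1 = IN.uv_TexOne - float2(_ScrollSpeedAlpha1X, _ScrollSpeedAlpha1Y) * _Time.y;
float2 uvA2 = IN.uv_TexOne - float2(_ScrollSpeedAlpha2X, _ScrollSpeedAlpha2Y) * _Time.y;
fixed4 a1 = tex2D(_AlphaTexOne, uvA1);
fixed4 a2 = tex2D(_AlphaTexTwo, uvA2);
fixed4 a3 = tex2D(_AlphaTexThree, IN.uv_TexOne);
// ... albedo/alpha assignment as before ...
}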
I'm trying to load a triangle mesh from an .off file and show it centered at the origin and scaled to fit in the unit cube, but for some reason I'm off by a large factor.
The way I'm doing this is to find the extrema of the mesh and use them to offset the surface by that amount.
float avgX = (maxX + minX) / 2;
float avgY = (maxY + minY) / 2;
float avgZ = (maxZ + minZ) / 2;
Vector3f center(avgX, avgY, avgZ);
Vector3f offset = Vector3f(0, 0, 0) - center;
Translation3f translation(offset);
cout << "offset is: " << endl << offset << endl;
double d_theta = (M_PI / 180);
AngleAxisf rotation(d_theta, Vector3f(0, 0, 1));
float scaleX = (float) 1 / (abs(maxX - minX));
float scaleY = (float) 1 / (abs(maxY - minY));
float scaleZ = (float) 1 / (abs(maxZ - minZ));
AlignedScaling3f scale = AlignedScaling3f(scaleX, scaleY, scaleZ);
I then put it into a vector of surfaces with:
Vector3f translatedCenter = translation * rotation * scale * center;
VertexBufferObject VBO;
VBO.init();
VBO.update(Vertices);
program.bindVertexAttribArray("position", VBO);
VertexBufferObject VBO_N;
VBO_N.init();
VBO_N.update(FlatNormals);
program.bindVertexAttribArray("normals", VBO_N);
cout << "updated normals" << endl;
VertexBufferObject VBO_C;
VBO_C.init();
VBO_C.update(C);
program.bindVertexAttribArray("color",VBO_C);
cout << "updated color " << endl;
Surface* s = new Surface(VBO, Vertices, translation, rotation, scale, percentScale, translatedCenter, SmoothNormals, FlatNormals, C);
And I pass it to the vertex shader as "model":
Affine3f model = s->getTranslation() * s->getRotation() * s->getScale();
glUniformMatrix4fv(program.uniform("model"), 1, GL_FALSE, model.data());
This is all being done using the Eigen library (https://eigen.tuxfamily.org/dox/group__TutorialGeometry.html#TutorialGeoTransform)
No matter what I try I'm off by a little bit. What am I doing wrong?
Swap translation and rotation:
Affine3f model = s->getRotation() * s->getTranslation() * s->getScale();
Note, the translation moves the center of the object to the center of the view. After that, the rotation matrix rotates around this center.
If you don't have any projection matrix, then the view space is the normalized device space, where each coordinate is in the range [-1, 1]. This means the length of a side is 2 = 1 - (-1). You have to respect this when you calculate the scale:
float scaleX = (float) 2 / (abs(maxX - minX));
float scaleY = (float) 2 / (abs(maxY - minY));
float scaleZ = (float) 2 / (abs(maxZ - minZ));
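Putting both fixes together, a minimal sketch of the model-matrix assembly (assuming the same extrema variables and Eigen's Geometry module as above; unitCubeModel is a made-up helper name):
#include <cmath>
#include <Eigen/Geometry>
using namespace Eigen;
Affine3f unitCubeModel(float minX, float maxX, float minY, float maxY,
                       float minZ, float maxZ, float theta) {
    Vector3f center((maxX + minX) / 2, (maxY + minY) / 2, (maxZ + minZ) / 2);
    // NDC spans [-1, 1] on each axis, so scale each extent to a length of 2
    AlignedScaling3f scale(2.0f / std::abs(maxX - minX),
                           2.0f / std::abs(maxY - minY),
                           2.0f / std::abs(maxZ - minZ));
    AngleAxisf rotation(theta, Vector3f::UnitZ());
    // rotation applied after the translation, per the corrected order above
    return Affine3f(rotation) * Translation3f(-center) * scale;
}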
In a vertex shader I give gl_PointSize a value bigger than 1, say 15.
In the fragment shader I would like to choose a pixel inside that 15x15 square:
vec2 sprite = gl_PointCoord;
if (sprite.s == 9./15.0) discard;
gl_FragColor = vec4(0.0, 1.0, 0.0, 1.0);
But that does not work when the size is not a power of 2.
(If the size is 16, then sprite.s == a/16. where a is in 1..16 works perfectly!)
Is there a way to achieve my purpose when the size is not a power of 2?
Edit: I know the solution with a texture of size PointSize * PointSize:
gl_FragColor = texture2D(tex, gl_PointCoord);
but that does not fit dynamic changes.
Edit, 26 July:
First, I do not understand why it is easier to read from a float texture using WebGL2 rather than WebGL1. For my part, I request ext = gl.getExtension("OES_texture_float"); and gl.readPixels uses the same syntax.
Then, I certainly did not understand everything, but I tried the values s = 0.25 and s = 0.75 for a correctly centered 2x2 point, and that does not seem to work.
On the other hand, the values 0.5 and 1.0 give me a correct display (see fiddle 1).
(fiddle 1) https://jsfiddle.net/3u26rpf0/274/
In fact, to accurately display a point of any size (say size) I use the following formula:
float size = 13.0;
float nby = floor(size / 2.0);
float nbx = floor((size - 1.0) / 2.0);
//
// <nby> pixels CENTER <nbx> pixels
//
// if size is odd, nbx == nby
// if size is even, nbx == nby - 1
//
vec2 off = 2. * vec2(nbx, nby) / canvasSize;
vec2 p = -1. + (2. * (a_position.xy * size) + 1.) / canvasSize + off;
gl_Position = vec4(p, 0.0, 1.0);
gl_PointSize = size;
https://jsfiddle.net/3u26rpf0/275/
Checking floating point numbers for exact values is generally not a good idea. Check for a range instead:
sprite.s > ??? && sprite.s < ???
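For example, a sketch that discards one column of a 15x15 point (the column index 9 is illustrative): gl_PointCoord.s lands on (k + 0.5) / size at texel centers, so test the column's interval instead of an exact value:
precision mediump float;
void main() {
  float size = 15.0;
  float col = 9.0; // column to discard (illustrative)
  // column k occupies the interval (k / size, (k + 1.0) / size)
  if (gl_PointCoord.s > col / size && gl_PointCoord.s < (col + 1.0) / size)
    discard;
  gl_FragColor = vec4(0.0, 1.0, 0.0, 1.0);
}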
Or better yet, consider using a mask texture or something more flexible than a hard-coded if statement.
Otherwise, in WebGL pixels are referred to by their centers. So if you draw a 2x2 point on a pixel boundary, these should be the .s values for gl_PointCoord:
+-----+-----+
| .25 | .75 |
| | |
+-----+-----+
| .25 | .75 |
| | |
+-----+-----+
If you draw it off a pixel boundary then it depends:
++=====++=====++======++
|| || || ||
|| +------+------+ ||
|| | | | ||
++==| | |===++
|| | | | ||
|| +------+------+ ||
|| | | | ||
++==| | |===++
|| | | | ||
|| +------+------+ ||
|| || || ||
++=====++=====++======++
It will still only draw 4 pixels (the 4 that are closest to where the point lies), but it will choose different gl_PointCoord values, as though it could draw on fractional pixels. If we offset gl_Position so our point is over by .25 pixels, it still draws the exact same 4 pixels as when it was pixel-aligned, since an offset of .25 is not enough to move it to a different set of 4 pixels. We can therefore guess it's going to offset gl_PointCoord by the equivalent of -.25 pixels; for a 2x2 point that's an offset of .125, so (.25 - .125) = .125 and (.75 - .125) = .625.
We can test what WebGL is using by writing them into a floating point texture using WebGL2 (since it's easier to read the float pixels back in WebGL2)
function main() {
const gl = document.createElement("canvas").getContext("webgl2");
if (!gl) {
return alert("need WebGL2");
}
const ext = gl.getExtension("EXT_color_buffer_float");
if (!ext) {
return alert("need EXT_color_buffer_float");
}
const vs = `
uniform vec4 position;
void main() {
gl_PointSize = 2.0;
gl_Position = position;
}
`;
const fs = `
precision mediump float;
void main() {
gl_FragColor = vec4(gl_PointCoord.xy, 0, 1);
}
`;
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
const width = 2;
const height = 2;
// creates a 2x2 float texture and attaches it to a framebuffer
const fbi = twgl.createFramebufferInfo(gl, [
{ internalFormat: gl.RGBA32F, minMag: gl.NEAREST, },
], width, height);
// binds the framebuffer and set the viewport
twgl.bindFramebufferInfo(gl, fbi);
gl.useProgram(programInfo.program);
test([0, 0, 0, 1]);
test([.25, .25, 0, 1]);
function test(position) {
twgl.setUniforms(programInfo, {position});
gl.drawArrays(gl.POINTS, 0, 1);
const pixels = new Float32Array(width * height * 4);
gl.readPixels(0, 0, 2, 2, gl.RGBA, gl.FLOAT, pixels);
console.log('gl_PointCoord.s at position:', position.join(', '));
for (let y = 0; y < height; ++y) {
const s = [];
for (let x = 0; x < width; ++x) {
s.push(pixels[(y * width + x) * 4]); // row-major index: y * width + x
}
console.log(`y${y}:`, s.join(', '));
}
}
}
main();
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
The formula for what gl_PointCoord will be is in the spec, section 3.3. Following it for a 2x2 point drawn at .25, .25 (slightly off center, i.e. .25 pixels off a pixel boundary):
// first pixel
// this value is constant for all pixels. It is the unmodified
// **WINDOW** coordinate of the **vertex** (not the pixel)
xw = 1.25
// this is the integer pixel coordinate
xf = 0
// gl_PointSize
size = 2
s = 1 / 2 + (xf + 1 / 2 - xw) / size
s = .5 + (0 + .5 - 1.25) / 2
s = .5 + (-.75) / 2
s = .5 + (-.375)
s = .125
which is the value I get from running the sample above.
xw is the window x coordinate of the vertex. In other words, xw is based on what we set gl_Position to, so:
xw = (gl_Position.x / gl_Position.w * .5 + .5) * canvas.width
Or more specifically:
xw = (gl_Position.x / gl_Position.w * .5 + .5) * viewportWidth + viewportX
Where viewportX and viewportWidth are set with gl.viewport(x, y, width, height) and default to the same size as the canvas.
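As a quick sanity check, plugging the sample's values into those formulas in plain JavaScript (the 2x2 framebuffer from the snippet above, point at x = .25):
const canvasWidth = 2;            // the 2x2 framebuffer from the sample
const clip = { x: 0.25, w: 1 };   // gl_Position from test([.25, .25, 0, 1])
const xw = (clip.x / clip.w * 0.5 + 0.5) * canvasWidth; // = 1.25
const size = 2;                   // gl_PointSize
for (let xf = 0; xf < 2; ++xf) {
  const s = 0.5 + (xf + 0.5 - xw) / size;
  console.log(`xf=${xf}: s=${s}`); // 0.125, then 0.625
}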
I have a basic stencil shadow functioning in my game engine. I'm trying to deform the shadow based on the lighting direction, which I have:
/*
 * @brief Applies translation, rotation and scale for the shadow of the specified
 * entity. In order to reuse the vertex arrays from the primary rendering
 * pass, the shadow origin must be transformed into model-view space.
 */
static void R_RotateForMeshShadow_default(const r_entity_t *e) {
vec3_t origin, delta;
if (!e) {
glPopMatrix();
return;
}
R_TransformForEntity(e, e->lighting->shadow_origin, origin);
VectorSubtract(e->lighting->shadow_origin, e->origin, delta);
const vec_t scale = 1.0 + VectorLength(delta) / LIGHTING_MAX_SHADOW_DISTANCE;
/*const vec_t dot = DotProduct(e->lighting->shadow_normal, e->lighting->dir);
const vec_t sy = sin(Radians(e->angles[YAW]));
const vec_t cy = cos(Radians(e->angles[YAW]));*/
glPushMatrix();
glTranslatef(origin[0], origin[1], origin[2] + 1.0);
glRotatef(-e->angles[PITCH], 0.0, 1.0, 0.0);
glScalef(scale, scale, 0.0);
}
I've commented out the dot product of the ground plane (shadow_normal) and lighting direction, as well as the sin and cos of the yaw of the model, because while I'm pretty sure they are what I need to augment the scale of the shadow, I don't know the correct formula to yield a perspective-correct deformation. For someone who better understands projections this is probably child's play; for me, it's stabbing in the dark.
I was eventually able to achieve the desired effect by managing my own matrices and adapting code from SGI's OpenGL Cookbook. The code uses LordHavoc's matrix library from his DarkPlaces Quake engine. Inline comments call out the major steps. Here's the full code:
/*
 * @brief Projects the model view matrix for the given entity onto the shadow
* plane. A perspective shear is then applied using the standard planar shadow
* deformation from SGI's cookbook, adjusted for Quake's negative planes:
*
* ftp://ftp.sgi.com/opengl/contrib/blythe/advanced99/notes/node192.html
*/
static void R_RotateForMeshShadow_default(const r_entity_t *e, r_shadow_t *s) {
vec4_t pos, normal;
matrix4x4_t proj, shear;
vec_t dot;
if (!e) {
glPopMatrix();
return;
}
const cm_bsp_plane_t *p = &s->plane;
// project the entity onto the shadow plane
vec3_t vx, vy, vz, t;
Matrix4x4_ToVectors(&e->matrix, vx, vy, vz, t);
dot = DotProduct(vx, p->normal);
VectorMA(vx, -dot, p->normal, vx);
dot = DotProduct(vy, p->normal);
VectorMA(vy, -dot, p->normal, vy);
dot = DotProduct(vz, p->normal);
VectorMA(vz, -dot, p->normal, vz);
dot = DotProduct(t, p->normal) - p->dist;
VectorMA(t, -dot, p->normal, t);
Matrix4x4_FromVectors(&proj, vx, vy, vz, t);
glPushMatrix();
glMultMatrixf((GLfloat *) proj.m);
// transform the light position and shadow plane into model space
Matrix4x4_Transform(&e->inverse_matrix, s->illumination->light.origin, pos);
pos[3] = 1.0;
const vec_t *n = p->normal;
Matrix4x4_TransformPositivePlane(&e->inverse_matrix, n[0], n[1], n[2], p->dist, normal);
// calculate shearing, accounting for Quake's negative plane equation
normal[3] = -normal[3];
dot = DotProduct(pos, normal) + pos[3] * normal[3];
shear.m[0][0] = dot - pos[0] * normal[0];
shear.m[1][0] = 0.0 - pos[0] * normal[1];
shear.m[2][0] = 0.0 - pos[0] * normal[2];
shear.m[3][0] = 0.0 - pos[0] * normal[3];
shear.m[0][1] = 0.0 - pos[1] * normal[0];
shear.m[1][1] = dot - pos[1] * normal[1];
shear.m[2][1] = 0.0 - pos[1] * normal[2];
shear.m[3][1] = 0.0 - pos[1] * normal[3];
shear.m[0][2] = 0.0 - pos[2] * normal[0];
shear.m[1][2] = 0.0 - pos[2] * normal[1];
shear.m[2][2] = dot - pos[2] * normal[2];
shear.m[3][2] = 0.0 - pos[2] * normal[3];
shear.m[0][3] = 0.0 - pos[3] * normal[0];
shear.m[1][3] = 0.0 - pos[3] * normal[1];
shear.m[2][3] = 0.0 - pos[3] * normal[2];
shear.m[3][3] = dot - pos[3] * normal[3];
glMultMatrixf((GLfloat *) shear.m);
Matrix4x4_Copy(&s->matrix, &proj);
}
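For reference, the shear loop above fills in the standard planar projection matrix from the SGI notes (modulo the storage order expected by glMultMatrixf): with homogeneous light position $l = (l_x, l_y, l_z, l_w)$ and plane $\pi = (n_x, n_y, n_z, d)$,
$$M_{ij} = (l \cdot \pi)\,\delta_{ij} - l_i\,\pi_j$$
which is exactly dot - pos[i] * normal[i] on the diagonal and -pos[i] * normal[j] everywhere else.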
The full implementation of this lives here:
https://github.com/jdolan/quake2world/blob/master/src/client/renderer/r_mesh_shadow.c
Using MonoTouch and OpenTK I am trying to get the screen coordinate of one 3D point. I have my world-view-projection matrix set up, and OpenGL makes sense of it and projects my 3D model perfectly, but how do I use the same matrix to project just one point from 3D to 2D?
I thought I could simply use:
Vector3.Transform(ref input3Dpos, ref matWorldViewProjection, out projected2Dpos);
Then I'd have the projected screen coordinate in projected2Dpos. But the result does not seem to represent the proper projected screen coordinate, and I do not know how to calculate it from there.
I found I need to divide by Vector4.W; however, I am still getting wrong values. I am using this method now:
private static bool GluProject(OpenTK.Vector3 objPos, OpenTK.Matrix4 matWorldViewProjection, int[] viewport, out OpenTK.Vector3 screenPos)
{
OpenTK.Vector4 _in;
_in.X = objPos.X;
_in.Y = objPos.Y;
_in.Z = objPos.Z;
_in.W = 1f;
Vector4 _out = OpenTK.Vector4.Transform(_in, matWorldViewProjection);
if (_out.W <= 0.0)
{
screenPos = OpenTK.Vector3.Zero;
return false;
}
_out.X /= _out.W;
_out.Y /= _out.W;
_out.Z /= _out.W;
/* Map x, y and z to range 0-1 */
_out.X = _out.X * 0.5f + 0.5f;
_out.Y = -_out.Y * 0.5f + 0.5f;
_out.Z = _out.Z * 0.5f + 0.5f;
/* Map x,y to viewport */
_out.X = _out.X * viewport[2] + viewport[0];
_out.Y = _out.Y * viewport[3] + viewport[1];
screenPos.X = _out.X;
screenPos.Y = _out.Y;
screenPos.Z = _out.Z;
return true;
}
I cannot see any errors though... :S
In your first attempt you're missing the last step: mapping from NDC (normalized device coordinates) to viewport coordinates. That's what these lines in your GluProject do:
/* Map x,y to viewport */
_out.X = _out.X * viewport[2] + viewport[0];
_out.Y = _out.Y * viewport[3] + viewport[1];
You have two options: you can calculate it yourself, or use Glu.Project. I prefer the first.
Number 1:
private Vector2 Convert(
Vector3 pos,
Matrix4 viewMatrix,
Matrix4 projectionMatrix,
int screenWidth,
int screenHeight)
{
pos = Vector3.Transform(pos, viewMatrix);
pos = Vector3.Transform(pos, projectionMatrix);
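// Caveat: this divides by Z rather than by the clip-space W (which the
// Vector3 overload of Transform does not return); for exact results,
// transform a Vector4 and divide by W as in the GluProject code above.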
pos.X /= pos.Z;
pos.Y /= pos.Z;
pos.X = (pos.X + 1) * screenWidth / 2;
pos.Y = (pos.Y + 1) * screenHeight / 2;
return new Vector2(pos.X, pos.Y);
}
Number 2:
public Vector2 form3Dto2D(Vector3 our3DPoint)
{
Vector3 our2DPoint;
float[] modelviewMatrix = new float[16];
float[] projectionMatrix = new float[16];
int[] viewport = new int[4];
GL.GetFloat(GetPName.ModelviewMatrix, modelviewMatrix);
GL.GetFloat(GetPName.ProjectionMatrix, projectionMatrix);
GL.GetInteger(GetPName.Viewport, viewport);
OpenTK.Graphics.Glu.Project(our3DPoint, convertFloatsToDoubles(modelviewMatrix),
convertFloatsToDoubles(projectionMatrix), viewport, out our2DPoint);
return new Vector2(our2DPoint.X, our2DPoint.Y);
}
public static double[] convertFloatsToDoubles(float[] input)
{
if (input == null)
{
return null; // Or throw an exception - your choice
}
double[] output = new double[input.Length];
for (int i = 0; i < input.Length; i++)
{
output[i] = input[i];
}
return output;
}
This question is a continuation of "why D3DXCreateCylinder is not creating a cylinder?". I'm able to draw the cylinder, but it only renders flat, without shading.
The code is as follows:
void draw_Cylinder(void){
D3DXMATRIX rot_matrix;
D3DXMATRIX trans_matrix;
D3DXMATRIX world_matrix;
static float rot_triangle=0.0f;
static float rot_triangle2=0.0f;
D3DXMatrixRotationY(&rot_matrix,rot_triangle); //Rotate the cylinder
D3DXMatrixRotationX(&rot_matrix,rot_triangle2); //Rotate the cylinder
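// Note: D3DXMatrixRotationX rebuilds rot_matrix from scratch, overwriting the
// Y rotation above; combine the two with D3DXMatrixMultiply if both are intended.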
D3DXMatrixTranslation(&trans_matrix,2.0f,0,20.0f); //Shift it 2 units along X and 20 along Z
D3DXMatrixMultiply(&world_matrix,&rot_matrix,&trans_matrix);
D3DMATERIAL9 material;// = new D3DMATERIAL9();
ZeroMemory( &material, sizeof(D3DMATERIAL9) );
// Set the RGBA for diffuse reflection.
material.Diffuse.r = 0.5f;
material.Diffuse.g = 0.0f;
material.Diffuse.b = 0.5f;
material.Diffuse.a = 1.0f;
// Set the RGBA for ambient reflection.
material.Ambient.r = 0.5f;
material.Ambient.g = 0.0f;
material.Ambient.b = 0.5f;
material.Ambient.a = 1.0f;
// Set the color and sharpness of specular highlights.
material.Specular.r = 1.0f;
material.Specular.g = 1.0f;
material.Specular.b = 1.0f;
material.Specular.a = 1.0f;
material.Power = 2.0f;
// Set the RGBA for emissive color.
material.Emissive.r = 0.0f;
material.Emissive.g = 0.0f;
material.Emissive.b = 0.0f;
material.Emissive.a = 0.0f;
g_d3d_device->SetMaterial(&material);
g_d3d_device->SetTexture(0,NULL);
g_d3d_device->SetTransform(D3DTS_WORLD,&world_matrix);
m_ppMeshCylinder->DrawSubset(0);
////Render from our Vertex Buffer
//g_d3d_device->DrawPrimitive(D3DPT_TRIANGLELIST, //PrimitiveType
// 0, //StartVertex
// g_pyramid_count); //PrimitiveCount
rot_triangle+=0.0007f;
if(rot_triangle > D3DX_PI*2)
{
rot_triangle-=D3DX_PI*2;
}
rot_triangle2+=0.0007f;
if(rot_triangle2 > D3DX_PI*2)
{
rot_triangle2-=D3DX_PI*2;
}
}
Or download the project; I have attached my code here: "project code".
I want to draw it with the 3D shading that any 3D mesh generally has when rendered by default.
I am not very familiar with materials. Or is it a problem with the graphics card? (Just a thought :D)
Also, where can I get information and samples about SetRenderState?
Try:
g_d3d_device->SetRenderState( D3DRS_DIFFUSEMATERIALSOURCE, D3DMCS_MATERIAL );
At the moment it is defaulting to "Color 1", which is the first of the two possible vertex colours.
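The DirectX SDK documentation for IDirect3DDevice9::SetRenderState lists all the available render states. For the default-looking 3D shading you also need lighting enabled and at least one light; a minimal sketch (using the same g_d3d_device; the light values are illustrative):
// Take diffuse/ambient from the material rather than vertex colors,
// enable fixed-function lighting, and add one directional light.
g_d3d_device->SetRenderState(D3DRS_DIFFUSEMATERIALSOURCE, D3DMCS_MATERIAL);
g_d3d_device->SetRenderState(D3DRS_AMBIENTMATERIALSOURCE, D3DMCS_MATERIAL);
g_d3d_device->SetRenderState(D3DRS_LIGHTING, TRUE);
g_d3d_device->SetRenderState(D3DRS_AMBIENT, D3DCOLOR_XRGB(32, 32, 32));
D3DLIGHT9 light;
ZeroMemory(&light, sizeof(light));
light.Type = D3DLIGHT_DIRECTIONAL;
light.Diffuse.r = light.Diffuse.g = light.Diffuse.b = 1.0f;
light.Direction = D3DXVECTOR3(0.0f, -0.5f, 1.0f); // illustrative direction
g_d3d_device->SetLight(0, &light);
g_d3d_device->LightEnable(0, TRUE);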