Passing a matrix to the shader?

Hello there.

Another shader problem I’ve got… :frowning:
I need to pass a matrix value to my shader.
To do this, I passed a NodePath as described in the manual:

[Parameter k_anything would match data supplied by the call setShaderInput("anything", myNodePath)]

but when I tried this, I got an error. The message was:
Assertion failed: Shader input prevModelView is not a vector.

If a NodePath is treated as a vector, how can I pass a matrix value?
I’ve been stuck on these shader problems for days.
Please help me.

thanks

Hi
Unfortunately, setShaderInput('anything', Vec4) has to use a Vec4.

So either decompose your matrix into 4 vectors, or check whether the standard matrix system can give you the one you want:

panda3d.org/manual/index.php/S … ate_Spaces

Actually, it can be a NodePath too. If you want to pass a matrix to the shader, you should set it as the NodePath’s transform and pass that NodePath to setShaderInput.
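Something like this (just a sketch; the helper and input names are mine, not from the manual):

from pandac.PandaModules import NodePath, PandaNode

def setMatrixInput(model, name, mat):
    # Store the matrix as the transform of a dummy NodePath and hand that
    # NodePath to setShaderInput; on the shader side declare it as
    # "uniform float4x4 k_<name>", not float4.
    holder = NodePath(PandaNode(name + "-holder"))
    holder.setMat(mat)
    model.setShaderInput(name, holder)
    return holder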

Looking at your code, I think that’s what you’re doing - but I think you have it in your shader as type float4 instead of float4x4.

OK, here’s my code to test the shader (motion blur).
Could you point out what I did wrong?
There are probably many errors in it even if I do succeed in passing the matrix value.

Long story short, this is how I try to pass the matrix value:

        self.prevModelViewNP = NodePath(PandaNode("Previous ModelView Matrix"))
        modelview = base.camera.getMat( base.camera )
        self.prevModelViewNP.setMat(modelview)
        self.teapot.setShaderInput('prevModelView',self.prevModelViewNP)
**shader
void vshader( ....,  uniform float4x4 k_prevModelView, ...)

Here’s my full code

import direct.directbase.DirectStart
from direct.showbase.DirectObject import DirectObject
from pandac.PandaModules import *

class World(DirectObject):
    def __init__(self):
        self.modelBuffer = self.makeFBO("offscreen buffer", 1)

        self.texDepth = Texture()
        self.texDepth.setFormat(Texture.FDepthStencil)
        self.modelBuffer.addRenderTexture(self.texDepth,  GraphicsOutput.RTMBindOrCopy, GraphicsOutput.RTPDepthStencil)

        base.camLens.setNearFar(1.0,10000)
        base.camLens.setFov(75)
        base.disableMouse()

        self.teapot=loader.loadModel('teapot')
        self.teapot.setScale( 10.0 )
        self.teapot.reparentTo(render)
        self.teapot.setPos(0,-20,10)
        self.teapotMovement = self.teapot.hprInterval(5,Point3(0,360,360))
        self.teapotMovement.loop()

        self.LCam=base.makeCamera(self.modelBuffer)
        self.LCam.node().setScene(render)
        self.LCam.node().getLens().setFov(40)
        self.LCam.node().getLens().setNearFar(10,100)

        base.cam.reparentTo(render)
        base.cam.setPos(30,-45,26)
        base.cam.lookAt(0,0,0)
        self.LCam.node().hideFrustum()

        self.mci = NodePath(PandaNode("Main Camera Initializer"))
        self.prevModelViewNP = NodePath(PandaNode("Previous ModelView Matrix"))
        modelview = base.camera.getMat( base.camera )
        self.prevModelViewNP.setMat(modelview)

        self.prevModelViewProjNP = NodePath(PandaNode("Previous ModelViewProj Matrix"))
        self.prevModelViewProjNP.setMat(modelview*base.camLens.getProjectionMat())
        
        self.mci.setShader(Shader.load('mblur.sha'))
        base.cam.node().setInitialState(self.mci.getState())

        taskMgr.add(self.makeInput, "setShaderInput")

    def makeInput(self, task):
        self.teapot.setShaderInput('prevModelView',self.prevModelViewNP)
        self.teapot.setShaderInput('prevModelViewProj',self.prevModelViewProjNP)
        self.teapot.setShaderInput('halfWindowSize',Vec4(400,300,1,0))
        self.teapot.setShaderInput('sceneTex',self.texDepth)

        modelview = base.camera.getMat( base.camera )
        self.prevModelViewNP.setMat(modelview)
        self.prevModelViewProjNP.setMat(modelview*base.camLens.getProjectionMat())
        return task.cont

    def makeFBO(self, name, auxrgba):
        winprops = WindowProperties()
        props = FrameBufferProperties()
        props.setRgbColor(1)
        props.setAlphaBits(1)
        props.setDepthBits(1)
        props.setAuxRgba(auxrgba)
        return base.graphicsEngine.makeOutput(
             base.pipe, "model buffer", -2,
             props, winprops,
             GraphicsPipe.BFSizeTrackHost | GraphicsPipe.BFCanBindEvery | 
             GraphicsPipe.BFRttCumulative | GraphicsPipe.BFRefuseWindow,
             base.win.getGsg(), base.win)

World()
run()

**Shader
//Cg
//
void vshader(float4 vtx_position : POSITION,
             float3 vtx_normal : NORMAL,
             float2 vtx_texcoord0 : TEXCOORD0,

             uniform float4x4 mat_modelview,
             uniform float4x4 k_prevModelView,
             uniform float4x4 mat_modelproj,
             uniform float4x4 k_prevModelViewProj,
             uniform float4   k_halfWindowSize,

             out float4 l_position : POSITION,
             out float4 l_color1 : COLOR1,
             out float3 l_texcoord0 : TEXCOORD0,
             out float3 l_color0 : COLOR0
             )
{
  // transform previous and current position to eye space
  float4 P = mul(mat_modelview, vtx_position);
  float4 Pprev = mul(k_prevModelView, vtx_position);

  // transform normal to eye space
  float3 N = mul((float3x3) mat_modelview, vtx_normal);

  // calculate eye space motion vector
  float3 motionVector = P.xyz - Pprev.xyz;

  // calculate window space motion vector
  P = mul(mat_modelproj, vtx_position);
  Pprev = mul(k_prevModelViewProj, vtx_position);

  Pprev = lerp(P, Pprev, 10.0);

  // choose previous or current position based on dot product between motion vector and normal
  float flag = dot(motionVector, N) > 0;
  float4 Pstretch = flag ? P : Pprev;
  l_position = Pstretch;
  l_color1 = Pstretch;

  // do divide by W -> NDC coordinates
  P.xyz = P.xyz / P.w;
  Pprev.xyz = Pprev.xyz / Pprev.w;
  Pstretch.xyz = Pstretch.xyz / Pstretch.w;

  // calculate window space velocity
  float3 dP = (P.xyz - Pprev.xyz) * k_halfWindowSize.xyz;

  l_texcoord0 = dP;
  l_color0.xy = 0.5 + (dP.xy * 0.005);
}

void fshader(in float4 l_color1 : COLOR1,
             in float3 l_texcoord0 : TEXCOORD0,
             uniform sampler2D k_sceneTex,
             out float4 o_color : COLOR
             )
{
  const float samples = 16;

  float2 wpos = l_color1.xy;
  // read velocity from texture coordinate
  float2 velocity = l_texcoord0.xy * 1.0;           

  // sample into scene texture along motion vector
  const float w = 1.0 / samples;  // weight
  o_color = 0;
  for(float i=0; i<samples; i+=1) {
    float t = i / (samples-1);
    o_color = o_color + tex2D(k_sceneTex, wpos + velocity*t) * w;
  }
}

Looks right to me, except that this doesn’t make any sense:

base.camera.getMat( base.camera ) 

You’re getting the transform of a camera relative to the camera itself? That returns the identity matrix.
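If what you’re after is the teapot’s previous-frame modelview, something like this is probably closer to the intent (just a guess at what you meant; "world" stands for your World instance):

# Sketch: take the model's transform relative to the main camera,
# not the camera relative to itself (which is always identity).
modelview = world.teapot.getMat(base.camera)
world.prevModelViewNP.setMat(modelview)
world.prevModelViewProjNP.setMat(modelview * base.camLens.getProjectionMat())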

As for the error, I recall fixing a bug there when I ran into the issue myself. I don’t remember exactly when; if you’re already running 1.6.2, the fix has probably only been checked into CVS so far.

First, passing a matrix using a NodePath almost works. However, the matrix is transformed to be relative to the model when you do this, so it will be different from what you expect unless you parent the NodePath you are taking the matrix from to the model. On top of this, if I remember correctly, a lot of precision is lost from transforming to world space and back, so I just used 4 vectors and rebuilt the matrix in the shader…
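A minimal sketch of that 4-vector workaround (the helper and the k_<name>0..3 naming are just for illustration, not from any Panda3D API):

from pandac.PandaModules import Vec4

def setMatrixAsVectors(model, name, mat):
    # Pass each row of the Mat4 as its own Vec4 shader input
    # (k_prevModelView0 .. k_prevModelView3 on the shader side) and rebuild
    # the matrix there with float4x4(row0, row1, row2, row3).  Depending on
    # row/column-major conventions you may need to transpose it before
    # feeding it to mul().
    for i in range(4):
        model.setShaderInput(name + str(i), Vec4(mat.getRow(i)))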

Hm. Maybe I should just add a way to directly pass a matrix.

Thanks for the replies!
There are still some problems, but anyway, now I can pass the matrix :slight_smile: