Keeping the last n offscreen texture frames

I am generating an offscreen texture each frame using a shader, and would like to be able to keep the last n (~16) frames in texture memory for use in another shader.

I am currently trying to use a single graphics buffer for this, binding a new texture to it each frame with RTMCopyTexture. I keep handles to the n textures and want to cycle through them.

The problem is that whenever I try to disassociate the previous texture from the buffer (using clearRenderTextures()) and associate a new one, the old one doesn't seem to stay on the graphics card - at least according to base.win.getGsg().getPreparedTextures() - and I just end up with junk in those textures.

I've also tried just rendering to the same texture and using makeCopy(), but that seems to copy the RAM image, which I don't have because I rendered into graphics card memory. (Note I'm using the Panda 1.6.2 makeCopy fix.)
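
As an aside, one workaround I can think of is pulling the rendered frame back into a RAM image before calling makeCopy(). Newer Panda versions have GraphicsEngine.extractTextureData() for this - I'm not sure whether 1.6.2 exposes it, and it would presumably be about as slow as RTMCopyRam anyway. A rough sketch:

# Sketch only: download the GL texture into a RAM image, then copy it.
# Assumes extractTextureData() exists in this Panda build; 'tex' is the
# texture the buffer is rendering into.
gsg = base.win.getGsg()
base.graphicsEngine.extractTextureData(tex, gsg)
snapshot = tex.makeCopy()  # makeCopy() now has a RAM image to duplicate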

I should be able to do this, right? Without having to make n graphics buffers.

Any thoughts?

Hmm, seems like this ought to work, though it’s true it’s something we haven’t tried before. I’ll investigate.

David

Note that if I bind using RTMCopyRam it works fine, but it is slow.
I guess that is because Panda can just reload the texture from RAM when it realises it's not in texture memory.

The following test program works just fine for me. Does it work for you?

from direct.directbase.DirectStart import *
from pandac.PandaModules import *
from direct.actor.Actor import Actor

# Create an offscreen buffer
buffer = base.graphicsEngine.makeOutput(
    base.pipe, 'buffer', -1, FrameBufferProperties(),
    WindowProperties.size(512, 512),
    GraphicsPipe.BFRefuseWindow | GraphicsPipe.BFFbPropsOptional,
    base.win.getGsg(), base.win)

# Detach render from the main window
base.camNode.getDisplayRegion(0).setCamera(NodePath())

# Attach render to the offscreen buffer
dr = buffer.makeDisplayRegion()
dr.setCamera(base.cam)

# Put a walking Panda onstage in the offscreen buffer
base.disableMouse()
base.camera.setPosHpr(-16, -28, 3, -30, 5, 0)
dlnp = base.camera.attachNewNode(DirectionalLight('dl'))
render.setLight(dlnp)

a = Actor('panda.egg', {'walk' : 'panda-walk.egg'})
a.reparentTo(render)
a.loop('walk')

# Create a grid of 16 textures to be displayed under render2d
blank = PNMImage(1, 1)
blank.fill(0, 0, 1)

numX = 4
numY = 4
grid = render2d.attachNewNode('grid')
grid.setScale(2.0 / numX, 1, 2.0 / numY)
grid.setPos(-1, 0, -1)
texes = []
for yi in range(numY):
    for xi in range(numX):
        tex = Texture('tex%s_%s' % (xi, yi))
        tex.load(blank)
        texes.append(tex)
        cm = CardMaker('cm%s_%s' % (xi, yi))
        cm.setFrame(0, 1, 0, 1)
        card = grid.attachNewNode(cm.generate())
        card.setTexture(tex)
        card.setPos(xi, 0, numY - 1 - yi)
grid.flattenStrong()

# Spawn a task to copy the offscreen buffer to a different texture
# each frame.
def changeTex(task):
    i = globalClock.getFrameCount()
    tex = texes[i % len(texes)]
    buffer.clearRenderTextures()
    buffer.addRenderTexture(tex, buffer.RTMCopyTexture)
    return task.cont

taskMgr.add(changeTex, 'changeTex')
run()

Your example works fine. But I'm wondering if that is because you are using all 16 textures each frame. I only want to use certain past frames - e.g. a shader that combines the current frame and the frame from 16 frames ago.
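
To make the intent concrete, here is a rough sketch of the playback side I have in mind (the quad node, the texes list and the shader file are placeholders I made up):

from pandac.PandaModules import TextureStage, Shader

# Sketch: bind the newest stored frame and the oldest one (roughly 16 frames
# old) to a fullscreen quad, and let a Cg shader combine the two.
def bindFrames(quad, texes, frameIndex):
    stageNow = TextureStage('frame_now')
    stageOld = TextureStage('frame_old')
    quad.setTexture(stageNow, texes[frameIndex % len(texes)])
    quad.setTexture(stageOld, texes[(frameIndex + 1) % len(texes)])  # slot about to be overwritten, i.e. the oldest frame
    quad.setShader(Shader.load('combine_frames.sha'))  # hypothetical shader file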

I made a test example which tries to store the 28 rendered frames of the panda walk animation in texture memory and then play them back.
When I play them back I get rubbish in my textures. However, if I use RTMCopyRam it works fine.

from pandac.PandaModules import *
from direct.actor.Actor import Actor

loadPrcFileData("", "sync-video #t") #sync to screen refresh rate
loadPrcFileData("", "show-buffers #t")

from direct.showbase.DirectObject import DirectObject
import direct.directbase.DirectStart

class World(DirectObject):
    def __init__(self):
        self.a = Actor('panda.egg', {'walk' : 'panda-walk.egg'})

        self.n = self.a.getNumFrames('walk')
        self.theTexture = [None] * self.n
        self.newBuffer = base.graphicsEngine.makeBuffer(base.win.getGsg(), "name", -50, 512, 512)

        # Set up a fullscreen card to set the video texture on.
        self.cm = CardMaker("My Fullscreen Card")
        self.cm.setFrameFullscreenQuad()
        self.card = NodePath(self.cm.generate())
        self.card.reparentTo(render2d)

        #setup rendering the panda to the buffer
        self.scene = NodePath(PandaNode('scene'))
        self.cam = base.makeCamera(self.newBuffer)
        self.cam.node().setScene(self.scene)
        self.cam.setPosHpr(-16, -28, 3, -30, 5, 0)
        dlnp = self.cam.attachNewNode(DirectionalLight('dl'))
        self.scene.setLight(dlnp)

        self.a.reparentTo(self.scene)

        self.gameTask = taskMgr.add(self.gameLoop, "gameLoop")

    def gameLoop(self, task):
        frameIndex = (globalClock.getFrameCount()-1) % self.n #globalClock.getFrameCount() seems to start at 1
        self.checkTexture(frameIndex)
        self.card.setTexture(self.theTexture[frameIndex])
        self.a.pose('walk', frameIndex)
        #print base.win.getGsg().getPreparedTextures()

        return task.cont


    def checkTexture(self, index):
        if self.theTexture[index] is None:
            #Render the frame to the texture
            self.theTexture[index] = Texture(str(index))
            self.theTexture[index].setRenderToTexture(False)
            self.newBuffer.clearRenderTextures()
            #self.newBuffer.addRenderTexture(self.theTexture[index], GraphicsOutput.RTMCopyTexture, GraphicsOutput.RTPColor)
            self.newBuffer.addRenderTexture(self.theTexture[index], GraphicsOutput.RTMCopyRam, GraphicsOutput.RTPColor)
            self.newBuffer.setActive(True)
        else:
            #Do nothing, as the texture should already contain the frame
            self.newBuffer.setActive(False)
        
w = World()
run()

It seems that the textures get released. Is there some way to stop this?

Hmm, your example runs fine for me, with either RTMCopyTexture or RTMCopyRam. What kind of graphics card do you have, what is your operating system, and have you tried it with OpenGL, DirectX, and tinydisplay?

David

I’m running it on a GeForce 9800 GT, with OpenGL on Windows XP.
I tried DirectX 8 and 9 but they just spit out errors.

:display:gsg:dxgsg9(error): SetRenderTarget at (c:\p\p3d\panda3d-1.6.2\panda\src\dxgsg9\wdxGraphicsBuffer9.cxx:477), hr=D3DERR_INVALIDCALL: Invalid call
c:\p\p3d\panda3d-1.6.2\panda\src\dxgsg9\wdxGraphicsBuffer9.cxx 477

If I uncomment the line print base.win.getGsg().getPreparedTextures() I get:
[]
[panda_torso, panda_head, panda_viser, panda_hat, 0]
[panda_torso, panda_head, panda_viser, panda_hat, 1]
[panda_torso, panda_head, panda_viser, panda_hat, 2]

[panda_torso, panda_head, panda_viser, panda_hat, 27]
[panda_torso, panda_head, panda_viser, panda_hat, 0, 27]
[panda_torso, panda_head, panda_viser, panda_hat, 0, 27, 1]
[panda_torso, panda_head, panda_viser, panda_hat, 0, 27, 1, 2]
[panda_torso, panda_head, panda_viser, panda_hat, 0, 27, 1, 2, 3]

[panda_torso, panda_head, panda_viser, panda_hat, 0, 27, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]

This indicates that the textures aren't staying prepared until the second loop through, by which point they might already contain rubbish.

Note that sometimes if I run it with RTMCopyRam and then rerun it with RTMCopyTexture it gets it kinda right, because the rubbish texture memory happens to have correct-looking frames in it from the last time I ran it.

Hang on - I tried "load-display tinydisplay" as you mentioned, and it seems to work.

The prepared texture list goes up correctly straight away.
[]
[panda_torso, panda_head, panda_viser, panda_hat, 0]
[panda_torso, panda_head, panda_viser, panda_hat, 0, 1]
[panda_torso, panda_head, panda_viser, panda_hat, 0, 1, 2]
[panda_torso, panda_head, panda_viser, panda_hat, 0, 1, 2, 3]

[panda_torso, panda_head, panda_viser, panda_hat, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27]

Except tinydisplay doesn't seem to support some of the other features I want to use at the same time.

hmmm.

This is strange indeed. I don't know what would be causing OpenGL to release the textures. It doesn't seem like it could be a bug in your driver, since the texture is actually getting dropped out of the Panda data structures; so it must be something Panda is doing wrong. But why isn't it going wrong for me as well?

I’ll have to continue to investigate.

David

It seems that whenever clearRenderTextures() is called, those textures get unprepared, no matter how much I try to force them to stay prepared.
I'm wondering: does holding a Python pointer to a texture stop GraphicsOutput::_textures.clear() from calling destructors on the internal PT(Texture) _texture objects (in its RenderTexture class)?

I.e. are the C++ PT() pointers compatible with Python pointers in the way they reference-count, etc.?

Holding a Python pointer to a Texture object does indeed prevent the PT(Texture) destructor from being called. But you are already doing that: your theTexture array keeps all of the Python Texture objects.

It’s not that the Texture itself is being destructed; it’s that its internal GLTexture object is being released, which you don’t have direct control over. This is only supposed to happen when the Texture or the GSG is destructed, though, or you call releaseTextures() explicitly.

Hmm, there is also a graphics-memory-limit config variable which, if set, marks the threshold above which the GSG will start automatically releasing textures. I doubt you've set that accidentally, though; you can check it with "print ConfigVariableInt('graphics-memory-limit')".
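
For reference, checking it (and, if a .prc file had somehow set it, raising it) would look something like this - just a sketch:

from pandac.PandaModules import ConfigVariableInt, loadPrcFileData

# Print the current value; by default no limit is set, so automatic
# releasing should not be kicking in for this reason.
print(ConfigVariableInt('graphics-memory-limit'))

# If a .prc file had set it too low, it could be raised before the window
# opens (value in bytes, chosen arbitrarily here):
loadPrcFileData('', 'graphics-memory-limit 268435456')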

I suspect there might be a bug in 1.6.2 that has been fixed on the CVS trunk (which is what I'm running). I haven't had a chance to download 1.6.2 yet to confirm this, though.

David

Hmm. What happens if you replace your call to makeBuffer() with my call to makeOutput()? The two aren’t exactly equivalent. I think makeBuffer() is not doing the right thing here.

David

Awesome!
I replaced

self.newBuffer = base.graphicsEngine.makeBuffer(base.win.getGsg(), "name", -50, 512, 512)

with

self.newBuffer = base.graphicsEngine.makeOutput(
            base.pipe, 'buffer', -1, FrameBufferProperties(),
            WindowProperties.size(512, 512),
            GraphicsPipe.BFRefuseWindow | GraphicsPipe.BFFbPropsOptional,
            base.win.getGsg(), base.win)

and now it seems to work.

Thank you so much, David.

Guys,

Is it possible to save this image buffer as a JPG image on the hard disk?

It sure is possible. In the future, please feel free to make a new topic even for simple questions like this; I feel it's better than resurrecting a tangentially related thread.
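
For example, here is a minimal sketch of two common approaches (the file names are just examples, and buffer/tex stand for the buffer and texture objects from earlier in this thread):

from pandac.PandaModules import Filename

# 1) Save whatever the offscreen buffer currently shows:
buffer.saveScreenshot(Filename('frame.jpg'))

# 2) Or, if the frame was copied into a texture with RTMCopyRam (so the
#    texture has a RAM image), write it straight to disk:
tex.write(Filename('frame.jpg'))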