Hi,
I need to render RGB and depth frames on a headless server, and it seems I need to use p3tinydisplay as the rendering engine. My code works fine when using pandagl, but strangely it only produces binary (black/white) images with p3tinydisplay. Specifically, the depth information is missing and only the object's silhouette is rendered. Any ideas how to fix this? Code is below — simply comment/uncomment the two `load-display` loadPrcFileData lines to switch between the engines.
I am running Panda3D 1.9.2 on Ubuntu 14.04 with the latest NVIDIA drivers on a GTX 980.
Thanks!
import numpy
import os
import scipy.misc
from panda3d.core import loadPrcFileData
from pandac.PandaModules import GraphicsPipe, PerspectiveLens, FrameBufferProperties, TransformState, \
RenderState, GraphicsOutput, Texture, VBase4, WindowProperties, TransparencyAttrib
from direct.showbase.ShowBase import ShowBase
class RGBDRenderer(ShowBase):
    """Offscreen Panda3D renderer that produces RGB and depth frames of a model.

    Two offscreen graphics buffers are created against a hidden host window:
    one whose depth(-stencil) attachment is captured into a texture, and one
    whose color attachment is captured into a texture. A full-screen texture
    card on ``render2d`` is used to blit whichever texture we want to read
    back, and the frame is then fetched via ``getScreenshot`` into a numpy
    array.

    Parameters
    ----------
    modelFile : str
        Path to the .egg model to load and render.
    size : int, optional
        Edge length (pixels) of the square output frames. Defaults to 128,
        matching the original hard-coded behavior.
    """

    def __init__(self, modelFile, size=128):
        # Prc settings must be applied before ShowBase.__init__ opens the
        # window, otherwise they have no effect on the created pipe.
        loadPrcFileData("", "audio-library-name null")
        loadPrcFileData("", "load-display pandagl")
        # loadPrcFileData("", "load-display p3tinydisplay")
        loadPrcFileData("", "window-type offscreen")
        loadPrcFileData("", "sync-video 0")
        loadPrcFileData("", "win-size {} {}".format(size, size))
        self.size = size  # square frame edge length, used for reshaping readbacks
        ShowBase.__init__(self)
        self.disableMouse()
        # Remove the automatic render task; frames are rendered explicitly
        # via renderFrame() so readbacks are deterministic.
        self.taskMgr.remove('igLoop')

        # --- set up offscreen buffers -----------------------------------
        winprops = WindowProperties()
        winprops.setTitle("My Window")
        winprops.setSize(size, size)
        props = FrameBufferProperties()
        props.setRgbColor(1)
        props.setColorBits(1)
        props.setAlphaBits(1)
        props.setDepthBits(1)
        props.setRgbaBits(8, 8, 8, 8)
        self.displayRegion = self.graphicsEngine.makeOutput(
            self.pipe, "depth buffer", 1,
            props, winprops,
            GraphicsPipe.BFSizeTrackHost | GraphicsPipe.BFCanBindEvery |
            GraphicsPipe.BFRttCumulative | GraphicsPipe.BFRefuseWindow,
            self.win.getGsg(), self.win)
        self.buffer_rgb = self.graphicsEngine.makeOutput(
            self.pipe, "rgb buffer", 2,
            props, winprops,
            GraphicsPipe.BFSizeTrackHost | GraphicsPipe.BFCanBindEvery |
            GraphicsPipe.BFRttCumulative | GraphicsPipe.BFRefuseWindow,
            self.win.getGsg(), self.win)
        if self.displayRegion is None or self.buffer_rgb is None:
            # makeOutput returns None when the pipe can't satisfy the request.
            raise RuntimeError("Failed to create buffers")
        self.graphicsEngine.renderFrame()

        # --- create render-target textures ------------------------------
        self.texDepth = Texture()
        self.texRgb = Texture()
        self.texDepth.setFormat(Texture.FDepthStencil)
        self.texRgb.setFormat(Texture.FRgb)
        self.displayRegion.addRenderTexture(self.texDepth,
                                            GraphicsOutput.RTMBindOrCopy,
                                            GraphicsOutput.RTPDepthStencil)
        self.buffer_rgb.addRenderTexture(self.texRgb,
                                         GraphicsOutput.RTMBindOrCopy,
                                         GraphicsOutput.RTPColor)
        assert self.texDepth == self.displayRegion.getTexture(), "Texture wasn't created properly."
        assert self.texRgb == self.buffer_rgb.getTexture(), "Texture wasn't created properly."

        # --- create cameras ---------------------------------------------
        # Both cameras share one lens; near/far are retuned per pose in setPose.
        self.perspectiveLens = PerspectiveLens()
        self.perspectiveLens.setNear(0.1)
        self.perspectiveLens.setFar(10)
        self.depth_cam = self.makeCamera(self.displayRegion,
                                         lens=self.perspectiveLens, scene=self.render)
        self.rgb_cam = self.makeCamera(self.buffer_rgb,
                                       lens=self.perspectiveLens, scene=self.render)
        self.depth_cam.setPos(0, 0, 0)
        self.rgb_cam.setPos(0, 0, 0)
        # Sort order: depth buffer first, then rgb buffer, then the host window.
        self.displayRegion.setSort(1)
        self.buffer_rgb.setSort(2)
        self.buffer_rgb.setClearColorActive(True)
        self.buffer_rgb.setClearColor(VBase4(0, 0, 0, 1))
        self.win.setSort(3)
        self.graphicsEngine.renderFrame()

        # Full-screen card in the host window used to display (and later
        # screenshot) whichever offscreen texture is selected.
        self.texture_card = self.displayRegion.getTextureCard()
        self.texture_card.reparentTo(self.render2d)
        # Reuse self.displayRegion as the host window's display region from
        # here on; this is what getScreenshot() reads in the render* methods.
        self.displayRegion = self.win.makeDisplayRegion()
        self.displayRegion.setCamera(self.rgb_cam)
        self.graphicsEngine.renderFrame()

        # --- load the model ---------------------------------------------
        if not os.path.isfile(modelFile):
            raise IOError("Model-Egg File not found: " + str(modelFile))
        objectModel = self.loader.loadModel(modelFile)
        objectModel.setTransparency(TransparencyAttrib.MAlpha)
        objectModel.reparentTo(self.render)
        objectModel.setPos(0, 0, 0)
        objectModel.show()
        self.objectModel = objectModel
        self.graphicsEngine.renderFrame()
        # Render once through the full pipeline so later readbacks are warm.
        self.setPose(0, 1, 2)
        self.renderDepth()

    def setPose(self, heading=0, pitch=0, roll=0):
        """Rotate the model to (heading, pitch, roll) and refit the cameras.

        The camera distance and the lens near/far planes are recomputed from
        the model's tight bounding box so the model fills the frame and the
        depth range is fully used.
        """
        self.objectModel.setHpr(heading, pitch, roll)
        modelMin, modelMax = self.objectModel.getTightBounds()
        dim = modelMax - modelMin
        # Fit the larger of width/height against the matching FOV axis.
        if dim.getX() >= dim.getZ():
            dist = (dim.getX() / 2.0) / numpy.tan(numpy.radians(self.rgb_cam.node().getLens().getFov()[0] / 2.0))
        else:
            dist = (dim.getZ() / 2.0) / numpy.tan(numpy.radians(self.rgb_cam.node().getLens().getFov()[1] / 2.0))
        cd = dist + dim.getY()
        near = cd - dim.getY()
        far = cd + dim.getY()
        self.rgb_cam.node().getLens().setNear(near)
        self.rgb_cam.node().getLens().setFar(far)
        self.depth_cam.node().getLens().setNear(near)
        self.depth_cam.node().getLens().setFar(far)
        self.rgb_cam.setY(-cd)
        self.depth_cam.setY(-cd)
        self.graphicsEngine.renderFrame()

    def renderRGB(self):
        """Render and return the RGB frame as a (size, size, 3) uint8 array."""
        self.texture_card.setTexture(self.texRgb, 1)
        TransformState.garbageCollect()
        RenderState.garbageCollect()
        self.graphicsEngine.renderFrame()
        tex = self.displayRegion.getScreenshot()
        buf = tex.getRamImage().getData()
        # numpy.fromstring is deprecated (removed in modern numpy);
        # frombuffer is the supported equivalent for raw byte data.
        col = numpy.flipud(numpy.frombuffer(buf, dtype=numpy.uint8).reshape((self.size, self.size, 4)))
        return col[:, :, 0:3]  # drop the alpha channel

    def renderDepth(self):
        """Render and return the depth frame as a (size, size) float32 array in [0, 1]."""
        self.texture_card.setTexture(self.texDepth, 1)
        TransformState.garbageCollect()
        RenderState.garbageCollect()
        self.graphicsEngine.renderFrame()
        tex = self.displayRegion.getScreenshot()
        buf = tex.getRamImage().getData()
        # Reinterpret each RGBA8 pixel of the blitted depth texture as one
        # uint32, then normalize to [0, 1]. NOTE(review): this assumes the
        # byte packing of the depth-stencil texture — verify per driver.
        dpt = numpy.flipud(numpy.frombuffer(buf, dtype=numpy.uint32).reshape((self.size, self.size)))
        return dpt.astype('float32') / (2 ** 32 - 1)
if __name__ == "__main__":
    # Demo: render one sample model at a fixed pose and display both channels.
    model_path = '/usr/share/panda3d/samples/bump-mapping/models/icosphere.egg'
    renderer = RGBDRenderer(model_path)
    renderer.setPose(10, 10, 20)
    rgb_frame = renderer.renderRGB()
    depth_frame = renderer.renderDepth()
    scipy.misc.imshow(rgb_frame)
    scipy.misc.imshow(depth_frame)