Hi,
I'm having trouble understanding how to merge two code snippets I found on the forum.
First, I want to render a fisheye view of a scene. For this I found a snippet on the forum (the first link), which works great.
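Roughly, that snippet renders the scene into a cube map and projects it onto a card generated by FisheyeMaker. I'm paraphrasing it from memory here, so the exact names and parameters may differ from the original post; it assumes a normal ShowBase app with base.win:

# render the scene into a cube map around a rig, then show it through a FisheyeMaker card
rig = base.camera.attachNewNode("rig")
cube_buffer = base.win.makeCubeMap("cube", 512, rig)

fm = FisheyeMaker('card')
fm.setNumVertices(360)
fm.setSquareInscribed(1, 1.1)
fm.setReflection(True)

card = base.render2d.attachNewNode(fm.generate())
card.setTexture(cube_buffer.getTexture())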
Second, I want to render the whole thing for multiple offscreen cameras (without a GUI) and store the rendered images in a numpy array. For that I found a second snippet (the second link) that does exactly that.
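The pattern there is roughly the following (again simplified, variable names are mine): create an offscreen buffer with a texture in RTMCopyRam mode, render a frame, and read the texture back into numpy with getRamImage().

# offscreen buffer whose texture is copied to RAM each frame
buffer = base.graphicsEngine.make_output(base.pipe, 'buffer', -2,
                                         FrameBufferProperties(), WindowProperties.size(540, 540),
                                         GraphicsPipe.BFRefuseWindow, base.win.getGsg(), base.win)
texture = Texture()
buffer.addRenderTexture(texture, GraphicsOutput.RTMCopyRam)
camera = base.makeCamera(buffer, lens=PerspectiveLens())
camera.reparentTo(base.render)

# render once, then read the texture back as a numpy array
base.graphicsEngine.renderFrame()
data = texture.getRamImage()
image = np.frombuffer(data, np.uint8).reshape(texture.getYSize(),
                                              texture.getXSize(),
                                              texture.getNumComponents())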
Unfortunately I can’t manage to merge the two.
The first thing I tried was to simply replace the PerspectiveLens with a FisheyeLens in the second snippet, but then I get the same error that the user sal reports in the first link (in his second post).
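Concretely, the only thing I changed in the second snippet was the lens setup:

# first attempt: just swap the lens type in init_camera()
lens = FisheyeLens()          # instead of PerspectiveLens()
lens.set_film_size((540, 540))
lens.set_fov(120)
lens.set_near_far(0.1, 100)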
After that, I tried to use the solution from the first link within the code of the second link, but got this error:
ValueError: cannot reshape array of size 0 into shape (512,512,4)
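So getRamImage() apparently returns no data at all for the card's texture; a quick check inside get_image() confirms that the returned buffer really is empty:

data = self.card.getTexture().getRamImage()
print(np.frombuffer(data, np.uint8).size)  # prints 0, which matches the reshape error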
Could someone help me get this working, or help me understand the problem? I'm new to Panda3D and currently working through some tutorials, but I haven't found any hints on how to solve this yet.
My current code looks like this:
import time

import cv2 as cv
import numpy as np
from panda3d.core import *
from direct.showbase.ShowBase import ShowBase


class SceneSimulator(ShowBase):

    def __init__(self):
        ShowBase.__init__(self, fStartDirect=True, windowType='offscreen')
        self.camera = None
        self.buffer = None
        self.card = None

    def render_frame(self):
        self.graphics_engine.render_frame()

    def init_camera(self, pos, hpr, name=None):
        # set up texture and graphics buffer
        window_props = WindowProperties.size(540, 540)
        frame_buffer_props = FrameBufferProperties()
        self.buffer = self.graphicsEngine.make_output(self.pipe,
                                                      f'Image Buffer [{name}]',
                                                      -2,
                                                      frame_buffer_props,
                                                      window_props,
                                                      GraphicsPipe.BFRefuseWindow,  # don't open a window
                                                      self.win.getGsg(),
                                                      self.win)
        texture = Texture()
        self.buffer.addRenderTexture(texture, GraphicsOutput.RTMCopyRam)

        # set up lens according to camera intrinsics
        lens = PerspectiveLens()
        lens.set_film_size((540, 540))
        lens.set_fov(120)
        lens.set_near_far(0.1, 100)
        self.camera = self.makeCamera(self.buffer, lens=lens, camName=f'Image Camera [{name}]')
        self.camera.reparentTo(self.render)
        self.camera.setPos(*pos)
        self.camera.setHpr(*hpr)

        # make fisheye rendering
        size = 512
        numVertices = 360
        rig = self.camera.attachNewNode("rig")
        buffer_cube = self.win.makeCubeMap("test", size, rig)
        fm = FisheyeMaker('card')
        fm.setNumVertices(numVertices)
        fm.setSquareInscribed(1, 1.1)
        fm.setReflection(True)
        self.card = self.render2d.attachNewNode(fm.generate())
        self.card.setTexture(buffer_cube.getTexture())

    def get_image(self) -> np.ndarray:
        tex = self.card.getTexture()
        # tex = self.buffer.getTexture()  # this works, but does not use the fisheye view
        data = tex.getRamImage()
        image = np.frombuffer(data, np.uint8)
        image.shape = (tex.getYSize(), tex.getXSize(), tex.getNumComponents())
        return np.flipud(image)


if __name__ == '__main__':
    sim = SceneSimulator()
    sim.init_camera((2, 2, 4), (0, 0, 0), name="cam")
    sim.camera.lookAt((0, 0, 0), up=(0, 0, 1))

    # place box in scene
    box = sim.loader.loadModel("models/box")
    box.reparentTo(sim.render)
    box.setScale(3, 3, 2)
    box.setPos(-2, -2, 0)
    sim.box = box

    start = time.time()
    sim.render_frame()
    cv.imshow("frame", sim.get_image())
    cv.waitKey(0)