Hi,
Been a while since I posted here. Lurking every now and then… anyways
I was busy with some other ideas and thought about doing this LLM AI thing with in Panda3d since Ollama [Locally] have a pretty nice Python Library…
*Uhm if I’m stepping on any toes let me know and I’ll remove whatever I need to remove…
Okay, so here is a snippet of code… and guess what, it's also generated by AI haha — well, assisted… prompt brute-forcing it! - jk
The Example is based on a AI NPC idea with a Pirate theme. VERY basic stuff.
The AI model generates a response based on some predefined starting points. When the response is received, it's parsed, and from it come the next 4 choices. Hopefully, after a conversation depth of 9, the conversation ends — not sure this works, but anyways, something along those lines. It's supposed to demo the idea of using the library.
Code:
from direct.gui.DirectButton import DirectButton
from direct.gui.OnscreenText import OnscreenText
from direct.showbase.ShowBase import ShowBase
from panda3d.core import TextNode, NodePath, ColorAttrib, VBase4
import threading
import ollama
from panda3d.core import loadPrcFileData
# Set window size before initializing Panda3D.
# PRC data must be loaded before ShowBase is constructed, or the setting is ignored.
loadPrcFileData('', 'win-size 1368 768')
# Config
OLLAMA_MODEL = "granite3.3:2b"  # tag of the locally installed Ollama model to query
DEFAULT_OLLAMA_SETTINGS = {
    "temperature": 0.7,  # moderate randomness for varied pirate banter
    "top_p": 1,          # no nucleus-sampling cutoff
}
# Developer Prompt
# System-style instruction prepended to every request to keep the model in character.
NPC_GAME_PROMPT = (
    "You are a cheerful pirate living in the magical world of Panda3D, filled with adventures and treasures. "
    "Stay completely in character, never break immersion, and talk only about pirate life and the 3D world."
)
# Developer defines first options
# Canned openers shown before the model has suggested anything.
INITIAL_PLAYER_CHOICES = [
    "Ahoy matey! Got any treasure maps to sell?",
    "Care to share a tale from the high seas?",
    "Seen any cursed ships around these waters?",
    "What's the best tavern in this port?"
]
MAX_CONVO_DEPTH = 9  # How deep the conversation can go
class Panda3DInterface(ShowBase):
    """Minimal Panda3D window for chatting with an LLM-driven pirate NPC.

    Flow: the player clicks "Start Talking" and then picks one of four
    choice buttons; the choice is sent to a local Ollama model on a
    background thread, and the pirate's reply plus the next four suggested
    player lines are parsed out of the raw response text. All GUI updates
    triggered from the worker thread are deferred through the task manager
    so they execute on the main thread.
    """

    def __init__(self):
        super().__init__()
        self.convo_depth = 0            # completed exchanges; capped by MAX_CONVO_DEPTH
        self.conversation_history = []  # [{"role": ..., "content": ...}] replayed to the model
        # No 3D environment — this demo is UI-only.
        self.scene = None
        # Point the camera at the origin anyway so the 2D overlay sits on a blank view.
        self.camera.setPos(0, -10, 5)
        self.camera.lookAt(0, 0, 0)
        self.create_ui()

    def create_ui(self):
        """Set up the UI: title/dialogue label, Start button, text backdrops."""
        # Shared dialogue label; update_label() moves it left/right per speaker.
        self.label = OnscreenText(
            text="Talk to the Pirate!",
            scale=0.1,
            pos=(0, 0.75),
            fg=(1, 1, 1, 1),
            align=TextNode.ACenter,
            font=self.loader.loadFont("cmr12"),
            shadow=(0.2, 0.2, 0.2, 1),
            wordwrap=12
        )
        self.choice_buttons = []
        # Button that kicks off (or restarts) the conversation.
        self.button = DirectButton(
            text="Start Talking",
            scale=0.15,
            pos=(0, 0, -0.85),
            command=self.start_conversation,
            frameColor=(0.2, 0.5, 0.8, 1),  # button background color
            text_scale=1,
            text_fg=(1, 1, 1, 1),
            borderWidth=(0.1, 0.1),
            relief='raised'
        )
        # Backdrops for NPC (left) and player (right) text.
        self.npc_bg = self.create_background(-0.7, 0.6)
        self.player_bg = self.create_background(0.7, 0.6)

    def create_background(self, x_pos, y_pos):
        """Create a positioned NodePath intended as a text backdrop.

        NOTE(review): a bare NodePath carries no geometry, so the colour and
        scale set here render nothing on screen; attach e.g. a CardMaker quad
        to this node to make the backdrop actually visible.
        """
        bg = NodePath("background")
        bg.setPos(x_pos, 0, y_pos)
        bg.setColor(0.1, 0.1, 0.1, 0.8)  # dark, semi-transparent
        bg.setScale(0.6, 1, 0.3)
        bg.reparentTo(self.render2d)
        return bg

    def start_conversation(self):
        """Begin (or restart) the conversation with the canned openers."""
        self.button["state"] = "disabled"
        # Fix: reset depth and history so the demo can be replayed after it ends.
        self.convo_depth = 0
        self.conversation_history = []
        self.clear_choices()
        self.update_choices(INITIAL_PLAYER_CHOICES)

    def select_choice(self, choice_text):
        """Handle the player clicking one of the choice buttons (main thread)."""
        if self.convo_depth >= MAX_CONVO_DEPTH:
            self.update_label("The pirate tips his hat and sails away into the sunset!")
            self.clear_choices()
            # Fix: re-enable the Start button so a fresh conversation can begin;
            # previously the UI dead-ended here with the button still disabled.
            self.button["state"] = "normal"
            return
        self.conversation_history.append({"role": "user", "content": choice_text})
        self.update_label(f"You: {choice_text}", player_side=True)
        self.clear_choices()
        # Query the model off the main thread so the window stays responsive.
        threading.Thread(target=self.query_npc_response, args=(choice_text,), daemon=True).start()

    def query_npc_response(self, player_input):
        """Ask the model for the pirate's reply plus the next player options.

        Runs on a worker thread; GUI changes are routed through the task
        manager so they happen on the main thread.
        """
        try:
            prompt = (
                f"{NPC_GAME_PROMPT}\n\n"
                f"Conversation so far:\n"
            )
            # Replay only the last few messages to keep the prompt small.
            for msg in self.conversation_history[-6:]:
                prompt += f"{msg['role'].capitalize()}: {msg['content']}\n"
            prompt += (
                f"\nPirate, respond to the player's latest message in a friendly pirate style. "
                "Then suggest 4 possible responses the player could say next, numbered 1-4."
            )
            # Ask for a longer answer when the player requests a story-like topic.
            if any(keyword in player_input.lower() for keyword in ["riddle", "story", "place", "myth"]):
                prompt += " Please give a detailed and extended response with a story or explanation."
            response = ollama.chat(
                model=OLLAMA_MODEL,
                options=DEFAULT_OLLAMA_SETTINGS,
                messages=[{"role": "user", "content": prompt}]
            )
            full_content = response['message']['content'].strip()
            # Everything before the first "\n1." is the pirate's speech; the
            # remainder (if any) is the numbered option list.
            pirate_speech, *choices_section = full_content.split("\n1.")
            pirate_speech = pirate_speech.strip()
            if choices_section:
                # Re-attach the "1." marker that split() consumed.
                choices_text = "1." + "1.".join(choices_section).strip()
                player_choices = self.parse_choices(choices_text)
            else:
                # Fix: no numbered list in the reply — offer no choices instead
                # of creating a single blank choice button.
                player_choices = []
            self.conversation_history.append({"role": "assistant", "content": pirate_speech})
            self.update_label(f"Pirate: {pirate_speech}", player_side=False)
            self.convo_depth += 1
            self.update_choices(player_choices)
        except Exception as e:
            print(f"[!] LLM query failed: {e}")
            self.update_label("(The pirate is tongue-tied)")
            # Fix: DirectGui objects are not safe to mutate from a worker
            # thread — defer the re-enable to the main thread like every
            # other GUI update in this class.
            self.taskMgr.add(self._reenable_button_task, 'reenable-button-task')

    def _reenable_button_task(self, task):
        """One-shot task: re-enable the Start button (runs on the main thread)."""
        self.button["state"] = "normal"
        return task.done

    def parse_choices(self, content):
        """Extract up to four player options from a numbered list ("1. ..." / "1) ...")."""
        choices = []
        for line in content.split('\n'):
            stripped = line.strip()
            if stripped and stripped[0] in "1234":
                # Strip the list marker only when it actually is "N." / "N)".
                # Fix: the old split('.', 1) chopped prose lines that merely
                # started with a digit at their first sentence period.
                if len(stripped) > 1 and stripped[1] in ".)":
                    stripped = stripped[2:].strip()
                if stripped:  # fix: never emit a blank choice
                    choices.append(stripped)
        return choices[:4]

    def update_label(self, text, player_side=True):
        """Update the dialogue label, deferred to the main thread via a task."""
        def update_task(task):
            # Player text sits on the right, pirate text on the left.
            if player_side:
                self.label.setPos(0.8, 0.8)
            else:
                self.label.setPos(-0.8, 0.8)
            self.label.setText(text)
            return task.done
        self.taskMgr.add(update_task, 'update-label-task')

    def update_choices(self, choices):
        """Create one button per choice, deferred to the main thread via a task."""
        def update_task(task):
            if not choices:
                # Nothing to offer — tell the player and allow a restart.
                self.update_label("(No responses)")
                self.button["state"] = "normal"
                return task.done
            start_y = 0.8
            spacing = 0.18  # vertical gap between stacked buttons
            for idx, choice in enumerate(choices):
                btn = DirectButton(
                    text=choice,
                    scale=0.07,
                    pos=(0.8, 0, start_y - idx * spacing),
                    command=self.select_choice,
                    extraArgs=[choice],
                    text_align=TextNode.ACenter,
                    text_wordwrap=18,
                    frameColor=(0.1, 0.4, 0.6, 1),  # button background color
                    borderWidth=(0.1, 0.1),
                    relief='raised'
                )
                self.choice_buttons.append(btn)
            return task.done
        self.taskMgr.add(update_task, 'update-choices-task')

    def clear_choices(self):
        """Destroy and forget all current choice buttons."""
        for btn in self.choice_buttons:
            btn.destroy()
        self.choice_buttons = []
def main() -> None:
    """Launch the pirate-chat demo window and enter Panda3D's main loop."""
    Panda3DInterface().run()


if __name__ == "__main__":
    main()
I’m running Ollama [Locally] and using a tiny model because I have an awesome GTX970 4GB card (I adore it). The model I used during this example test was granite 3.3 2b. The responses were pretty quick once it was on the go. I used some smaller models as well with much better responses, like gemma3:1b at 815 MB, which is really quick. But I’m sure there is code that can be done better. Haven’t bothered checking it to that point — just needed it to work.
Anyways, I don’t know if it’s a nice idea or a shitty one, but whatever — there’s the code, have fun.
Idea about the processes:
And some early morning video recording converted to a gif yay… 2b 1.5GB model in the gif.. and some horrible quality.
Silly stuff…
Anyways, I’m sure there is more you can do. Thinking about it, I want to give a model like a local LLM a 3D AI node to have as a “Player Character” in the 3D world… I’m sure something like player-side prediction logic, or even simpler, could be used to move the player around based on the response from the model + the latency — anyway, mumbo jumbo.
nite nite