MOV with alpha channel

Hi guys — I was wondering if there is any way to work around having to export a second grayscale video whenever I want to play a video with an alpha channel (to prevent performance issues). Or do I need to provide nine separate alpha movies and pass each one to tex.read() after its media file (that call is commented out in the code below)?

Thanks in advance

from panda3d.core import * 
loadPrcFileData("", "textures-power-2 none") 
loadPrcFileData("", "audio-library-name p3openal_audio") 
from direct.showbase.DirectObject import DirectObject 
from direct.gui.OnscreenText import OnscreenText 
import direct.directbase.DirectStart 
from direct.gui.OnscreenImage import OnscreenImage
from button import *

# Media assets: nine QuickTime movies ("flower1.mov" … "flower9.mov") and
# their matching soundtracks, generated instead of spelled out by hand.
MEDIA = ["flower%d.mov" % n for n in range(1, 10)]
(MEDIAFILE1, MEDIAFILE2, MEDIAFILE3, MEDIAFILE4, MEDIAFILE5,
 MEDIAFILE6, MEDIAFILE7, MEDIAFILE8, MEDIAFILE9) = MEDIA

SOUNDS = ["flower%d.wav" % n for n in range(1, 10)]
(SOUND1, SOUND2, SOUND3, SOUND4, SOUND5,
 SOUND6, SOUND7, SOUND8, SOUND9) = SOUNDS

# The keyboard keys tracked by the handler, in SEQUENCES order.
_KEYS = ('p', 'o', 'i', 'u', 'l', 'k', 'j', 'h')

# Each string lists the keys that must be held down for that sequence;
# every other tracked key must be released.
_ACTIVE = [
    'pokh',
    'iu',
    'pj',
    'oj',
    'iu',
    'ph',
    'okh',
    'i',
    'u',
    'l',
    'pol',
    'poiulkh',
]

# Expand each shorthand string into the full {key: pressed} mapping.
SEQUENCES = [{k: k in active for k in _KEYS} for active in _ACTIVE]

# Only the first eight movies are playable videos; MEDIA keeps all nine.
VIDEOS = MEDIA[:8]

# Nominal duration (seconds) of each playable video.
LENGTH = [10.0] * 8

BACKGROUND = 'backgroundPNG/animbackground.egg'
USE_SENSOR = False

class World(DirectObject):
    """Plays fullscreen alpha-channel flower movies in response to
    chord-like combinations of the keys p/o/i/u/l/k/j/h (see SEQUENCES)."""

    # Keys tracked by the handler, in the same order used by SEQUENCES.
    TRACKED_KEYS = ('p', 'o', 'i', 'u', 'l', 'k', 'j', 'h')

    def __init__(self):
        base.setBackgroundColor(1, 1, 1, 1)
        if USE_SENSOR:
            # External hardware buttons feed the same handler as the keyboard.
            self.sensor = Buttons(self.handler)
        base.disableMouse()
        # Current pressed state of every tracked key.
        self.keys = {k: False for k in self.TRACKED_KEYS}
        self.enterDefault()

        # Currently playing sound / fullscreen video card (None when idle).
        self.sounds = None
        self.video = None

        # Register press and release events for every tracked key
        # (replaces 16 copy-pasted accept() calls).
        for k in self.TRACKED_KEYS:
            self.accept(k, self.handler, extraArgs=[k, True])
            self.accept(k + '-up', self.handler, extraArgs=[k, False])

    def enterDefault(self):
        """Reset the camera and show the static background model."""
        base.camera.setPos(0, 0, 0)
        self.default = self.loadegg(BACKGROUND, (0, 14, 0), (0, 0, 0), (10, 1, 8))

    def handler(self, b, s):
        """Record key *b* changing to pressed-state *s*; when the resulting
        key combination matches a SEQUENCES entry, play that sequence."""
        if self.keys[b] is not s:
            self.keys[b] = s
            for i, seq in enumerate(SEQUENCES):
                # Each SEQUENCES entry holds exactly the tracked keys, so a
                # plain dict comparison replaces the original 8-way and-chain.
                if seq == self.keys:
                    print('entrou')
                    self.removeAllVideos()
                    print('removeu')
                    self.createVideo(i)
                    break
        # Consistency fix: the original used a Python 2 print statement here
        # while the rest of the file uses the print() function.
        print(b, ":", s)

    def setVisibility(self, obj, v):
        """Show *obj* when *v* is truthy, otherwise hide it."""
        if v:
            obj.show()
        else:
            obj.hide()

    def createVideo(self, d):
        """Load video/sound pair *d*, put the movie on a fullscreen card in
        render2d, and start playback synchronized to its soundtrack."""
        base.setBackgroundColor(1, 1, 1, 1)
        tex = MovieTexture("1")
        # For a real alpha channel, a second grayscale movie can be supplied:
        # tex.read(MEDIA[d], ALPHA[d], 8, 1)
        if not tex.read(VIDEOS[d], 8):
            # Explicit raise instead of assert: asserts are stripped under -O.
            raise RuntimeError("Failed to load video: %s" % VIDEOS[d])
        # Set up a fullscreen card to carry the video texture.
        cm = CardMaker("My Fullscreen Card")
        cm.setFrameFullscreenQuad()
        cm.setUvRange(tex)
        self.video = NodePath(cm.generate())
        self.video.reparentTo(render2d)
        self.video.setTransparency(TransparencyAttrib.MAlpha)
        self.video.setTexture(tex)
        # Movie frames may be padded to a power of two; compensate with the
        # texture's own scale. (The original also did a relative
        # setScale(self.video, 1), which is a no-op and was dropped.)
        self.video.setTexScale(TextureStage.getDefault(), tex.getTexScale())

        self.sounds = loader.loadSfx(SOUNDS[d])
        # Drive the video clock from the sound so they stay in sync.
        tex.synchronizeTo(self.sounds)
        tex.stop()
        self.sounds.play()

    def removeVideo(self, d):
        """Stop and remove the current video/sound pair.

        BUG FIX: the original indexed ``self.sounds[d]`` / ``self.video[d]``,
        but both attributes hold a single object, so the call always raised.
        *d* is kept for interface compatibility but is no longer used.
        """
        self.removeAllVideos()

    def removeAllVideos(self):
        """Stop and remove the playing video, if any (safe to call twice)."""
        if self.sounds is not None:
            self.sounds.stop()
            self.video.remove()
            # Clear so repeated calls do not touch already-removed objects.
            self.sounds = None
            self.video = None

    def loadegg(self, path, pos, hpr, size):
        """Load the model at *path*, place it with *pos*/*hpr*/*size* under
        render, and return the resulting NodePath."""
        model = loader.loadModel(path)
        model.setPos(pos[0], pos[1], pos[2])
        model.setHpr(hpr[0], hpr[1], hpr[2])
        model.setScale(size[0], size[1], size[2])
        model.reparentTo(render)
        return model

# Guard the entry point so importing this module does not start the app.
if __name__ == "__main__":
    w = World()
    run()


Not sure what you’re asking. You want to use the same alpha movie as the alpha channel for all of your different color movies? I think you’d have to specify it again each time, and then it will have to be decoded nine times.

I suppose you could play tricks with loading your “alpha” movie as a separate grayscale texture, then combining them in a shader. This would avoid the need to decode the alpha movie multiple times, but it would be more complicated in your scene graph.

Unless there’s a different problem you’re trying to solve instead?

David