This article collects typical usage examples of the Python class moviepy.video.VideoClip.VideoClip. If you are wondering what the VideoClip class does, how to use it, or what real-world usage looks like, the hand-picked class code examples below may help.
The following presents 14 code examples of the VideoClip class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
Example 1: __init__
def __init__(self, filename, has_mask=False,
             audio=True, audio_buffersize=200000,
             audio_fps=44100, audio_nbytes=2, verbose=False):

    VideoClip.__init__(self)

    # Make a reader
    pix_fmt = "rgba" if has_mask else "rgb24"
    self.reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt)

    # Make some of the reader's attributes accessible from the clip
    self.duration = self.reader.duration
    self.end = self.reader.duration
    self.fps = self.reader.fps
    self.size = self.reader.size

    if has_mask:
        self.get_frame = lambda t: self.reader.get_frame(t)[:, :, :3]
        mask_gf = lambda t: self.reader.get_frame(t)[:, :, 3] / 255.0
        self.mask = (VideoClip(ismask=True, get_frame=mask_gf)
                     .set_duration(self.duration))
        self.mask.fps = self.fps
    else:
        self.get_frame = lambda t: self.reader.get_frame(t)

    # Make a reader for the audio, if any.
    if audio and self.reader.infos['audio_found']:
        self.audio = AudioFileClip(filename,
                                   buffersize=audio_buffersize,
                                   fps=audio_fps,
                                   nbytes=audio_nbytes)
Contributor: DevinGeo, Project: moviepy, Lines of code: 35, Source file: VideoFileClip.py
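For orientation, this constructor is what runs when the public VideoFileClip class is instantiated. A minimal usage sketch, assuming the moviepy 1.x editor API; "video.mp4" is a placeholder path:

from moviepy.editor import VideoFileClip

clip = VideoFileClip("video.mp4", audio=True)   # placeholder input file
print(clip.duration, clip.fps, clip.size)       # attributes copied from the FFMPEG reader
clip.close()                                    # releases the ffmpeg reader and audio handles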
Example 2: __init__
def __init__(self, foldername, fps, withmask=True, ismask=False):
    VideoClip.__init__(self, ismask=ismask)
    self.directory = foldername
    self.fps = fps
    self.imagefiles = sorted(os.listdir(foldername))
    self.duration = 1.0 * len(self.imagefiles) / self.fps
    self.end = self.duration
    self.lastpos = None
    self.lastimage = None

    def get_frame(t):
        pos = int(self.fps * t)
        if pos != self.lastpos:
            # read and cache the image for this frame index
            self.lastimage = ffmpeg_read_image(self.imagefiles[pos],
                                               withmask=withmask)
            self.lastpos = pos
        return self.lastimage

    self.get_frame = get_frame
    self.size = get_frame(0).shape[:2][::-1]
Contributor: JoshdanG, Project: moviepy, Lines of code: 25, Source file: DirectoryClip.py
Example 3: __init__
def __init__(self, filename, ismask=False, has_mask=False,
             audio=True, audio_buffersize=200000,
             audio_fps=44100, audio_nbytes=2, verbose=False):

    VideoClip.__init__(self, ismask)

    # We store the construction parameters in case we need to make
    # a copy (a 'co-reader').
    self.parameters = {'filename': filename, 'ismask': ismask,
                       'has_mask': has_mask, 'audio': audio,
                       'audio_buffersize': audio_buffersize}

    # Make a reader
    pix_fmt = "rgba" if has_mask else "rgb24"
    self.reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt)

    # Make some of the reader's attributes accessible from the clip
    self.duration = self.reader.duration
    self.fps = self.reader.fps
    self.size = self.reader.size
    self.get_frame = lambda t: self.reader.get_frame(t)

    # Make a reader for the audio, if any.
    if audio:
        try:
            self.audio = AudioFileClip(filename, buffersize=audio_buffersize,
                                       fps=audio_fps, nbytes=audio_nbytes)
        except Exception:
            if verbose:
                print("No audio found in %s" % filename)
Contributor: ShaguptaS, Project: moviepy, Lines of code: 32, Source file: VideoFileClip.py
Example 4: __init__
def __init__(self, glob_store, freq, fft_clip, ismask=False):
    def make_frame(t):
        freq_amplitude = fft_clip.freq_amplitude(freq, t)
        image_data = glob_store.image_from_normal(freq_amplitude)
        return image_data

    VideoClip.__init__(self, make_frame=make_frame, ismask=ismask,
                       duration=fft_clip.duration)
Contributor: sdobz, Project: pyfftviz, Lines of code: 8, Source file: frame_source.py
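The glob_store and fft_clip objects are specific to the pyfftviz project, but the pattern this example demonstrates, passing a make_frame(t) callback and a duration to VideoClip, is general. A self-contained sketch of that pattern with a purely illustrative gradient animation, assuming moviepy 1.x and numpy:

import numpy as np
from moviepy.editor import VideoClip

def make_frame(t):
    # A 2-second animated horizontal gradient, 64x64 RGB, uint8 values 0-255.
    x = np.linspace(0, 255, 64, dtype=np.uint8)
    frame = np.tile(x, (64, 1))
    frame = np.roll(frame, int(t * 32), axis=1)    # shift the gradient over time
    return np.dstack([frame, frame, frame])        # grayscale -> RGB

clip = VideoClip(make_frame=make_frame, duration=2)
# clip.write_videofile("gradient.mp4", fps=24)     # uncomment to render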
Example 5: __init__
def __init__(self, subtitles, make_textclip=None):

    VideoClip.__init__(self, has_constant_size=False)

    if isinstance(subtitles, str):
        subtitles = file_to_subtitles(subtitles)

    subtitles = [(list(map(cvsecs, tt)), txt) for tt, txt in subtitles]
    self.subtitles = subtitles
    self.textclips = dict()

    if make_textclip is None:
        make_textclip = lambda txt: TextClip(txt, font='Georgia-Bold',
                                             fontsize=24, color='white',
                                             stroke_color='black',
                                             stroke_width=0.5)

    self.make_textclip = make_textclip
    self.start = 0
    self.duration = max([tb for ((ta, tb), txt) in self.subtitles])
    self.end = self.duration

    def add_textclip_if_none(t):
        """ Will generate a textclip if it hasn't been generated yet.
        If there is no subtitle to show at t, return False. """
        sub = [((ta, tb), txt) for ((ta, tb), txt) in self.textclips.keys()
               if (ta <= t < tb)]
        if sub == []:
            sub = [((ta, tb), txt) for ((ta, tb), txt) in self.subtitles
                   if (ta <= t < tb)]
            if sub == []:
                return False
        sub = sub[0]
        if sub not in self.textclips.keys():
            self.textclips[sub] = self.make_textclip(sub[1])
        return sub

    def make_frame(t):
        sub = add_textclip_if_none(t)
        return (self.textclips[sub].get_frame(t) if sub
                else np.array([[[0, 0, 0]]]))

    def make_mask_frame(t):
        sub = add_textclip_if_none(t)
        return (self.textclips[sub].mask.get_frame(t) if sub
                else np.array([[0]]))

    self.make_frame = make_frame
    hasmask = (self.make_textclip('T').mask is not None)
    self.mask = (VideoClip(make_mask_frame, ismask=True) if hasmask else None)
Contributor: livingbio, Project: moviepy, Lines of code: 52, Source file: subtitles.py
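The class above ships publicly as moviepy.video.tools.subtitles.SubtitlesClip. A minimal overlay sketch, assuming moviepy 1.x with ImageMagick available for the default TextClip factory; the file names are placeholders:

from moviepy.editor import VideoFileClip, CompositeVideoClip
from moviepy.video.tools.subtitles import SubtitlesClip

video = VideoFileClip("movie.mp4")
subs = SubtitlesClip("movie.srt")     # parses the .srt and builds TextClips lazily
final = CompositeVideoClip([video, subs.set_pos(("center", "bottom"))])
final.write_videofile("movie_subtitled.mp4", fps=video.fps)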
Example 6: __init__
def __init__(self, clips, size=None, bg_color=None, transparent=False,
             ismask=False):

    if size is None:
        size = clips[0].size

    if bg_color is None:
        bg_color = 0.0 if ismask else (0, 0, 0)

    VideoClip.__init__(self)

    self.size = size
    self.ismask = ismask
    self.clips = clips
    self.transparent = transparent
    self.bg_color = bg_color
    self.bg = ColorClip(size, col=self.bg_color).get_frame(0)

    # compute duration
    ends = [c.end for c in self.clips]
    if not any([(e is None) for e in ends]):
        self.duration = max(ends)
        self.end = max(ends)

    # compute audio
    audioclips = [v.audio for v in self.clips if v.audio is not None]
    if len(audioclips) > 0:
        self.audio = CompositeAudioClip(audioclips)

    # compute mask
    if transparent:
        maskclips = [c.mask.set_pos(c.pos) for c in self.clips
                     if c.mask is not None]
        if maskclips != []:
            self.mask = CompositeVideoClip(maskclips, self.size,
                                           transparent=False, ismask=True)

    def gf(t):
        """ The clips playing at time `t` are blitted over one
        another. """
        f = self.bg
        for c in self.playing_clips(t):
            f = c.blit_on(f, t)
        return f

    self.get_frame = gf
Contributor: melvinvarkey, Project: moviepy, Lines of code: 47, Source file: CompositeVideoClip.py
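For context, this class is normally used through its public constructor rather than subclassed. A minimal picture-in-picture sketch, assuming moviepy 1.x and placeholder file names:

from moviepy.editor import VideoFileClip, CompositeVideoClip

background = VideoFileClip("background.mp4")       # placeholder file names
overlay = (VideoFileClip("overlay.mp4")
           .set_pos(("right", "top"))              # pin the overlay to a corner
           .set_start(2)                           # overlay appears 2 s in
           .set_duration(5))

final = CompositeVideoClip([background, overlay])  # size defaults to background.size
final.write_videofile("composited.mp4", fps=background.fps)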
Example 7: __init__
def __init__(self, subtitles, make_textclip=None):

    VideoClip.__init__(self)

    if isinstance(subtitles, str):
        subtitles = file_to_subtitles(subtitles)

    subtitles = [(list(map(cvsecs, tt)), txt) for tt, txt in subtitles]
    self.subtitles = subtitles
    self.textclips = dict()

    if make_textclip is None:
        make_textclip = lambda txt: TextClip(txt, font='Georgia-Bold',
                                             fontsize=24, color='white',
                                             stroke_color='black',
                                             stroke_width=0.5)

    self.make_textclip = make_textclip
    self.start = 0
    self.duration = max([tb for ((ta, tb), txt) in self.subtitles])
    self.end = self.duration

    def add_textclip_if_none(t):
        sub = [((ta, tb), txt) for ((ta, tb), txt) in self.textclips.keys()
               if (ta <= t < tb)]
        if sub == []:
            sub = [((ta, tb), txt) for ((ta, tb), txt) in self.subtitles
                   if (ta <= t < tb)]
            if sub == []:
                return False
        sub = sub[0]
        if sub not in self.textclips.keys():
            self.textclips[sub] = self.make_textclip(sub[1])
        return sub

    def get_frame(t):
        sub = add_textclip_if_none(t)
        return (self.textclips[sub].get_frame(t) if sub
                else np.array([[[0, 0, 0]]]))

    def mask_get_frame(t):
        sub = add_textclip_if_none(t)
        return (self.textclips[sub].mask.get_frame(t) if sub
                else np.array([[0]]))

    self.get_frame = get_frame
    self.mask = VideoClip(ismask=True, get_frame=mask_get_frame)
Contributor: BAT0000, Project: moviepy, Lines of code: 47, Source file: subtitles.py
Example 8: __init__
def __init__(self, filename, has_mask=False,
             audio=True, audio_buffersize=200000,
             target_resolution=None, resize_algorithm='bicubic',
             audio_fps=44100, audio_nbytes=2, verbose=False,
             fps_source='tbr'):

    VideoClip.__init__(self)

    # Make a reader
    pix_fmt = "rgba" if has_mask else "rgb24"
    self.reader = None  # need this just in case FFMPEG has issues (__del__ complains)
    self.reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt,
                                     target_resolution=target_resolution,
                                     resize_algo=resize_algorithm,
                                     fps_source=fps_source)

    # Make some of the reader's attributes accessible from the clip
    self.duration = self.reader.duration
    self.end = self.reader.duration
    self.fps = self.reader.fps
    self.size = self.reader.size
    self.rotation = self.reader.rotation
    self.filename = self.reader.filename

    if has_mask:
        self.make_frame = lambda t: self.reader.get_frame(t)[:, :, :3]
        mask_mf = lambda t: self.reader.get_frame(t)[:, :, 3] / 255.0
        self.mask = (VideoClip(ismask=True, make_frame=mask_mf)
                     .set_duration(self.duration))
        self.mask.fps = self.fps
    else:
        self.make_frame = lambda t: self.reader.get_frame(t)

    # Make a reader for the audio, if any.
    if audio and self.reader.infos['audio_found']:
        self.audio = AudioFileClip(filename,
                                   buffersize=audio_buffersize,
                                   fps=audio_fps,
                                   nbytes=audio_nbytes)
Contributor: bobatsar, Project: moviepy, Lines of code: 45, Source file: VideoFileClip.py
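The extra keyword arguments of this newer constructor are exposed on the public class. A sketch, assuming moviepy 1.x; "video.mp4" is a placeholder:

from moviepy.editor import VideoFileClip

# Decode at a reduced resolution directly in ffmpeg, and read the frame rate
# from the 'fps' field instead of the default 'tbr'.
clip = VideoFileClip("video.mp4",
                     target_resolution=(720, None),   # (height, width); None keeps the aspect ratio
                     resize_algorithm="bicubic",
                     fps_source="fps")
print(clip.size, clip.fps, clip.rotation)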
Example 9: __init__
def __init__(self, foldername, fps, transparent=True, ismask=False):
    VideoClip.__init__(self, ismask=ismask)
    self.directory = foldername
    self.fps = fps
    allfiles = os.listdir(foldername)
    self.pics = sorted(["%s/%s" % (foldername, f) for f in allfiles
                        if not f.endswith(('.txt', '.wav'))])
    audio = [f for f in allfiles if f.endswith('.wav')]
    if len(audio) > 0:
        self.audio = AudioFileClip(audio[0])
        self.audiofile = audio[0]

    self.size = imread(self.pics[0]).shape[:2][::-1]

    if imread(self.pics[0]).shape[2] == 4:  # transparent png
        if ismask:
            def get_frame(t):
                return 1.0 * imread(self.pics[int(self.fps * t)])[:, :, 3] / 255
        else:
            def get_frame(t):
                return imread(self.pics[int(self.fps * t)])[:, :, :3]
            if transparent:
                self.mask = DirectoryClip(foldername, fps, ismask=True)
    else:
        def get_frame(t):
            return imread(self.pics[int(self.fps * t)])

    self.get_frame = get_frame
    self.duration = 1.0 * len(self.pics) / self.fps
Contributor: ShaguptaS, Project: moviepy, Lines of code: 37, Source file: DirectoryClip.py
Example 10: __init__
def __init__(self, filename, ismask=False, has_mask=False,
             audio=True, audio_buffersize=200000,
             audio_fps=44100, audio_nbytes=2, verbose=False):

    VideoClip.__init__(self, ismask)

    # Make a reader
    pix_fmt = "rgba" if has_mask else "rgb24"
    self.reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt,
                                     print_infos=verbose)

    # Make some of the reader's attributes accessible from the clip
    self.duration = self.reader.duration
    self.end = self.reader.duration
    self.fps = self.reader.fps
    self.size = self.reader.size
    self.get_frame = lambda t: self.reader.get_frame(t)

    # Make a reader for the audio, if any.
    if audio:
        self.audio = AudioFileClip(filename,
                                   buffersize=audio_buffersize,
                                   fps=audio_fps,
                                   nbytes=audio_nbytes)
Contributor: theContentMint, Project: moviepy, Lines of code: 24, Source file: VideoFileClip.py
Example 11: __init__
def __init__(self, clips, size=None, bg_color=None, use_bgclip=False,
             ismask=False):

    if size is None:
        size = clips[0].size

    if use_bgclip and (clips[0].mask is None):
        transparent = False
    else:
        transparent = (bg_color is None)

    if bg_color is None:
        bg_color = 0.0 if ismask else (0, 0, 0)

    fps_list = list(set([c.fps for c in clips if hasattr(c, 'fps')]))
    if len(fps_list) == 1:
        self.fps = fps_list[0]

    VideoClip.__init__(self)

    self.size = size
    self.ismask = ismask
    self.clips = clips
    self.bg_color = bg_color

    if use_bgclip:
        self.bg = clips[0]
        self.clips = clips[1:]
    else:
        self.clips = clips
        self.bg = ColorClip(size, col=self.bg_color)

    # compute duration
    ends = [c.end for c in self.clips]
    if not any([(e is None) for e in ends]):
        self.duration = max(ends)
        self.end = max(ends)

    # compute audio
    audioclips = [v.audio for v in self.clips if v.audio is not None]
    if len(audioclips) > 0:
        self.audio = CompositeAudioClip(audioclips)

    # compute mask if necessary
    if transparent:
        maskclips = [(c.mask if (c.mask is not None) else
                      c.add_mask().mask).set_pos(c.pos)
                     .set_end(c.end).set_start(c.start, change_end=False)
                     for c in self.clips]
        self.mask = CompositeVideoClip(maskclips, self.size, ismask=True,
                                       bg_color=0.0)

    def make_frame(t):
        """ The clips playing at time `t` are blitted over one
        another. """
        f = self.bg.get_frame(t)
        for c in self.playing_clips(t):
            f = c.blit_on(f, t)
        return f

    self.make_frame = make_frame
Contributor: rddaz2013, Project: moviepy, Lines of code: 66, Source file: CompositeVideoClip.py
Example 12: concatenate
def concatenate(clipslist, method='chain', transition=None,
                bg_color=(0, 0, 0), transparent=False, ismask=False,
                crossover=0):
    """ Concatenates several video clips.

    Returns a video clip made by concatenating several video clips
    (concatenated means that they will be played one after another).

    If the clips do not have the same resolution, the final
    resolution will be such that no clip has to be resized. As
    a consequence the final clip has the height of the highest
    clip and the width of the widest clip of the list. All the
    clips with smaller dimensions will appear centered. The border
    will be transparent if mask=True, else it will be of the
    color specified by ``bg_color``.

    Returns a VideoClip instance if all clips have the same size and
    there is no transition, else a composite clip.

    Parameters
    -----------
    clipslist
      A list of video clips which must all have their ``duration``
      attributes set.
    transition
      A clip that will be played between each two clips of the list.
    bg_color
      Color of the background, if any.
    transparent
      If True, the resulting clip's mask will be the concatenation of
      the masks of the clips in the list. If the clips do not have the
      same resolution, the border around the smaller clips will be
      transparent.
    """

    if transition is not None:
        l = [[v, transition] for v in clipslist[:-1]]
        clipslist = reduce(lambda x, y: x + y, l) + [clipslist[-1]]
        transition = None

    tt = np.cumsum([0] + [c.duration for c in clipslist])
    sizes = [v.size for v in clipslist]
    w = max([r[0] for r in sizes])
    h = max([r[1] for r in sizes])

    if method == 'chain':
        result = VideoClip(ismask=ismask)
        result.size = (w, h)

        def gf(t):
            i = max([i for i, e in enumerate(tt) if e <= t])
            return clipslist[i].get_frame(t - tt[i])

        result.get_frame = gf

        if (len(set(map(tuple, sizes))) > 1) and (bg_color is not None):
            # If not all clips have the same size, flatten the result
            # on some color
            result = result.fx(on_color, (w, h), bg_color, 'center')

    elif method == 'compose':
        tt = np.maximum(0, tt - crossover * np.arange(len(tt)))
        result = CompositeVideoClip([c.set_start(t).set_pos('center')
                                     for (c, t) in zip(clipslist, tt)],
                                    size=(w, h), bg_color=bg_color,
                                    ismask=ismask, transparent=transparent)

    result.tt = tt
    result.clipslist = clipslist
    result.start_times = tt[:-1]
    result.start, result.duration, result.end = 0, tt[-1], tt[-1]

    # Compute the mask if any
    if transparent and (not ismask):
        # add a mask to the clips which have none
        clips_withmask = [(c if (c.mask is not None) else c.add_mask())
                          for c in clipslist]
        result.mask = concatenate([c.mask for c in clips_withmask],
                                  bg_color=0, ismask=True, transparent=False)

    # Compute the audio, if any.
    audio_t = [(c.audio, t) for c, t in zip(clipslist, tt)
               if c.audio is not None]
    if len(audio_t) > 0:
        result.audio = CompositeAudioClip([a.set_start(t)
                                           for a, t in audio_t])
    return result
Contributor: Dhertz, Project: moviepy, Lines of code: 92, Source file: concatenate.py
Example 13: concatenate_videoclips
def concatenate_videoclips(clips, method="chain", transition=None,
                           bg_color=None, ismask=False, padding=0):
    """ Concatenates several video clips.

    Returns a video clip made by concatenating several video clips
    (concatenated means that they will be played one after another).

    There are two methods:

    - method="chain": will produce a clip that simply outputs
      the frames of the successive clips, without any correction if they are
      not of the same size or anything. If none of the clips have masks the
      resulting clip has no mask, else the mask is a concatenation of masks
      (using completely opaque for clips that don't have masks, obviously).
      If you have clips of different sizes and you want to write the
      result of the concatenation directly to a file, use the method
      "compose" instead.

    - method="compose": if the clips do not have the same
      resolution, the final resolution will be such that no clip has
      to be resized.
      As a consequence the final clip has the height of the highest
      clip and the width of the widest clip of the list. All the
      clips with smaller dimensions will appear centered. The border
      will be transparent if mask=True, else it will be of the
      color specified by ``bg_color``.

    If all clips with a ``fps`` attribute have the same fps, it becomes
    the fps of the result.

    Parameters
    -----------
    clips
      A list of video clips which must all have their ``duration``
      attributes set.
    method
      "chain" or "compose": see above.
    transition
      A clip that will be played between each two clips of the list.
    bg_color
      Only for method='compose'. Color of the background.
      Set to None for a transparent clip.
    padding
      Only for method='compose'. Gap duration between two consecutive clips.
      Note that for negative padding, a clip will partly play at the same
      time as the clip it follows (negative padding is cool for clips which
      fade into one another). A non-null padding automatically sets the
      method to `compose`.
    """

    if transition is not None:
        l = [[v, transition] for v in clips[:-1]]
        clips = reduce(lambda x, y: x + y, l) + [clips[-1]]
        transition = None

    tt = np.cumsum([0] + [c.duration for c in clips])

    sizes = [v.size for v in clips]
    w = max([r[0] for r in sizes])
    h = max([r[1] for r in sizes])

    tt = np.maximum(0, tt + padding * np.arange(len(tt)))

    if method == "chain":
        def make_frame(t):
            i = max([i for i, e in enumerate(tt) if e <= t])
            return clips[i].get_frame(t - tt[i])

        result = VideoClip(ismask=ismask, make_frame=make_frame)
        if any([c.mask is not None for c in clips]):
            masks = [c.mask if (c.mask is not None) else
                     ColorClip([1, 1], col=1, ismask=True, duration=c.duration)
                     # ColorClip(c.size, col=1, ismask=True).set_duration(c.duration)
                     for c in clips]
            result.mask = concatenate_videoclips(masks, method="chain",
                                                 ismask=True)
            result.clips = clips

    elif method == "compose":
        result = CompositeVideoClip([c.set_start(t).set_pos('center')
                                     for (c, t) in zip(clips, tt)],
                                    size=(w, h), bg_color=bg_color,
                                    ismask=ismask)

    result.tt = tt
    result.start_times = tt[:-1]
    result.start, result.duration, result.end = 0, tt[-1], tt[-1]

    audio_t = [(c.audio, t) for c, t in zip(clips, tt)
               if c.audio is not None]
    if len(audio_t) > 0:
        result.audio = CompositeAudioClip([a.set_start(t)
                                           for a, t in audio_t])
    # ... (the rest of this function is omitted in the source) ...
Contributor: 410063005, Project: moviepy, Lines of code: 101, Source file: concatenate.py
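For completeness, a usage sketch of this public function, assuming the moviepy 1.x editor API; the .mp4 names are placeholders:

from moviepy.editor import VideoFileClip, concatenate_videoclips

clips = [VideoFileClip(name) for name in ("intro.mp4", "scene1.mp4", "scene2.mp4")]

# "compose" centers clips of different sizes on a common canvas; a negative
# padding makes consecutive clips overlap, which pairs well with crossfades.
final = concatenate_videoclips(clips, method="compose", padding=-1, bg_color=None)
final.write_videofile("full_movie.mp4", fps=24)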
Example 14: concatenate
def concatenate(clipslist, method="chain", transition=None,
                bg_color=(0, 0, 0), transparent=False, ismask=False,
                padding=0):
    """ Concatenates several video clips.

    Returns a video clip made by concatenating several video clips
    (concatenated means that they will be played one after another).

    There are two methods: method="chain" will produce a clip that simply
    outputs the frames of the successive clips, without any correction if
    they are not of the same size or anything.

    With method="compose", if the clips do not have the same
    resolution, the final resolution will be such that no clip has
    to be resized.
    As a consequence the final clip has the height of the highest
    clip and the width of the widest clip of the list. All the
    clips with smaller dimensions will appear centered. The border
    will be transparent if mask=True, else it will be of the
    color specified by ``bg_color``.

    Returns a VideoClip instance if all clips have the same size and
    there is no transition, else a composite clip.

    Parameters
    -----------
    clipslist
      A list of video clips which must all have their ``duration``
      attributes set.
    method
      "chain" or "compose": see above.
    transition
      A clip that will be played between each two clips of the list.
    bg_color
      Color of the background, if any.
    transparent
      If True, the resulting clip's mask will be the concatenation of
      the masks of the clips in the list. If the clips do not have the
      same resolution, the border around the smaller clips will be
      transparent.
    padding
      Gap duration between two consecutive clips. If negative, a clip
      will play at the same time as the clip it follows. A non-null
      padding automatically sets the method to `compose`.
    """

    if transition is not None:
        l = [[v, transition] for v in clipslist[:-1]]
        clipslist = reduce(lambda x, y: x + y, l) + [clipslist[-1]]
        transition = None

    tt = np.cumsum([0] + [c.duration for c in clipslist])
    sizes = [v.size for v in clipslist]
    w = max([r[0] for r in sizes])
    h = max([r[1] for r in sizes])

    tt = np.maximum(0, tt + padding * np.arange(len(tt)))

    if method == "chain":
        def gf(t):
            i = max([i for i, e in enumerate(tt) if e <= t])
            return clipslist[i].get_frame(t - tt[i])

        result = VideoClip(ismask=ismask, get_frame=gf)

        if transparent:
            clips_w_masks = [(c.add_mask() if c.mask is None else c)
                             for c in clipslist]
            masks = [c.mask for c in clips_w_masks]
            result.mask = concatenate(masks, method="chain", ismask=True)

    elif method == "compose":
        result = CompositeVideoClip([c.set_start(t).set_pos('center')
                                     for (c, t) in zip(clipslist, tt)],
                                    size=(w, h), bg_color=bg_color,
                                    ismask=ismask, transparent=transparent)

    result.tt = tt
    result.clipslist = clipslist
    result.start_times = tt[:-1]
    result.start, result.duration, result.end = 0, tt[-1], tt[-1]

    audio_t = [(c.audio, t) for c, t in zip(clipslist, tt)
               if c.audio is not None]
    if len(audio_t) > 0:
        result.audio = CompositeAudioClip([a.set_start(t)
                                           for a, t in audio_t])
    return result
Contributor: b-w-d, Project: moviepy, Lines of code: 96, Source file: concatenate.py
Note: The moviepy.video.VideoClip.VideoClip class examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on GitHub, MSDocs and similar platforms. The snippets were selected from projects contributed by various developers, and copyright of the source code remains with the original authors. Please consult each project's license before distributing or reusing the code, and do not reproduce this article without permission.