本文整理汇总了Python中video.create_capture函数的典型用法代码示例。如果您正苦于以下问题:Python create_capture函数的具体用法?Python create_capture怎么用?Python create_capture使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了create_capture函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: __init__
def __init__(self, video_src):
    """Set up Lucas-Kanade tracking state and open the video source.

    :param video_src: camera index or file path accepted by video.create_capture
    """
    # Open the capture first; everything below is plain bookkeeping.
    self.cam = video.create_capture(video_src)
    self.frame_idx = 0
    # Tracker tuning: keep up to 10 points per track, re-detect every 3 frames.
    self.track_len = 10
    self.detect_interval = 3
    # NOTE: tracks/prev_gray start as empty dicts here (other variants use lists).
    self.tracks = {}
    self.prev_gray = {}
开发者ID:victorkaminskiy,项目名称:aico,代码行数:7,代码来源:test.py
示例2: main
def main():
    """Read frames from a camera or file, run main_check on every other
    frame, and display the result; ESC quits.

    Side effects: opens serial port COM4 at 9600 baud and stores the handle
    in the module-level global `ser` for use by other functions.
    """
    try:
        fn = sys.argv[1]
    except IndexError:  # was a bare except; only a missing CLI argument is expected
        fn = 0
    cv2.namedWindow('edge')
    cap = video.create_capture(fn)
    cv2.setMouseCallback('edge', onmouse)
    global ser
    ser = serial.Serial('COM4', 9600)
    count = 0
    while True:
        flag, img = cap.read()
        if not flag:
            # End of stream / read failure: stop instead of crashing on img.copy().
            break
        vis = img.copy()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        if count % 2 == 0:
            # Process only every other frame to save time.
            main_check(vis, gray, seed_pt)
        cv2.imshow('edge', vis)
        count += 1
        ch = cv2.waitKey(5) & 0xFF
        if ch == 27:  # ESC
            break
开发者ID:superdun,项目名称:Python,代码行数:25,代码来源:撑杆过河.py
示例3: __init__
def __init__(self, video_src, circles, csvFlag, rotation=0):
    """
    Initializes main function for execution
    :param video_src: path to video to be analyzed
    :param circles: path to text file containing joints
    :param csvFlag: flag if video src is actually a csv that should be read in. ONLY USE THIS WITH DEBUG MODE!
    :param rotation: rotation to apply to frames (default 0)
    """
    self.roguePoints = []
    self.rotation = rotation
    self.csvFlag = csvFlag
    if not csvFlag:
        self.cam = video.create_capture(video_src)
    else:
        self.cam = csv.reader(open(video_src, 'r'), delimiter=',', quotechar='|')
    self.frame_idx = -1  # This is because the frame index updates BEFORE anything is done.
    # Save frame to start at and the initial circles.
    self.allJoints = []
    # Context manager replaces open/close: the file is closed even if a
    # parse error (int()/float()) is raised part-way through.
    with open(circles, 'r') as f:
        self.initalFrame = int(f.readline())
        for line in f:
            # list() keeps the index accesses below working on Python 3 too.
            read = list(map(float, line.split()))  # x y r (with spaces)
            circle = j.Circle(read[0], read[1], read[2])
            self.allJoints.append(j.Joint(circle, prm.TRACK_LEN))  # circle in form [(x,y),r]
开发者ID:dgarwin,项目名称:SquatCop-DataCollection,代码行数:25,代码来源:Main.py
示例4: main
def main():
    """Interactive Laplacian-pyramid filter: one gain trackbar per level,
    applied to live frames; ESC quits."""
    import sys
    try:
        fn = sys.argv[1]
    except IndexError:  # was a bare except; only a missing CLI argument is expected
        fn = 0
    cap = video.create_capture(fn)
    leveln = 6
    cv.namedWindow('level control')
    for i in xrange(leveln):
        cv.createTrackbar('%d'%i, 'level control', 5, 50, nothing)
    while True:
        ret, frame = cap.read()
        if not ret:
            # Stream ended / read failed: stop instead of crashing below.
            break
        pyr = build_lappyr(frame, leveln)
        for i in xrange(leveln):
            # Trackbar position 0..50 maps to integer gain 0..10.
            v = int(cv.getTrackbarPos('%d'%i, 'level control') / 5)
            pyr[i] *= v
        res = merge_lappyr(pyr)
        cv.imshow('laplacian pyramid filter', res)
        if cv.waitKey(1) == 27:  # ESC
            break
    print('Done')
开发者ID:adamrankin,项目名称:opencv,代码行数:29,代码来源:lappyr.py
示例5: dictionay_learning_MHOF_online
def dictionay_learning_MHOF_online(training_samples_num=400):
    """Online mini-batch dictionary learning over MHOF histograms of a video.

    :param training_samples_num: number of frames to feed after the initial pair
    :return: the learned dictionary components (numpy array)
    """
    from MHOF_Extraction import MHOF_Extraction
    from MHOF_histogram_block import MHOF_histogram_block
    from sklearn.decomposition import MiniBatchDictionaryLearning
    import numpy as np
    import cv2
    import video
    cam = video.create_capture('Crowd-Activity-All.avi')
    height_block_num = 4
    width_block_num = 5
    bin_num = 16
    ret, prev = cam.read()
    ret, img = cam.read()
    flow_H = MHOF_Extraction(prev, img)
    flow_hist_H = MHOF_histogram_block(flow_H, height_block_num, width_block_num, bin_num)
    flow_hist_H = np.reshape(flow_hist_H, [1, flow_hist_H.size])
    dico = MiniBatchDictionaryLearning(1, alpha=1, n_iter=500)
    dic = dico.fit(flow_hist_H).components_
    for i in range(training_samples_num):
        ret, img = cam.read()
        # NOTE(review): `prev` is never advanced, so every flow is computed
        # against the first frame — confirm whether `prev = img` was intended.
        flow_H = MHOF_Extraction(prev, img)
        flow_hist_H = MHOF_histogram_block(flow_H, height_block_num, width_block_num, bin_num)
        # BUG FIX: was misspelled `MiniBatchDictionaryLearing`, a NameError
        # on the very first loop iteration.
        dico = MiniBatchDictionaryLearning(i + 1, alpha=1, n_iter=500, dict_init=dic)
        # BUG FIX: was `.components` — the fitted attribute is `components_`
        # (sklearn's trailing-underscore convention), so this raised AttributeError.
        dic = dico.fit(flow_hist_H).components_
    return dic
开发者ID:burness,项目名称:MHOF,代码行数:26,代码来源:dictionary_learning_MHOF_online.py
示例6: main
def main():
    """Detect MSER regions on each frame and draw their convex hulls in
    green; ESC quits."""
    try:
        video_src = sys.argv[1]
    except IndexError:  # was a bare except; only a missing CLI argument is expected
        video_src = 0
    cam = video.create_capture(video_src)
    mser = cv.MSER_create()
    while True:
        ret, img = cam.read()
        if not ret:
            # End of stream: leave the loop cleanly.
            break
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        vis = img.copy()
        regions, _ = mser.detectRegions(gray)
        # detectRegions yields flat point arrays; reshape for convexHull.
        hulls = [cv.convexHull(p.reshape(-1, 1, 2)) for p in regions]
        cv.polylines(vis, hulls, 1, (0, 255, 0))
        cv.imshow('img', vis)
        if cv.waitKey(5) == 27:  # ESC
            break
    print('Done')
开发者ID:adamrankin,项目名称:opencv,代码行数:25,代码来源:mser.py
示例7: __init__
def __init__(self, video_src, paused = False):
    """Open *video_src*, show its first frame, and prepare MOSSE state.

    :param video_src: source accepted by video.create_capture
    :param paused: start in paused mode when True
    """
    # Grab the capture and an initial frame so the window has content.
    self.cap = video.create_capture(video_src)
    _, first_frame = self.cap.read()
    self.frame = first_frame
    cv2.imshow('frame', self.frame)
    # Rectangles drawn on 'frame' are delivered to self.onrect.
    self.rect_sel = RectSelector('frame', self.onrect)
    self.trackers = []
    self.paused = paused
开发者ID:AnnaPetrovicheva,项目名称:opencv,代码行数:7,代码来源:mosse.py
示例8: __init__
def __init__(self, video_src, skipFrame):
    """LK depth-extraction tracker state.

    :param video_src: source accepted by video.create_capture
    :param skipFrame: number of frames to skip between processed frames
    """
    # Capture comes first; the rest is bookkeeping.
    self.cam = video.create_capture(video_src)
    self.skipFrame = skipFrame
    self.frame_idx = 0
    self.tracks = []
    # Tracker tuning: track history length and feature re-detect cadence.
    self.track_len = 10
    self.detect_interval = 5
开发者ID:zbxzc35,项目名称:opticalFlow,代码行数:7,代码来源:extractDepth_lk.py
示例9: main
def main():
    """Canny edge-detection demo with two threshold trackbars; edges are
    painted green over a darkened copy of the frame. ESC quits."""
    try:
        fn = sys.argv[1]
    except IndexError:  # was a bare except; only a missing CLI argument is expected
        fn = 0

    def nothing(*arg):
        # Trackbar callback placeholder; positions are polled each frame.
        pass

    cv.namedWindow('edge')
    cv.createTrackbar('thrs1', 'edge', 2000, 5000, nothing)
    cv.createTrackbar('thrs2', 'edge', 4000, 5000, nothing)
    cap = video.create_capture(fn)
    while True:
        flag, img = cap.read()
        if not flag:
            # Stream ended / read failed: stop instead of crashing below.
            break
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        thrs1 = cv.getTrackbarPos('thrs1', 'edge')
        thrs2 = cv.getTrackbarPos('thrs2', 'edge')
        edge = cv.Canny(gray, thrs1, thrs2, apertureSize=5)
        vis = img.copy()
        vis = np.uint8(vis/2.)  # darken so the green edges stand out
        vis[edge != 0] = (0, 255, 0)
        cv.imshow('edge', vis)
        ch = cv.waitKey(5)
        if ch == 27:  # ESC
            break
    print('Done')
开发者ID:adamrankin,项目名称:opencv,代码行数:29,代码来源:edge.py
示例10: __init__
def __init__(self, src, threshold = 25, doRecord=True, showWindows=True):
    """Motion-detector state: opens *src* and sizes the working buffers.

    :param src: source accepted by video.create_capture
    :param threshold: motion threshold used by the detection logic
    :param doRecord: whether detected motion should trigger recording
    :param showWindows: create the display window when True
    """
    self.doRecord = doRecord
    self.show = showWindows
    self.frame = None
    self.cap = video.create_capture(src)
    # Magic numbers are legacy capture property ids: 3=FRAME_WIDTH,
    # 4=FRAME_HEIGHT, 5=FPS.
    self.cap.set(3,1280)
    # NOTE(review): 2316 as a frame *height* looks unusual for a 1280-wide
    # stream — confirm the intended resolution.
    self.cap.set(4,2316)
    self.ret, self.frame = self.cap.read() #Take a frame to init recorder
    self.frame_rate = self.cap.get(5)
    print self.frame_rate
    # NOTE(review): get(3) is width and get(4) is height, so these arrays are
    # allocated as (width, height, ch); numpy images are (rows=height,
    # cols=width, ch). The two arguments look swapped — confirm against the
    # rest of the class before changing.
    self.gray_frame = np.zeros((self.cap.get(3), self.cap.get(4), 1), np.uint8)
    self.average_frame = np.zeros((self.cap.get(3), self.cap.get(4), 3), np.float32)
    self.absdiff_frame = None
    self.previous_frame = None
    # Total pixel count of a frame; used to express motion as a fraction.
    self.surface = self.cap.get(3) * self.cap.get(4)
    self.currentsurface = 0
    self.currentcontours = None
    self.threshold = threshold
    self.isRecording = False
    # Optical-flow track bookkeeping.
    self.tracks = []
    self.tracks_dist = []
    self.track_len = 3
    self.frame_idx = 0
    self.detect_interval = 5
    # self.font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 2, 8) #Creates a font
    self.trigger_time = 0
    if showWindows:
        cv2.namedWindow("Image", cv2.WINDOW_AUTOSIZE)
开发者ID:adrikayak,项目名称:Duckter,代码行数:33,代码来源:objdet.py
示例11: detectFace
def detectFace(self):
print "stuff"
import sys, getopt
print help_message
args, video_src = getopt.getopt(sys.argv[1:], '', ['cascade=', 'nested-cascade='])
try: video_src = video_src[0]
except: video_src = 0
args = dict(args)
# cascade_fn = args.get('--cascade', "haarcascade_frontalface_alt.xml")
cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
cam = create_capture(video_src, fallback='synth:bg=lena.jpg:noise=0.05')
found = False
while True:
ret, img = cam.read()
rects = self.detect(img, cascade)
vis = img.copy()
if(rects != [] and found == False):
print "here"
if goodOrBad(img,cascade):
found = True
cv2.imshow('facedetect', vis)
if 0xFF & cv2.waitKey(5) == 27:
break
cv2.destroyAllWindows()
开发者ID:shorttheworld,项目名称:FaceRecognition,代码行数:29,代码来源:facedetect.py
示例12: __init__
def __init__(self, video_src):
    """Optical-flow feature extractor state for one video.

    Opens *video_src* twice: once via cv2.VideoCapture for size/frame-count
    metadata, and once via video.create_capture for the reading loop.
    Backs a float32 memmap "<src>.oflw" of shape (total_frames, 640).

    :param video_src: video file path (also used to name the .oflw file)
    """
    self.track_len = 25
    self.detect_interval = 1
    self.tracks = []
    # Metadata capture; dimensions are halved for processing.
    self.capture = cv2.VideoCapture(video_src)
    self.frame_width = int(self.capture.get(cv.CV_CAP_PROP_FRAME_WIDTH) / 2)
    self.frame_height = int(self.capture.get(cv.CV_CAP_PROP_FRAME_HEIGHT) / 2)
    self.frame_size = (self.frame_width, self.frame_height)
    # Grid cells are 1/8 of the (already halved) frame in each dimension.
    self.grid_width = int(self.frame_width / 8 / 2)
    self.grid_height = int(self.frame_height / 8 / 2)
    self.grid_size = (self.grid_width, self.grid_height)
    self.total_frame_count = int(self.capture.get(cv.CV_CAP_PROP_FRAME_COUNT))
    print self.frame_size
    print self.grid_size
    print self.total_frame_count
    # One row of 512 + 128 float32 features per frame, written in place on disk.
    self.data_path = str(video_src) + ".oflw"
    print self.data_path
    self.fp = np.memmap(self.data_path, dtype="float32", mode="w+", shape=(self.total_frame_count, (512 + 128)))
    print "FP shape: ", self.fp.shape
    # Separate capture used for the actual frame-by-frame reading.
    self.cam = video.create_capture(video_src)
    self.frame_idx = 0
示例13: __init__
def __init__(self, src):
    """Open *src* and hook a rectangle selector on the 'plane' window."""
    # UI: window plus interactive rectangle selection feeding self.on_rect.
    cv2.namedWindow('plane')
    self.rect_sel = common.RectSelector('plane', self.on_rect)
    # Capture and selection state.
    self.cap = video.create_capture(src)
    self.frame = None
    self.rect_obj = None
示例14: compare
def compare(face_to_check, learn=False):
    """Watch the webcam until a face is stably detected, then compare it
    against the stored reference image ``data/images/<face_to_check>``.

    :param face_to_check: filename of the reference face image
    :param learn: when True, save the newly detected face as the reference
    :return: True when the live face matches the stored one, else False
    """
    import sys, getopt
    detected_time = 0
    detected_time_max = 10  # require this many consecutive detections before comparing
    video_src = 0
    cascade_fn = os.path.join('data','haarcascades','haarcascade_frontalface_alt2.xml')
    cascade = cv2.CascadeClassifier(cascade_fn)
    cam = create_capture(video_src, fallback='synth:bg=../cpp/lena.jpg:noise=0.05')
    while True:
        ret, img1 = cam.read()
        gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)
        t = clock()
        rects = detect(gray, cascade)
        if len(rects):
            if detected_time < detected_time_max:
                detected_time += 1
            else:
                # Crop the first detected rect out of the frame (legacy cv API).
                _found_size = (rects[0][0], rects[0][1], rects[0][2]-rects[0][0],
                               rects[0][3]-rects[0][1])
                _found_face = cv.GetImage(cv.fromarray(img1))
                cv.SetImageROI(_found_face, _found_size)
                current_face = cv.CreateImage(cv.GetSize(_found_face),
                                              _found_face.depth,
                                              _found_face.nChannels)
                if learn:
                    cv.Copy(_found_face, current_face, None)
                    cv.SaveImage(os.path.join('data','images',face_to_check), current_face)
                cv.ResetImageROI(cv.GetImage(cv.fromarray(img1)))
                img2 = cv.LoadImage(os.path.join('data','images',face_to_check))
                dest_face = cv.CreateImage(cv.GetSize(img2),
                                           img2.depth,
                                           img2.nChannels)
                cv.Resize(_found_face, dest_face)
                # Norm distance between the resized live face and the stored image.
                if cv.Norm(dest_face, img2) <= 30000:
                    return True
                else:
                    return False
                # BUG FIX: a mistyped, unreachable `sys,exit()` (a tuple of
                # `sys` and the result of builtin exit()) followed here; both
                # branches above return, so the dead statement was removed.
        else:
            # Detection streak broken: start counting again.
            detected_time = 0
        dt = clock() - t
示例15: main
def main():
    """Live digit recognition: find digit-sized contours in a binarized
    camera frame, normalize each patch, and classify it with a pre-trained
    SVM, drawing the predicted digit on the frame. ESC quits."""
    # NOTE(review): other samples pass a source to create_capture(); this
    # call relies on it having a default argument — confirm.
    cap = video.create_capture()
    classifier_fn = 'digits_svm.dat'
    if not os.path.exists(classifier_fn):
        print '"%s" not found, run digits.py first' % classifier_fn
        return
    model = SVM()
    model.load('digits_svm.dat')
    while True:
        ret, frame = cap.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Adaptive threshold -> inverted binary image; median blur kills speckle.
        bin = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 31, 10)
        bin = cv2.medianBlur(bin, 3)
        contours, heirs = cv2.findContours( bin.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        rects = map(cv2.boundingRect, contours)
        # Keep contours whose bounding box is digit-shaped: 16..64 px tall
        # and not much wider than tall.
        valid_flags = [ 16 <= h <= 64 and w <= 1.2*h for x, y, w, h in rects]
        for i, cnt in enumerate(contours):
            if not valid_flags[i]:
                continue
            # Skip inner contours whose parent is itself a valid digit box.
            _, _, _, outer_i = heirs[0, i]
            if outer_i >=0 and valid_flags[outer_i]:
                continue
            x, y, w, h = rects[i]
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0))
            # Crop the binary patch for this bounding box.
            sub = bin[y:,x:][:h,:w]
            #sub = ~cv2.equalizeHist(sub)
            #_, sub_bin = cv2.threshold(sub, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
            # Scale so the digit fits an SZ x SZ patch with some margin.
            s = 1.5*float(h)/SZ
            m = cv2.moments(sub)
            m00 = m['m00']
            # Reject patches that are nearly empty or nearly solid ink.
            if m00/255 < 0.1*w*h or m00/255 > 0.9*w*h:
                continue
            # Affine warp that centers the patch's center of mass in SZ x SZ.
            c1 = np.float32([m['m10'], m['m01']]) / m00
            c0 = np.float32([SZ/2, SZ/2])
            t = c1 - s*c0
            A = np.zeros((2, 3), np.float32)
            A[:,:2] = np.eye(2)*s
            A[:,2] = t
            sub1 = cv2.warpAffine(sub, A, (SZ, SZ), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)
            sub1 = deskew(sub1)
            # Paste the normalized patch next to the detection for feedback.
            if x+w+SZ < frame.shape[1] and y+SZ < frame.shape[0]:
                frame[y:,x+w:][:SZ, :SZ] = sub1[...,np.newaxis]
            sample = preprocess_hog([sub1])
            digit = model.predict(sample)[0]
            cv2.putText(frame, '%d'%digit, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (200, 0, 0), thickness = 1)
        cv2.imshow('frame', frame)
        cv2.imshow('bin', bin)
        if cv2.waitKey(1) == 27:
            break
示例16: __init__
def __init__(self, src):
    """Set up the plane-tracker demo around video source *src*."""
    # UI: a named window with an interactive rectangle selector.
    cv2.namedWindow("plane")
    self.rect_sel = common.RectSelector("plane", self.on_rect)
    # Tracking state.
    self.cap = video.create_capture(src)
    self.frame = None
    self.paused = False
    self.tracker = PlaneTracker()
开发者ID:eladj,项目名称:opencv,代码行数:8,代码来源:plane_tracker.py
示例17: __init__
def __init__(self, src):
    """Plane tracker opened with the 'book' capture preset on *src*."""
    # Capture uses the sample 'book' preset from the presets table.
    self.cap = video.create_capture(src, presets['book'])
    self.paused = False
    self.frame = None
    self.tracker = PlaneTracker()
    # UI: window plus rectangle selection feeding self.on_rect.
    cv2.namedWindow('plane')
    self.rect_sel = common.RectSelector('plane', self.on_rect)
开发者ID:MCobias,项目名称:opencv,代码行数:8,代码来源:plane_tracker.py
示例18: __init__
def __init__(self, video_src):
    """LK grid-tracker state plus a fixed display size."""
    self.cam = video.create_capture(video_src)
    self.frame_idx = 0
    self.tracks = []
    # Tracker tuning.
    self.track_len = 10
    self.detect_interval = 5
    # Display dimensions used by the drawing code.
    self.screenx = 700
    self.screeny = 550
开发者ID:josiahbjorgaard,项目名称:motiondetection,代码行数:8,代码来源:lk_track_grid2.py
示例19: __init__
def __init__(self, src):
    """Plane-AR demo: capture source, tracker, and UI with a focal slider."""
    # UI first: window, focal-length trackbar, rectangle selector.
    cv2.namedWindow('plane')
    cv2.createTrackbar('focal', 'plane', 25, 50, common.nothing)
    self.rect_sel = common.RectSelector('plane', self.on_rect)
    # Tracking state.
    self.cap = video.create_capture(src)
    self.frame = None
    self.paused = False
    self.tracker = PlaneTracker()
开发者ID:2693,项目名称:opencv,代码行数:9,代码来源:plane_ar.py
示例20: __init__
def __init__(self, video_src):
    """CAMShift demo: open the source, grab a frame, wire mouse selection."""
    self.cam = video.create_capture(video_src)
    ret, self.frame = self.cam.read()
    # Window with mouse-driven region selection.
    cv2.namedWindow('camshift')
    cv2.setMouseCallback('camshift', self.onmouse)
    # Selection / tracking state.
    self.drag_start = None
    self.selection = None
    self.tracking_state = 0
    self.show_backproj = False
开发者ID:srini90,项目名称:facerecog,代码行数:9,代码来源:camshift.py
注:本文中的video.create_capture函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论