This article collects typical usage examples of the Python pylab.ginput function. If you are wondering what ginput does, how to call it, or what real-world code using it looks like, the curated examples below should help.
The 20 code examples of ginput shown below are sorted by popularity by default. You can upvote the examples you find useful; your votes help the site recommend better Python code samples.
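Before the examples, here is a minimal sketch of the basic call pattern. This snippet is illustrative only and is not taken from any of the projects below; it assumes an interactive matplotlib backend, and the random placeholder image and the choice of three clicks are arbitrary.
import pylab

pylab.imshow(pylab.rand(64, 64), cmap='gray')   # placeholder image, just something to click on
pylab.title('Click 3 points')
pts = pylab.ginput(n=3, timeout=0)              # blocks until 3 clicks; returns [(x, y), ...] in data coordinates
print(pts)
pylab.close()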
Example 1: splitTrial
def splitTrial(self, baseChannel):
    self.baseChannel = baseChannel
    self.ax.plot(self.rawData[self.baseChannel])
    numElements = self.rawData.shape[0]
    self.ax1 = plt.axes([0.0, 0.5, 0.1, 0.075])
    self.b1 = Button(self.ax1, 'Submit')
    self.b1.on_clicked(self.onSubmit)
    begin = ginput(1)
    end = ginput(1)
    length = int(end[0][0] - begin[0][0])
    self.iBegins = [int(begin[0][0]) + i * length for i in xrange(self.numTrials)]
    self.iEnds = [int(begin[0][0]) + (i + 1) * length - 1 for i in xrange(self.numTrials)]
    [minL, maxL] = getYValueLimits(self.rawData, self.baseChannel)
    for iLine in xrange(self.numTrials):
        self.endlines.append(self.ax.axvline(self.iEnds[iLine], 0, maxL, color='k', picker=5))
    self.fig.canvas.mpl_connect('pick_event', self.onPick)
    self.fig.canvas.mpl_connect('key_press_event', self.onKey)
    # self.fig.canvas.draw()
    plt.show()
Developer: johnrocamora, Project: Grinder, Lines: 28, Source: grinder.py
Example 2: acquire_masks
def acquire_masks(self):
    im1 = self.cam2.get()
    pl.imshow(im1, cmap='gray')
    pl.title('Select Eye')
    pts_eye = pl.ginput(n=0, timeout=0)
    pts_eye = np.array(pts_eye, dtype=np.int32)
    mask_eye = np.zeros(im1.shape, dtype=np.int32)
    cv2.fillConvexPoly(mask_eye, pts_eye, (1,1,1), lineType=cv2.LINE_AA)
    pl.clf()
    im2 = self.cam2.get()
    pl.imshow(im2, cmap='gray')
    pl.title('Select Wheel')
    pl.gcf().canvas.draw()
    pts_wheel = pl.ginput(n=0, timeout=0)
    pts_wheel = np.array(pts_wheel, dtype=np.int32)
    mask_wheel = np.zeros(im2.shape, dtype=np.int32)
    cv2.fillConvexPoly(mask_wheel, pts_wheel, (1,1,1), lineType=cv2.LINE_AA)
    pl.close()
    self.mask = np.array([mask_eye, mask_wheel])
    self.mask_flat = self.mask.reshape((2,-1))
    return self.mask
Developer: bensondaled, Project: eyeblink, Lines: 25, Source: expts.py
Example 3: get_calibration
def get_calibration(fignum = 1, pct_wide = 0.2, pct_high = 0.33, tunneldicts=None):
    ax = pylab.figure(fignum).axes[0]
    ymax = round(ax.get_ybound()[1])
    xmax = round(ax.get_xbound()[1])
    pix_wide = ymax*pct_wide
    pix_high = ymax*pct_high
    print >> sys.stderr, 'image is %s x %s; windows will be %s, %s' % (xmax,ymax,pix_wide,pix_high)
    cm_dists = []
    print >> sys.stderr, 'click 0, 1, 10 cm marks'
    pylab.xlim(0,pix_high)
    pylab.ylim(ymax,ymax-pix_wide)
    p0,p1,p10 = pylab.ginput(3,0)
    cm_dists.append(vidtools.hypotenuse(p0,p1))
    cm_dists.append(vidtools.hypotenuse(p0,p10)/10.0)
    print >> sys.stderr, 'click 0, 1, 10 cm marks'
    pylab.xlim(xmax-pix_wide,xmax)
    pylab.ylim(pix_high,0)
    p0,p1,p10 = pylab.ginput(3,0)
    cm_dists.append(vidtools.hypotenuse(p0,p1))
    cm_dists.append(vidtools.hypotenuse(p0,p10)/10.0)
    print >> sys.stderr, 'click bolt 1'
    pylab.xlim(0,pix_wide)
    pylab.ylim(ymax,ymax-pix_wide)
    horiz = [pylab.ginput(1,0)[0]]
    print >> sys.stderr, 'click bolt 2'
    pylab.xlim(xmax-pix_wide,xmax)
    pylab.ylim(ymax,ymax-pix_wide)
    horiz.append(pylab.ginput(1,0)[0])
    pylab.xlim(0,xmax)
    pylab.ylim(ymax,0)
    if tunneldicts is not None:
        tunneldicts['cm_pts'] = cm_dists
        tunneldicts['horiz'] = horiz
    else:
        return cm_dists,horiz
Developer: brantp, Project: video_analysis, Lines: 35, Source: measure_burrows_from_image.py
Example 4: wait
def wait(msg=None):
    """Wait for the user to acknowledge the plot."""
    import pylab
    #from matplotlib.blocking_input import BlockingInput
    if msg: print msg
    #block = BlockingInput(fig=pylab.gcf(), eventslist=('key_press_event',))
    #block(n=1, timeout=-1)
    pylab.ginput()
Developer: reflectometry, Project: direfl, Lines: 10, Source: simulate.py
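The commented-out BlockingInput lines above suggest the author considered waiting for a key press rather than a mouse click. Here is a hedged sketch of that variant using matplotlib's waitforbuttonpress; this is an assumption about intent, not the project's code:
import pylab

def wait_for_key(msg=None):
    """Block until the user presses a key in the current figure."""
    if msg:
        print(msg)
    # waitforbuttonpress returns True for a key press, False for a mouse click.
    while not pylab.waitforbuttonpress(timeout=-1):
        pass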
Example 5: plotgrid
def plotgrid(data,d=10,shape=(30,30)):
    """Plot a list of images on a grid."""
    ion()
    gray()
    clf()
    for i in range(min(d*d,len(data))):
        subplot(d,d,i+1)
        row = data[i]
        if shape is not None: row = row.reshape(shape)
        imshow(row)
    ginput(1,timeout=0.1)
Developer: stweil, Project: ocropy, Lines: 11, Source: common.py
Example 6: touch_with_func
def touch_with_func():
    plot_data = {}
    read_data = CVMCv1Parser.from_file('cv1.txt')
    for i in range(0, 1):
        data = read_data.data[i]['data']
        temp = read_data.data[i]['temp']
        data.set_free_energy_00_from_logtxt('log.txt')
        data.set_end_energy(0, 0)
        err = make_err_func(func)
        points = []
        for j in range(15):
            pylab.plot(data['comp1'], data['corrected_g'], '+')
            pylab.axvline(x=1/16*(j+1), color='black')
            try:
                p = pylab.ginput(n=1, timeout=2)
                while p == []:
                    p = pylab.ginput(n=1, timeout=2)
                print(1/16*(j+1), p[0][1])
            except KeyboardInterrupt:
                print("Gave up acquiring coordinates")
                exit()
            points.append(p[0])
            pylab.close()
        points = np.array(points)
        param = optimize.leastsq(
            err, [1, 1, 1, 1, 1], args=(points[:, 0],
                                        points[:, 1]))[0]
        judge = np.abs(err(param, data['comp1'], data['corrected_g'])) < 1
        inliersx = data['comp1'][judge]
        inliersy = data['corrected_g'][judge]
        x = inliersx
        y = inliersy
        opt = optimize.leastsq(
            err, [1, 1, 1, 1, 1], args=(x, y))[0]
        fitx = np.linspace(0, 1, 100)
        fity = func(opt, fitx)
        plot_data.update({temp: [data['comp1'], data['corrected_g'], fitx, fity]})
    i = 1
    for temp in plot_data:
        pd = plot_data[temp]
        d = (33 // (len(plot_data) + 1))
        pylab.plot(pd[0], pd[1], '+',
                   color=COLOR_PALETTE[d*i][1])
        pylab.plot(pd[2], pd[3], color=COLOR_PALETTE[d*i][0], linewidth=2)
        save_results(temp, pd)
        i += 1
    pylab.xlim(0, 1)
    pylab.show()
Developer: hackberie, Project: 00_workSpace, Lines: 52, Source: cv1parser.py
Example 7: wincoord
def wincoord(winno):
    cor1 = 1 ; cor2 = 0
    while cor1 > cor2:
        print("Please click at the start (left-side) of the window %d" % (winno))
        cor1 = plt.ginput(1)[0][0]
        print(cor1)
        print("Please click at the end (right-side) of the window %d" % (winno))
        cor2 = plt.ginput(1)[0][0]
        print(cor2)
        if cor1 > cor2:
            #print('pixel coordinates of the start of the window is greater than the end! So lets restarts input for window ' % (winno))
            # Check if coordinates were entered in Arabic fashion! Swap if needed.
            cor1, cor2 = cor2, cor1
    return round(cor1,0),round(cor2,0)
Developer: vkaustubh, Project: ksda_tools, Lines: 14, Source: ApertureLibrary.py
Example 8: Set_Masks
def Set_Masks(Filename,number,message,save_name):
    Video = cv2.VideoCapture(Filename)
    height = int(Video.get(4))
    width = int(Video.get(3))
    frames = int(Video.get(cv.CV_CAP_PROP_FRAME_COUNT))
    FOURCC = cv2.cv.CV_FOURCC(*'XVID')
    color_names_RBG = [(255,0,0),(0,255,0),(0,0,255),(255,255,0),(255,0,255),(0,255,255),(0,0,0)] #BGR
    s,frame = Video.read()
    disp_frame = frame[:]
    frame = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
    Masks = np.zeros((number,height,width),dtype=np.uint8)
    for n in range(number):
        pl.figure()
        pl.title(message+" %i"%(n+1))
        pl.imshow(disp_frame, cmap=mpl_cm.Greys_r)
        pts = []
        while not len(pts):
            pts = pl.ginput(0)
        pts = np.array(pts, dtype=np.int32)
        pl.close()
        path = mpl_path.Path(pts)
        for ridx,row in enumerate(frame):
            for cidx,pt in enumerate(row):
                if path.contains_point([cidx, ridx]):
                    Masks[n,ridx,cidx] = 1
        cv2.polylines(disp_frame, [pts], 1, color_names_RBG[n%6], thickness=1) #BGR
    cv2.imwrite(save_name,disp_frame)
    Video.release()
    return Masks
Developer: jfond, Project: MPA, Lines: 35, Source: Main_Analysis.py
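The nested per-pixel loop above calls path.contains_point once per pixel, which gets slow for large frames. A sketch of a vectorized alternative using matplotlib's Path.contains_points; this rewrite is illustrative and not part of the original project:
import numpy as np
import matplotlib.path as mpl_path

def polygon_mask(pts, height, width):
    """Return a uint8 mask that is 1 inside the clicked polygon."""
    path = mpl_path.Path(pts)
    # Build an (N, 2) array of (x, y) pixel coordinates, matching contains_point([cidx, ridx]).
    xx, yy = np.meshgrid(np.arange(width), np.arange(height))
    coords = np.column_stack([xx.ravel(), yy.ravel()])
    inside = path.contains_points(coords).reshape(height, width)
    return inside.astype(np.uint8)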
Example 9: fit_and_plot
def fit_and_plot(self, image):
    print "fit and plot"
    p = self.parameters.IonsOnCamera
    x_axis = np.arange(p.horizontal_min, p.horizontal_max + 1, self.image_region[0])
    y_axis = np.arange(p.vertical_min, p.vertical_max + 1, self.image_region[1])
    xx, yy = np.meshgrid(x_axis, y_axis)
    #import IPython
    #IPython.embed()
    #self.fitter.report(params)
    #ideally graphing should be done by saving to data vault and using the grapher
    #p = Process(target = self.fitter.graph, args = (x_axis, y_axis, image, params, result))
    #p.start()
    pylab.imshow(image)
    positions = pylab.ginput(0)
    positions = [x + np.min(x_axis) for x,y in positions]
    self.fitter = ion_state_detector(positions)
    result, params = self.fitter.guess_parameters_and_fit(xx, yy, image)
    self.fitter.graph(x_axis, y_axis, image, params, result)
    position_list = []
    try:
        i = 0
        while(True):
            position_list.append(params['pos'+str(i)].value)
            i += 1
    except KeyError:
        pass
    self.pv.set_parameter('IonsOnCamera','fit_background_level', params['background_level'].value)
    self.pv.set_parameter('IonsOnCamera','fit_amplitude', params['amplitude'].value)
    self.pv.set_parameter('IonsOnCamera','ion_positions', position_list) #TODO ?? FIXME
    self.pv.set_parameter('IonsOnCamera','ion_number', len(position_list)) #TODO ?? FIXME
    self.pv.set_parameter('IonsOnCamera','fit_sigma', params['sigma'].value)
Developer: HaeffnerLab, Project: cct, Lines: 35, Source: reference_image.py
Example 10: etch
def etch(self, refmask=[0]):
    """Cut away at surface. Can only click polygons for now.
    Optionally input reference mask to guide etching.
    """
    size = self.size
    mask = n.ones((self.size,self.size), dtype='bool')
    print 'Click for points of region to etch (right click to exit).'
    p.figure(1)
    if len(refmask) != 1:
        p.imshow(n.transpose(-refmask), aspect='auto', origin='lower', interpolation='nearest', cmap=p.cm.Greys, extent=(0,1,0,1), vmax=0.5)
    else:
        p.imshow(n.transpose(-self.mask), aspect='auto', origin='lower', interpolation='nearest', cmap=p.cm.Greys, extent=(0,1,0,1), vmax=0.5)
    xy = p.ginput(n=0, timeout=3000)
    xy = n.array(xy)
    print 'Calculating remaining region...'
    p.figure(1)
    p.axis([0,1,0,1])
    p.fill(xy[:,0],xy[:,1],'k')
    for i in range(size):
        for j in range(size):
            mask[i,j] = not(point_inside_polygon(float(i)/size, float(j)/size, xy))
    self.mask = self.mask * mask
Developer: caseyjlaw, Project: misc, Lines: 26, Source: printing.py
Example 11: selectPoints
def selectPoints(im1_path, im2_path):
    im = Image.open(im1_path)
    plt.imshow(im)
    counter, f_points = constant.TOTAL_FEATURE, []
    while counter != 0:
        print "Click on screen!"
        x = ginput(1)
        counter -= 1
        f_points.append([x[0][0], x[0][1]])
        plt.scatter(x[0][0], x[0][1])
        plt.draw()
        print("Clicked point at ", x, " | Clicks left: ", counter)
    plt.show()
    second_points = drag_control_points(mpimg.imread(im2_path), np.array(f_points))
    intermediate_feature = interpolatePts(combinePoints(f_points, second_points))
    frames = combineImages(intermediate_feature, constant.TRIANGLES, im1_path, im2_path)
    frames.extend(frames[::-1])
    # otherone = [cv2.cvtColor(items, cv2.COLOR_RGB2BGR) for items in frames]
    # writeGif("lol.GIF", otherone, duration=0.07)
    while True:
        for i in range(0, len(frames)):
            f = frames[i]
            cv2.waitKey(20)
            cv2.imshow("Cameras",f)
            cv2.waitKey(20)
Developer: NeedFR, Project: facemorpher, Lines: 26, Source: main.py
Example 12: getCoordinate
def getCoordinate(direction='both', axh=None, fig=None):
    """Tool for selecting a coordinate, functionality similar to ginput for a single point. Finish with right mouse button."""
    if not axh:
        axh = pl.gca()
    if not fig:
        fig = pl.gcf()
    hor = False
    ver = False
    # Membership test instead of the original `direction is 'horizontal' or 'hor' or 'both'`,
    # which always evaluated to True.
    if direction in ('horizontal', 'hor', 'both'):
        hor = True
    if direction in ('vertical', 'ver', 'both'):
        ver = True
    # Mutable flag so the callback can signal completion to the enclosing loop
    # (a plain `finished = True` inside the callback would only rebind a local name).
    finished = [False]
    def button_press_callback(event):
        if event.inaxes:
            if event.button == 3:
                finished[0] = True
    fig.canvas.mpl_connect('button_press_event', button_press_callback)
    print("Select a coordinate, finish with right click.")
    linh = []
    while not finished[0]:
        for tlinh in linh:
            tlinh.remove()
        linh = []
        pl.draw()
        pos = pl.ginput(1)[0]
        if hor:
            linh.append(pl.axvline(pos[0]))
        if ver:
            linh.append(pl.axhline(pos[1]))
        pl.draw()
        pl.waitforbuttonpress()
    fig.canvas.draw()
    return pos
Developer: htlemke, Project: ixppy, Lines: 35, Source: toolsPlot.py
Example 13: _Delete_spectra_fired
def _Delete_spectra_fired(self):
    plt.figure()
    plt.contourf(Data.wavelength, Data.time, Data.TrA_Data, 100)
    plt.title('Pick between times to delete (top to bottom)')
    plt.xlabel('Wavelength')
    plt.ylabel('Time')
    fittingto = np.array(ginput(2))
    plt.show()
    plt.close()
    index_time_top=(np.abs(Data.time-fittingto[1,1])).argmin()
    index_time_bottom=(np.abs(Data.time-fittingto[0,1])).argmin()+1
    if index_time_bottom <= index_time_top:
        hold = index_time_top
        index_time_top = index_time_bottom
        index_time_bottom = hold
    if index_time_top == 0:
        Data.TrA_Data = Data.TrA_Data[index_time_bottom:,:]
        Data.time = Data.time[index_time_bottom:]
    if index_time_bottom == Data.time.shape:
        Data.TrA_Data = Data.TrA_Data[:index_time_top,:]
        Data.time = Data.time[:index_time_top]
    if index_time_top != 0 & index_time_bottom != Data.time.shape:
        Data.TrA_Data = np.vstack((Data.TrA_Data[:index_time_top,:],Data.TrA_Data[index_time_bottom:,:]))
        Data.time = np.hstack((Data.time[:index_time_top],Data.time[index_time_bottom:]))
    self.log = "%s \nDeleted spectra between %s and %s" %(self.log,fittingto[0,1],fittingto[1,1])
Developer: Margauxair, Project: PyTrA, Lines: 31, Source: PyTrA.py
Example 14: get_terminus
def get_terminus(self):
    from matplotlib.widgets import Cursor
    if self.mask_computed == True:
        self.mask = dbg.initialize_mask(self.thk)
        plt.clf()
        plt.pcolormesh(self.x, self.y, self.mask)
        plt.contour(self.x, self.y, self.z, colors='black')
        plt.axis('tight')
        plt.axes().set_aspect('equal')
        plt.draw()
    plt.setp(plt.gca(),autoscale_on=False)
    cursor = Cursor(plt.axes(), useblit=True, color='white', linewidth=1 )
    if self.ph is not None and self.mask_computed == False:
        for p in self.ph:
            p.remove()
        self.ph = None
    pts = []
    while len(pts) < 4:
        pts = np.asarray( plt.ginput(4, timeout=-1) )
    self.ph = plt.fill(pts[:,0], pts[:,1], 'white', lw = 2, alpha=0.5)
    plt.draw()
    self.pts = pts
    self.mask_computed = False
Developer: ckhroulev, Project: dbg-playground, Lines: 32, Source: pism_regional.py
Example 15: get_terminus
def get_terminus():
    from matplotlib.widgets import Cursor
    def tellme(s):
        print s
        plt.title(s,fontsize=16)
        plt.draw()
    plt.setp(plt.gca(),autoscale_on=False)
    cursor = Cursor(plt.axes(), useblit=True, color='white', linewidth=1 )
    happy = False
    while not happy:
        pts = []
        while len(pts) < 4:
            tellme('Select 4 corners of the terminus region')
            pts = np.asarray( plt.ginput(4, timeout=-1) )
            if len(pts) < 4:
                tellme('Too few points, starting over')
                time.sleep(1) # Wait a second
        ph = plt.fill(pts[:,0], pts[:,1], 'white', lw = 2, alpha=0.5)
        tellme('Done? Press any key if yes, mouse click to reset')
        happy = plt.waitforbuttonpress()
        # Get rid of fill
        if not happy:
            for p in ph: p.remove()
    return pts
Developer: ckhroulev, Project: dbg-playground, Lines: 32, Source: test.py
Example 16: Main
def Main():
    save_file_root = "C:\\Users\\Camera\\Desktop\\Video_Editing_Tools\\Analysis\\Set 8\\Limb_Movement"
    load_video_filename = "C:\\Users\\Camera\\Desktop\\Video_Editing_Tools\\Analysis\\Set 8\\Median.avi"
    # load_video_filename = "C:\\Users\\Camera\\Desktop\\GtHUb\\Two-Cameras\\Data\\AG052014-01\\21072014\\Trial5\\PS3_Vid83.avi"
    cap = cv2.VideoCapture(load_video_filename)
    ret, prvs = cap.read()
    prvs = cv2.cvtColor(prvs, cv2.COLOR_BGR2GRAY)
    frames = int(cap.get(cv.CV_CAP_PROP_FRAME_COUNT))
    # Select mask in which we should look for the initial good points to track - a.k.a. select limb to track
    pl.figure()
    pl.title("Select mask")
    pl.imshow(prvs, cmap=mpl_cm.Greys_r)
    pts = []
    while not len(pts):
        pts = pl.ginput(0)
    pl.close()
    path = mpl_path.Path(pts)
    mask = np.zeros(np.shape(prvs), dtype=np.uint8)
    for ridx, row in enumerate(mask):
        for cidx, pt in enumerate(row):
            if path.contains_point([cidx, ridx]):
                mask[ridx, cidx] = 1
    for n in range(frames):
        ret, next = cap.read()
        next = cv2.cvtColor(next, cv2.COLOR_BGR2GRAY)
        flow = cv2.calcOpticalFlowFarneback(prvs, next, 0.5, 3, 10, 3, 5, 1.2, 0)
        distt = 3
        threshold = 100
        new_mask = np.zeros(np.shape(prvs), dtype=np.uint8)
        for ridx, row in enumerate(mask):
            for cidx, pt in enumerate(row):
                if mask[ridx, cidx] == 1:
                    y = ridx + int(round(flow[ridx, cidx, 1]))
                    # print int(round(flow[ridx,cidx,1]))
                    x = cidx + int(round(flow[ridx, cidx, 0]))
                    if int(round(flow[ridx, cidx, 1])) < 10 and int(round(flow[ridx, cidx, 0])) < 10:
                        tmask = next[y - distt : y + distt + 1, x - distt : x + distt + 1]
                        # print tmask
                        idx = np.where(tmask > threshold)
                        # print idx[0]+y-distt
                        new_mask[idx[0] + y - distt, idx[1] + x - distt] = 1
        mask = new_mask
        next *= new_mask
        cv2.imwrite("C:\\Users\\Camera\\Desktop\\TEST.png", next)
        cv2.imshow("Limb", next)
        k = cv2.waitKey(30) & 0xFF
        if k == 27:
            break
        prvs = next
    cv2.destroyAllWindows()
    cap.release()
Developer: jfond, Project: MPA, Lines: 60, Source: DenseLimbTracker.py
Example 17: select_valid_region_topView
def select_valid_region_topView(frame_input):
    inputFig = plt.figure()
    plt.title('Select points to define valid regions:top View')
    plt.imshow(frame_input, cmap = 'gray', interpolation = 'bicubic')
    # Same as ginput() in Matlab
    region_ptList = ginput(8)
    plt.close(inputFig)
    region_ptArray0 = np.asarray(region_ptList)
    region_ptArray = region_ptArray0.astype(int)
    region_ptArray = np.vstack((region_ptArray, region_ptArray[0,:]))
    # Get the arms A,B,C,U
    pointsLeft = region_ptArray[0:4,:]
    maskLeft0 = np.zeros(frame_input.shape[:2],dtype = 'uint8')
    cv2.drawContours(maskLeft0,[pointsLeft],0,255,-1)
    maskLeft0 = cv2.cvtColor(maskLeft0,cv2.COLOR_GRAY2BGR)
    maskLeft = cv2.cvtColor(maskLeft0,cv2.COLOR_BGR2GRAY)
    pointsRight = region_ptArray[4:8,:]
    maskRight0 = np.zeros(frame_input.shape[:2],dtype = 'uint8')
    cv2.drawContours(maskRight0,[pointsRight],0,255,-1)
    maskRight0 = cv2.cvtColor(maskRight0,cv2.COLOR_GRAY2BGR)
    maskRight = cv2.cvtColor(maskRight0,cv2.COLOR_BGR2GRAY)
    return maskLeft0, maskLeft, maskRight0, maskRight
Developer: pangjc, Project: Python_activity_recognition, Lines: 26, Source: rat_video_segmentation_topView_GUI1.py
Example 18: fit_and_plot
def fit_and_plot(self, image):
    print "fit and plot"
    p = self.parameters.IonsOnCamera
    x_axis = np.arange(p.horizontal_min, p.horizontal_max + 1, self.image_region[0])
    y_axis = np.arange(p.vertical_min, p.vertical_max + 1, self.image_region[1])
    xx, yy = np.meshgrid(x_axis, y_axis)
    #import IPython
    #IPython.embed()
    #self.fitter.report(params)
    #ideally graphing should be done by saving to data vault and using the grapher
    #p = Process(target = self.fitter.graph, args = (x_axis, y_axis, image, params, result))
    #p.start()
    pylab.figure()
    pylab.imshow(image)
    pylab.show()
    pylab.figure()
    img_1d = np.sum(image, axis=0)
    pylab.plot(img_1d)
    threshold = pylab.ginput(0)
    threshold = threshold[0][1]
    print threshold
    ion_edges = self.isolate_ions(img_1d, threshold)
    centers = self.fit_individual_ions(img_1d, ion_edges)
    deltax = []
    for i, x in enumerate(ion_edges):
        try: deltax.append( centers[i+1] - centers[i] )
        except: pass
    pylab.figure()
    pylab.plot(deltax, 'o')
    pylab.show()
Developer: HaeffnerLab, Project: cct, Lines: 33, Source: detect_ion_spacing.py
Example 19: get_wavelimits
def get_wavelimits(self, qrspeaks, leads=range(12)):
    """
    Given qrspeaks / point on qrs,
    interactively, obtain qrs onset, end and tend
    leads is a list of the indices of ECG leads
    """
    ax = pylab.subplot(111)
    ax.set_title("Pick QRS onset, end and T end")
    #ax = matplotlib.pyplot.axes()
    meanrr = int(scipy.mean(qrspeaks[1:] - qrspeaks[:-1]))
    onems = int(self.samplingrate / 1000)
    r = qrspeaks[int(len(qrspeaks) * 2/3)] # choose a beat 2/3 of way
    start = r - 200 * onems # 400 ms before
    end = start + meanrr
    for l in leads:
        ax.plot(self.data[start:end, l])
    cursor = Cursor(ax)
    pts = pylab.ginput(3)
    q, s, t = [pt[0] for pt in pts]
    #pylab.show()
    qrsonsets = qrspeaks + int(q - 200 * onems)
    qrsends = qrspeaks + int(s - 200 * onems)
    tends = qrspeaks + int(t - 200 * onems)
    return qrsonsets, qrsends, tends
Developer: Basildcruz, Project: ecgtk, Lines: 28, Source: ecgtk.py
Example 20: _DeleteTraces_fired
def _DeleteTraces_fired(self):
    plt.figure()
    plt.contourf(Data.wavelength, Data.time, Data.TrA_Data, 100)
    plt.title('Pick between wavelength to delete (left to right)')
    plt.xlabel('Wavelength')
    plt.ylabel('Time')
    fittingto = np.array(ginput(2))
    plt.show()
    plt.close()
    index_wavelength_left=(np.abs(Data.wavelength-fittingto[0,0])).argmin()
    index_wavelength_right=(np.abs(Data.wavelength-fittingto[1,0])).argmin()+1
    if index_wavelength_right <= index_wavelength_left:
        hold = index_wavelength_left
        index_wavelength_left = index_wavelength_right
        index_wavelength_right = hold
    if index_wavelength_left == 0:
        Data.TrA_Data = Data.TrA_Data[:,index_wavelength_right:]
        Data.wavelength = Data.wavelength[index_wavelength_right:]
    if index_wavelength_right == Data.wavelength.shape:
        Data.TrA_Data = Data.TrA_Data[:,:index_wavelength_left]
        Data.wavelength = Data.wavelength[:index_wavelength_left]
    if index_wavelength_left != 0 & index_wavelength_right != Data.wavelength.shape:
        Data.TrA_Data = np.hstack((Data.TrA_Data[:,:index_wavelength_left],Data.TrA_Data[:,index_wavelength_right:]))
        Data.wavelength = np.hstack((Data.wavelength[:index_wavelength_left],Data.wavelength[index_wavelength_right:]))
    self.log = "%s \nDeleted traces between %s and %s" %(self.log,fittingto[0,0],fittingto[1,0])
Developer: Margauxair, Project: PyTrA, Lines: 32, Source: PyTrA.py
Note: the pylab.ginput examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets are taken from open-source projects contributed by their respective developers, and copyright remains with the original authors; refer to each project's License before redistributing or reusing the code. Do not republish without permission.