本文整理汇总了C#中CvEnum类的典型用法代码示例。如果您正苦于以下问题:C# CvEnum类的具体用法?C# CvEnum怎么用?C# CvEnum使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
CvEnum类属于命名空间,在下文中一共展示了CvEnum类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C#代码示例。
示例1: WarpAffine
/// <summary>
/// Applies an affine transformation to an image.
/// </summary>
/// <param name="src">Source image</param>
/// <param name="dst">Destination image</param>
/// <param name="mapMatrix">2x3 transformation matrix</param>
/// <param name="dsize">Size of the output image.</param>
/// <param name="interpMethod">Interpolation method</param>
/// <param name="warpMethod">Warp method</param>
/// <param name="borderMode">Pixel extrapolation method</param>
/// <param name="borderValue">A value used to fill outliers</param>
public static void WarpAffine(IInputArray src, IOutputArray dst, IInputArray mapMatrix, Size dsize, CvEnum.Inter interpMethod = CvEnum.Inter.Linear, CvEnum.Warp warpMethod = CvEnum.Warp.Default, CvEnum.BorderType borderMode = CvEnum.BorderType.Constant, MCvScalar borderValue = new MCvScalar())
{
   using (InputArray srcArr = src.GetInputArray())
   using (OutputArray dstArr = dst.GetOutputArray())
   using (InputArray mapArr = mapMatrix.GetInputArray())
   {
      // The native call packs interpolation and warp options into a single flags value.
      int flags = (int)interpMethod | (int)warpMethod;
      cveWarpAffine(srcArr, dstArr, mapArr, ref dsize, flags, borderMode, ref borderValue);
   }
}
开发者ID:Warren-GH,项目名称:emgucv,代码行数:18,代码来源:CvInvokeImgproc.cs
示例2: cvFindFundamentalMat
// Native (P/Invoke) declaration -- the [DllImport] attribute presumably accompanies this
// declaration in the full source file. Estimates the fundamental matrix between two images
// from point correspondences; exact semantics follow OpenCV's cvFindFundamentalMat
// (see the OpenCV documentation -- verify before relying on parameter meanings).
public static extern int cvFindFundamentalMat(IntPtr points1,   // points in the first image (native matrix pointer)
IntPtr points2,                                                 // corresponding points in the second image
IntPtr fundamentalMatrix,                                       // output: computed fundamental matrix (native matrix pointer)
CvEnum.CV_FM method,                                            // estimation algorithm selector
double param1,                                                  // method-specific parameter (e.g. distance threshold for RANSAC)
double param2,                                                  // method-specific parameter (e.g. confidence level)
IntPtr status);                                                 // optional output: per-point inlier/outlier status array
开发者ID:Rustemt,项目名称:emgu_openCV,代码行数:7,代码来源:CvInvokeCalib3d.cs
示例3: Inpaint
/// <summary>
/// Reconstructs the selected image area from the pixels near the area boundary. May be used to
/// remove dust and scratches from a scanned photo, or undesirable objects from still images or video.
/// </summary>
/// <param name="src">The input 8-bit 1-channel or 3-channel image</param>
/// <param name="mask">The inpainting mask, 8-bit 1-channel image. Non-zero pixels indicate the area that needs to be inpainted</param>
/// <param name="dst">The output image of the same format and the same size as input</param>
/// <param name="inpaintRadius">The radius of circular neighborhood of each point inpainted that is considered by the algorithm</param>
/// <param name="flags">The inpainting method</param>
public static void Inpaint(IInputArray src, IInputArray mask, IOutputArray dst, double inpaintRadius, CvEnum.InpaintType flags)
{
   using (InputArray srcArr = src.GetInputArray())
   using (InputArray maskArr = mask.GetInputArray())
   using (OutputArray dstArr = dst.GetOutputArray())
   {
      cveInpaint(srcArr, maskArr, dstArr, inpaintRadius, flags);
   }
}
开发者ID:reidblomquist,项目名称:emgucv,代码行数:15,代码来源:CvInvokePhoto.cs
示例4: FindHomography
/// <summary>
/// Finds the perspective transformation H=||h_ij|| between the source and the destination planes
/// and writes it to <paramref name="homography"/>.
/// </summary>
/// <param name="srcPoints">Point coordinates in the original plane</param>
/// <param name="dstPoints">Point coordinates in the destination plane</param>
/// <param name="homography">The output homography matrix</param>
/// <param name="method">FindHomography method</param>
/// <param name="ransacReprojThreshold">
/// The maximum allowed reprojection error to treat a point pair as an inlier.
/// Only used by RANSAC-based homography estimation. E.g. if dst_points coordinates are
/// measured in pixels with pixel-accurate precision, a value in the range ~1..3 is sensible.
/// </param>
/// <param name="mask">Optional output mask set by a robust method ( CV_RANSAC or CV_LMEDS ). Note that the input mask values are ignored.</param>
public static void FindHomography(
PointF[] srcPoints,
PointF[] dstPoints,
IOutputArray homography,
CvEnum.HomographyMethod method,
double ransacReprojThreshold = 3,
IOutputArray mask = null)
{
   // Pin both managed arrays so native code can read them directly.
   GCHandle pinnedSrc = GCHandle.Alloc(srcPoints, GCHandleType.Pinned);
   GCHandle pinnedDst = GCHandle.Alloc(dstPoints, GCHandleType.Pinned);
   try
   {
      // View each pinned PointF[] as an n x 2 single-channel 32-bit float matrix
      // (row stride of 8 bytes = two 4-byte floats per point).
      using (Mat srcMat = new Mat(srcPoints.Length, 2, DepthType.Cv32F, 1, pinnedSrc.AddrOfPinnedObject(), 8))
      using (Mat dstMat = new Mat(dstPoints.Length, 2, DepthType.Cv32F, 1, pinnedDst.AddrOfPinnedObject(), 8))
      {
         CvInvoke.FindHomography(srcMat, dstMat, homography, method, ransacReprojThreshold, mask);
      }
   }
   finally
   {
      // Always unpin, even if the native call throws.
      pinnedSrc.Free();
      pinnedDst.Free();
   }
}
开发者ID:neutmute,项目名称:emgucv,代码行数:40,代码来源:CvInvokeCalib3d.cs
示例5: Imdecode
/// <summary>
/// Decode an image stored in a byte buffer.
/// </summary>
/// <param name="buf">The buffer containing the encoded image data</param>
/// <param name="loadType">The image loading type</param>
/// <param name="dst">The output placeholder for the decoded matrix.</param>
public static void Imdecode(byte[] buf, CvEnum.LoadImageType loadType, Mat dst)
{
   // Wrap the managed byte[] in a native vector and delegate to the vector-based overload.
   using (VectorOfByte nativeBuffer = new VectorOfByte(buf))
      Imdecode(nativeBuffer, loadType, dst);
}
开发者ID:reidblomquist,项目名称:emgucv,代码行数:13,代码来源:CvInvokeImgcodecs.cs
示例6: cvFindHomography
// Native (P/Invoke) declaration -- the [DllImport] attribute presumably accompanies this
// declaration in the full source file. Finds the perspective transformation (homography)
// between two planes; semantics follow OpenCV's cvFindHomography (see OpenCV docs).
public static extern int cvFindHomography(
IntPtr srcPoints,               // points in the original plane (native matrix pointer)
IntPtr dstPoints,               // corresponding points in the destination plane
IntPtr homography,              // output: homography matrix (native matrix pointer)
CvEnum.HOMOGRAPHY_METHOD method,    // estimation method (default / robust variants)
double ransacReprojThreshold,   // max reprojection error to count a pair as inlier (RANSAC)
IntPtr mask);                   // optional output: inlier mask
开发者ID:Rustemt,项目名称:emgu_openCV,代码行数:7,代码来源:CvInvokeCalib3d.cs
示例7: AllocateData
// Allocates a pinned managed array sized to totalInBytes whose element type matches the
// requested depth, and returns the pinned base address for native code to use.
// NOTE(review): the channels parameter is currently unused here -- element count is derived
// purely from the byte total; confirm against callers.
private IntPtr AllocateData(CvEnum.DepthType type, int channels, int totalInBytes)
{
   // Release any previously allocated/pinned buffer first.
   FreeData();

   // Element count = byte total shifted by log2(element size):
   // >> 1 for 2-byte, >> 2 for 4-byte, >> 3 for 8-byte elements.
   // Cv8U and any unrecognized depth fall back to a plain byte array.
   if (type == CvEnum.DepthType.Cv8S)
      _data = new SByte[totalInBytes];
   else if (type == CvEnum.DepthType.Cv16U)
      _data = new UInt16[totalInBytes >> 1];
   else if (type == CvEnum.DepthType.Cv16S)
      _data = new Int16[totalInBytes >> 1];
   else if (type == CvEnum.DepthType.Cv32S)
      _data = new Int32[totalInBytes >> 2];
   else if (type == CvEnum.DepthType.Cv32F)
      _data = new float[totalInBytes >> 2];
   else if (type == CvEnum.DepthType.Cv64F)
      _data = new double[totalInBytes >> 3];
   else
      _data = new byte[totalInBytes];

   // Pin the array so its address stays valid while native code holds it.
   _dataHandle = GCHandle.Alloc(_data, GCHandleType.Pinned);
   return _dataHandle.AddrOfPinnedObject();
}
开发者ID:Delaley,项目名称:emgucv,代码行数:35,代码来源:MatDataAllocator.cs
示例8: Farneback
/// <summary>
/// Computes dense optical flow using Gunnar Farneback's algorithm
/// </summary>
/// <param name="prev0">The first 8-bit single-channel input image</param>
/// <param name="next0">The second input image of the same size and the same type as prevImg</param>
/// <param name="flowX">The computed flow image for x-velocity; will have the same size as prevImg</param>
/// <param name="flowY">The computed flow image for y-velocity; will have the same size as prevImg</param>
/// <param name="pyrScale">Specifies the image scale (less than 1) to build the pyramids for each image. pyrScale=0.5 means the classical pyramid, where each next layer is twice smaller than the previous</param>
/// <param name="levels">The number of pyramid layers, including the initial image. levels=1 means that no extra layers are created and only the original images are used</param>
/// <param name="winSize">The averaging window size; The larger values increase the algorithm robustness to image noise and give more chances for fast motion detection, but yield more blurred motion field</param>
/// <param name="iterations">The number of iterations the algorithm does at each pyramid level</param>
/// <param name="polyN">Size of the pixel neighborhood used to find polynomial expansion in each pixel. The larger values mean that the image will be approximated with smoother surfaces, yielding more robust algorithm and more blurred motion field. Typically, poly n=5 or 7</param>
/// <param name="polySigma">Standard deviation of the Gaussian that is used to smooth derivatives that are used as a basis for the polynomial expansion. For poly n=5 you can set poly sigma=1.1, for poly n=7 a good value would be poly sigma=1.5</param>
/// <param name="flags">The operation flags</param>
public static void Farneback(
Image<Gray, Byte> prev0,
Image<Gray, Byte> next0,
Image<Gray, Single> flowX,
Image<Gray, Single> flowY,
double pyrScale,
int levels,
int winSize,
int iterations,
int polyN,
double polySigma,
CvEnum.OPTICALFLOW_FARNEBACK_FLAG flags)
{
// The native Farneback API wants a single 2-channel 32-bit float flow image;
// allocate one the size of the input and release it in the finally block below.
IntPtr flow0 = CvInvoke.cvCreateImage(prev0.Size, Emgu.CV.CvEnum.IPL_DEPTH.IPL_DEPTH_32F, 2);
try
{
if ((int) (flags & Emgu.CV.CvEnum.OPTICALFLOW_FARNEBACK_FLAG.USE_INITIAL_FLOW) != 0)
{ // Use initial flow: pack the caller-supplied x/y velocity planes into the 2-channel image.
CvInvoke.cvMerge(flowX.Ptr, flowY.Ptr, IntPtr.Zero, IntPtr.Zero, flow0);
}
CvInvoke.cvCalcOpticalFlowFarneback(prev0, next0, flow0, pyrScale, levels, winSize, iterations, polyN, polySigma, flags);
// Unpack the computed 2-channel flow back into the separate x and y output images.
CvInvoke.cvSplit(flow0, flowX.Ptr, flowY.Ptr, IntPtr.Zero, IntPtr.Zero);
}
finally
{
// Always release the temporary native image, even if the native call throws.
CvInvoke.cvReleaseImage(ref flow0);
}
}
开发者ID:wendellinfinity,项目名称:ShoulderSurferAlert,代码行数:43,代码来源:OpticalFlow.cs
示例9: cveGrabCut
// Native (P/Invoke) declaration backing the managed GrabCut wrapper -- the [DllImport]
// attribute presumably accompanies this declaration in the full source file.
// Semantics follow OpenCV's grabCut (see OpenCV documentation).
private static extern void cveGrabCut(
IntPtr img,         // input image (native array pointer)
IntPtr mask,        // in/out segmentation mask
ref Rectangle rect,     // ROI containing the object to segment
IntPtr bgdModel,    // temporary background model buffer
IntPtr fgdModel,    // temporary foreground model buffer
int iterCount,      // number of GrabCut iterations to run
CvEnum.GrabcutInitType type);   // initialization mode (e.g. from rect or from mask)
开发者ID:reidblomquist,项目名称:emgucv,代码行数:8,代码来源:CvInvokeCvextern.cs
示例10: cvAdaptiveThreshold
// Native (P/Invoke) declaration -- the [DllImport] attribute presumably accompanies this
// declaration in the full source file. Applies an adaptive threshold to an image;
// semantics follow OpenCV's cvAdaptiveThreshold (see OpenCV documentation).
public static extern void cvAdaptiveThreshold(
IntPtr src,             // source image (native pointer)
IntPtr dst,             // destination image (native pointer)
double maxValue,        // value assigned to pixels that satisfy the threshold condition
CvEnum.ADAPTIVE_THRESHOLD_TYPE adaptiveType,    // adaptive method (mean / Gaussian)
CvEnum.THRESH thresholdType,                    // thresholding type
int blockSize,          // size of the pixel neighborhood used to compute the local threshold
double param1);         // method-dependent constant subtracted from the neighborhood mean
开发者ID:samuto,项目名称:UnityOpenCV,代码行数:8,代码来源:CvInvokeCv.cs
示例11: CvGrabCut
// Native (P/Invoke) declaration -- the [DllImport] attribute presumably accompanies this
// declaration in the full source file. Runs the GrabCut segmentation algorithm;
// semantics follow OpenCV's grabCut (see OpenCV documentation).
public extern static void CvGrabCut(
IntPtr img,         // input image (native array pointer)
IntPtr mask,        // in/out segmentation mask
ref Rectangle rect,     // ROI containing the object to segment
IntPtr bgdModel,    // temporary background model buffer
IntPtr fgdModel,    // temporary foreground model buffer
int iterCount,      // number of GrabCut iterations to run
CvEnum.GRABCUT_INIT_TYPE type);     // initialization mode (e.g. from rect or from mask)
开发者ID:KaganRoman,项目名称:Eval,代码行数:8,代码来源:CvInvokeCvextern.cs
示例12: cvHaarDetectObjects
// Native (P/Invoke) declaration -- the [DllImport] attribute presumably accompanies this
// declaration in the full source file. Detects objects using a Haar cascade classifier;
// semantics follow OpenCV's cvHaarDetectObjects (see OpenCV documentation).
// Returns a native pointer (presumably to a sequence of detected rectangles -- verify).
public static extern IntPtr cvHaarDetectObjects(
IntPtr image,       // image to scan (native pointer)
IntPtr cascade,     // the loaded Haar classifier cascade
IntPtr storage,     // native memory storage for the results
double scaleFactor, // how much the search window scale grows between passes
int minNeighbors,   // minimum neighboring detections required to keep a candidate
CvEnum.HAAR_DETECTION_TYPE flags,   // detection mode flags
Size minSize);      // minimum object size to consider
开发者ID:wendellinfinity,项目名称:ShoulderSurferAlert,代码行数:8,代码来源:CvInvokeObjdetect.cs
示例13: CudaSobelFilter
/// <summary>
/// Create a Sobel filter.
/// </summary>
/// <param name="srcDepth">The depth type of the source image</param>
/// <param name="srcChannels">The number of channels in the source image</param>
/// <param name="dstDepth">The depth type of the destination image</param>
/// <param name="dstChannels">The number of channels in the destination image</param>
/// <param name="dx">Order of the derivative x</param>
/// <param name="dy">Order of the derivative y</param>
/// <param name="ksize">Size of the extended Sobel kernel</param>
/// <param name="scale">Optional scale, use 1 for default.</param>
/// <param name="rowBorderType">The row border type.</param>
/// <param name="columnBorderType">The column border type.</param>
public CudaSobelFilter(
DepthType srcDepth, int srcChannels,
DepthType dstDepth, int dstChannels,
int dx, int dy, int ksize = 3, double scale = 1.0,
CvEnum.BorderType rowBorderType = BorderType.Default, CvEnum.BorderType columnBorderType = BorderType.NegativeOne)
{
// Combine depth + channel count into packed type codes and create the native filter;
// _ptr holds the handle returned by the native call.
_ptr = CudaInvoke.cudaCreateSobelFilter(CvInvoke.MakeType(srcDepth, srcChannels), CvInvoke.MakeType(dstDepth, dstChannels),
dx, dy, ksize, scale, rowBorderType, columnBorderType);
}
开发者ID:reidblomquist,项目名称:emgucv,代码行数:18,代码来源:CudaSobelFilter.cs
示例14: cvCalibrateCamera2
// Native (P/Invoke) declaration -- the [DllImport] attribute presumably accompanies this
// declaration in the full source file. Estimates intrinsic and extrinsic camera parameters;
// semantics follow OpenCV's cvCalibrateCamera2 (see OpenCV documentation).
// Returns the final reprojection error (as used by the managed CalibrateCamera wrapper).
public static extern double cvCalibrateCamera2(
IntPtr objectPoints,        // 3D object points for all views (native matrix pointer)
IntPtr imagePoints,         // corresponding 2D image points for all views
IntPtr pointCounts,         // number of points in each view
Size imageSize,             // image size, used to initialize the intrinsic matrix
IntPtr intrinsicMatrix,     // in/out: 3x3 camera intrinsic matrix
IntPtr distortionCoeffs,    // in/out: distortion coefficients
IntPtr rotationVectors,     // output: per-view rotation vectors
IntPtr translationVectors,  // output: per-view translation vectors
CvEnum.CALIB_TYPE flags);   // calibration option flags
开发者ID:fajoy,项目名称:RTSPExample,代码行数:10,代码来源:CvInvokeCalib3d.cs
示例15: JointBilateralFilter
/// <summary>
/// Applies the joint bilateral filter to an image.
/// </summary>
/// <param name="joint">Joint 8-bit or floating-point, 1-channel or 3-channel image.</param>
/// <param name="src">Source 8-bit or floating-point, 1-channel or 3-channel image with the same depth as joint image.</param>
/// <param name="dst">Destination image of the same size and type as src .</param>
/// <param name="d">Diameter of each pixel neighborhood that is used during filtering. If it is non-positive, it is computed from sigmaSpace .</param>
/// <param name="sigmaColor">Filter sigma in the color space. A larger value of the parameter means that farther colors within the pixel neighborhood (see sigmaSpace ) will be mixed together, resulting in larger areas of semi-equal color.</param>
/// <param name="sigmaSpace">Filter sigma in the coordinate space. A larger value of the parameter means that farther pixels will influence each other as long as their colors are close enough (see sigmaColor ). When d&gt;0 , it specifies the neighborhood size regardless of sigmaSpace . Otherwise, d is proportional to sigmaSpace .</param>
/// <param name="borderType">Border type</param>
public static void JointBilateralFilter(
IInputArray joint, IInputArray src, IOutputArray dst, int d,
double sigmaColor, double sigmaSpace, CvEnum.BorderType borderType = BorderType.Reflect101)
{
   using (InputArray jointArr = joint.GetInputArray())
   using (InputArray srcArr = src.GetInputArray())
   using (OutputArray dstArr = dst.GetOutputArray())
   {
      cveJointBilateralFilter(jointArr, srcArr, dstArr, d, sigmaColor, sigmaSpace, borderType);
   }
}
开发者ID:neutmute,项目名称:emgucv,代码行数:20,代码来源:XImgprocInvoke.cs
示例16: CudaLaplacianFilter
/// <summary>
/// Create a Laplacian filter.
/// </summary>
/// <param name="srcDepth">The depth type of the source image</param>
/// <param name="srcChannels">The number of channels in the source image</param>
/// <param name="dstDepth">The depth type of the destination image</param>
/// <param name="dstChannels">The number of channels in the destination image</param>
/// <param name="ksize">Either 1 or 3</param>
/// <param name="scale">Optional scale. Use 1.0 for default</param>
/// <param name="borderType">The border type.</param>
/// <param name="borderValue">The border value.</param>
public CudaLaplacianFilter(
DepthType srcDepth, int srcChannels,
DepthType dstDepth, int dstChannels,
int ksize = 1, double scale = 1.0,
CvEnum.BorderType borderType = BorderType.Default, MCvScalar borderValue = new MCvScalar())
{
// Combine depth + channel count into packed type codes and create the native filter;
// _ptr holds the handle returned by the native call.
_ptr = CudaInvoke.cudaCreateLaplacianFilter(
CvInvoke.MakeType(srcDepth, srcChannels), CvInvoke.MakeType(dstDepth, dstChannels),
ksize, scale, borderType, ref borderValue);
}
开发者ID:reidblomquist,项目名称:emgucv,代码行数:17,代码来源:CudaLaplacianFilter.cs
示例17: EdgePreservingFilter
/// <summary>
/// Filtering is the fundamental operation in image and video processing. Edge-preserving smoothing filters are used in many different applications.
/// </summary>
/// <param name="src">Input 8-bit 3-channel image</param>
/// <param name="dst">Output 8-bit 3-channel image</param>
/// <param name="flags">Edge preserving filters</param>
/// <param name="sigmaS">Range between 0 to 200</param>
/// <param name="sigmaR">Range between 0 to 1</param>
public static void EdgePreservingFilter(
IInputArray src, IOutputArray dst,
CvEnum.EdgePreservingFilterFlag flags = CvEnum.EdgePreservingFilterFlag.RecursFilter,
float sigmaS = 60.0f,
float sigmaR = 0.4f)
{
   using (InputArray srcArr = src.GetInputArray())
   using (OutputArray dstArr = dst.GetOutputArray())
   {
      cveEdgePreservingFilter(srcArr, dstArr, flags, sigmaS, sigmaR);
   }
}
开发者ID:reidblomquist,项目名称:emgucv,代码行数:18,代码来源:CvInvokePhoto.cs
示例18: cvCalcOpticalFlowFarneback
// Native (P/Invoke) declaration -- the [DllImport] attribute presumably accompanies this
// declaration in the full source file. Computes dense optical flow with Farneback's
// algorithm; semantics follow OpenCV's cvCalcOpticalFlowFarneback (see OpenCV docs).
public extern static void cvCalcOpticalFlowFarneback(
IntPtr prev0,       // first input image (native pointer)
IntPtr next0,       // second input image, same size/type as prev0
IntPtr flow0,       // in/out: 2-channel flow image (x and y velocity)
double pyrScale,    // pyramid scale between layers (0.5 = classical half-size pyramid)
int levels,         // number of pyramid layers including the original image
int winSize,        // averaging window size
int iterations,     // iterations at each pyramid level
int polyN,          // pixel neighborhood size for polynomial expansion (typically 5 or 7)
double polySigma,   // Gaussian sigma used to smooth derivatives for the expansion
CvEnum.OPTICALFLOW_FARNEBACK_FLAG flags);   // operation flags (e.g. use initial flow)
开发者ID:KaganRoman,项目名称:Eval,代码行数:11,代码来源:CvInvokeVideo.cs
示例19: CudaGaussianFilter
/// <summary>
/// Create a Gaussian filter.
/// </summary>
/// <param name="srcDepth">The depth type of the source image</param>
/// <param name="srcChannels">The number of channels in the source image</param>
/// <param name="dstDepth">The depth type of the destination image</param>
/// <param name="dstChannels">The number of channels in the destination image</param>
/// <param name="ksize">The size of the kernel</param>
/// <param name="sigma1">This parameter may specify Gaussian sigma (standard deviation). If it is zero, it is calculated from the kernel size.</param>
/// <param name="sigma2">In case of non-square Gaussian kernel the parameter may be used to specify a different (from param3) sigma in the vertical direction. Use 0 for default</param>
/// <param name="rowBorderType">The row border type.</param>
/// <param name="columnBorderType">The column border type.</param>
public CudaGaussianFilter(
DepthType srcDepth, int srcChannels,
DepthType dstDepth, int dstChannels,
Size ksize,
double sigma1, double sigma2 = 0,
CvEnum.BorderType rowBorderType = BorderType.Default, CvEnum.BorderType columnBorderType = BorderType.NegativeOne)
{
   // Pack depth + channel count into the combined type codes expected by the native API.
   var srcType = CvInvoke.MakeType(srcDepth, srcChannels);
   var dstType = CvInvoke.MakeType(dstDepth, dstChannels);
   _ptr = CudaInvoke.cudaCreateGaussianFilter(
      srcType, dstType,
      ref ksize, sigma1, sigma2, (int)rowBorderType, (int)columnBorderType);
}
开发者ID:neutmute,项目名称:emgucv,代码行数:23,代码来源:CudaGaussianFilter.cs
示例20: CalibrateCamera
/// <summary>
/// Estimates intrinsic camera parameters and extrinsic parameters for each of the views
/// </summary>
/// <param name="objectPoints">The 3D location of the object points. The first index is the index of image, second index is the index of the point</param>
/// <param name="imagePoints">The 2D image location of the points. The first index is the index of the image, second index is the index of the point</param>
/// <param name="imageSize">The size of the image, used only to initialize intrinsic camera matrix</param>
/// <param name="intrinsicParam">The intrisinc parameters, might contains some initial values. The values will be modified by this function.</param>
/// <param name="flags">Flags</param>
/// <param name="extrinsicParams">The output array of extrinsic parameters.</param>
/// <returns>The final reprojection error</returns>
public static double CalibrateCamera(
MCvPoint3D32f[][] objectPoints,
PointF[][] imagePoints,
Size imageSize,
IntrinsicCameraParameters intrinsicParam,
CvEnum.CALIB_TYPE flags,
out ExtrinsicCameraParameters[] extrinsicParams)
{
Debug.Assert(objectPoints.Length == imagePoints.Length, "The number of images for objects points should be equal to the number of images for image points");
int imageCount = objectPoints.Length;

// Build the per-image point counts required by the native API, validating that each
// view has matching 3D/2D point totals.
int[] pointCounts = new int[objectPoints.Length];
for (int i = 0; i < objectPoints.Length; i++)
{
Debug.Assert(objectPoints[i].Length == imagePoints[i].Length, String.Format("Number of 3D points and image points should be equal in the {0}th image", i));
pointCounts[i] = objectPoints[i].Length;
}

double reprojectionError = -1;
using (Matrix<float> objectPointMatrix = ToMatrix(objectPoints))
using (Matrix<float> imagePointMatrix = ToMatrix(imagePoints))
using (Matrix<int> pointCountsMatrix = new Matrix<int>(pointCounts))
using (Matrix<double> rotationVectors = new Matrix<double>(imageCount, 3))
using (Matrix<double> translationVectors = new Matrix<double>(imageCount, 3))
{
reprojectionError = CvInvoke.cvCalibrateCamera2(
objectPointMatrix.Ptr,
imagePointMatrix.Ptr,
pointCountsMatrix.Ptr,
imageSize,
intrinsicParam.IntrinsicMatrix,
intrinsicParam.DistortionCoeffs,
rotationVectors,
translationVectors,
flags);

// Convert each row of the native rotation/translation results into a managed
// ExtrinsicCameraParameters instance. The temporary native matrix header is
// reused for every row.
extrinsicParams = new ExtrinsicCameraParameters[imageCount];
IntPtr matPtr = Marshal.AllocHGlobal(StructSize.MCvMat);
// BUGFIX: free the unmanaged header in a finally block -- previously an exception
// thrown inside the loop (e.g. by a native call) leaked the AllocHGlobal buffer.
try
{
for (int i = 0; i < imageCount; i++)
{
ExtrinsicCameraParameters p = new ExtrinsicCameraParameters();
CvInvoke.cvGetRow(rotationVectors.Ptr, matPtr, i);
CvInvoke.cvTranspose(matPtr, p.RotationVector.Ptr);
CvInvoke.cvGetRow(translationVectors.Ptr, matPtr, i);
CvInvoke.cvTranspose(matPtr, p.TranslationVector.Ptr);
extrinsicParams[i] = p;
}
}
finally
{
Marshal.FreeHGlobal(matPtr);
}
}
return reprojectionError;
}
开发者ID:genecyber,项目名称:PredatorCV,代码行数:63,代码来源:CameraCalibration.cs
注:本文中的CvEnum类示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论