This article collects typical usage examples of the MCvScalar class in C#. If you are wondering what the C# MCvScalar class is for, how to use it, or what working example code looks like, the curated class examples below may help.
The MCvScalar class belongs to the Emgu.CV.Structure namespace. A total of 20 MCvScalar code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C# code examples.
Example 1: WarpAffine
/// <summary>
/// Applies an affine transformation to an image.
/// </summary>
/// <param name="src">Source image</param>
/// <param name="dst">Destination image</param>
/// <param name="mapMatrix">2x3 transformation matrix</param>
/// <param name="dsize">Size of the output image.</param>
/// <param name="interpMethod">Interpolation method</param>
/// <param name="warpMethod">Warp method</param>
/// <param name="borderMode">Pixel extrapolation method</param>
/// <param name="borderValue">A value used to fill outliers</param>
public static void WarpAffine(IInputArray src, IOutputArray dst, IInputArray mapMatrix, Size dsize, CvEnum.Inter interpMethod = CvEnum.Inter.Linear, CvEnum.Warp warpMethod = CvEnum.Warp.Default, CvEnum.BorderType borderMode = CvEnum.BorderType.Constant, MCvScalar borderValue = new MCvScalar())
{
using (InputArray iaSrc = src.GetInputArray())
using (OutputArray oaDst = dst.GetOutputArray())
using (InputArray iaMapMatrix = mapMatrix.GetInputArray())
cveWarpAffine(iaSrc, oaDst, iaMapMatrix, ref dsize, (int)interpMethod | (int)warpMethod, borderMode, ref borderValue);
}
Developer: Warren-GH, Project: emgucv, Lines: 18, Source: CvInvokeImgproc.cs
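A minimal usage sketch for the WarpAffine wrapper above, assuming an Emgu CV 3.x build; the image file names are placeholders, and CvInvoke.GetRotationMatrix2D is used to build the 2x3 matrix:

using System.Drawing;
using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Structure;

// Rotate an image 30 degrees around its center; pixels that fall outside the
// source are filled with solid blue (BGR order), supplied as an MCvScalar.
using (Mat src = CvInvoke.Imread("input.png"))
using (Mat dst = new Mat())
using (Mat rotation = new Mat())
{
    PointF center = new PointF(src.Width / 2f, src.Height / 2f);
    CvInvoke.GetRotationMatrix2D(center, 30, 1.0, rotation);
    CvInvoke.WarpAffine(src, dst, rotation, src.Size,
        Inter.Linear, Warp.Default, BorderType.Constant,
        new MCvScalar(255, 0, 0)); // border fill value
    CvInvoke.Imwrite("rotated.png", dst);
}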
Example 2: FindLargestContour
public static VectorOfPoint FindLargestContour(IInputOutputArray cannyEdges, IInputOutputArray result)
{
    int largest_contour_index = 0;
    double largest_area = 0;
    VectorOfPoint largestContour;
    using (Mat hierarchy = new Mat())
    using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
    {
        CvInvoke.FindContours(cannyEdges, contours, hierarchy, RetrType.Tree, ChainApproxMethod.ChainApproxNone);
        for (int i = 0; i < contours.Size; i++)
        {
            double a = CvInvoke.ContourArea(contours[i], false); // Find the area of the current contour
            if (a > largest_area)
            {
                largest_area = a;
                largest_contour_index = i; // Store the index of the largest contour
            }
            CvInvoke.DrawContours(result, contours, i, new MCvScalar(255, 0, 0)); // Outline every contour in blue
        }
        // Highlight the largest contour in red with a thicker stroke
        CvInvoke.DrawContours(result, contours, largest_contour_index, new MCvScalar(0, 0, 255), 3, LineType.EightConnected, hierarchy);
        largestContour = new VectorOfPoint(contours[largest_contour_index].ToArray());
    }
    return largestContour;
}
Developer: Neths, Project: ReStudio, Lines: 33, Source: Form1.cs
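A hypothetical call site for the FindLargestContour helper above; the file name and Canny thresholds are arbitrary placeholders:

using System;
using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Util;

using (Mat src = CvInvoke.Imread("shapes.png"))
using (Mat gray = new Mat())
using (Mat edges = new Mat())
using (Mat result = src.Clone())
{
    CvInvoke.CvtColor(src, gray, ColorConversion.Bgr2Gray);
    CvInvoke.Canny(gray, edges, 100, 200); // edge map fed to FindContours
    using (VectorOfPoint largest = FindLargestContour(edges, result))
        Console.WriteLine("Largest contour: {0} points, area {1}",
            largest.Size, CvInvoke.ContourArea(largest));
}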
Example 3: DrawCrosshair
public void DrawCrosshair( int x, int y, MCvScalar color, Image<Bgr, Byte> src )
{
Point point_laser, point_left, point_right, point_top, point_bottom;
point_laser = new Point( x, y );
point_left = new Point( 0, y );
point_top = new Point( x, 0 );
point_right = new Point( CvInvoke.cvGetSize( src ).Width, y );
point_bottom = new Point( x, CvInvoke.cvGetSize( src ).Height );
// If image center tracking is desired
/*
point_laser = new Point( 320, 240 );
point_left = new Point( 0, 240 );
point_top = new Point( 320, 0 );
point_right = new Point( CvInvoke.cvGetSize( src ).Width, 240 );
point_bottom = new Point( 320, CvInvoke.cvGetSize( src ).Height );
*/
// Draw a crosshair centered on the laser pointer on the webcam feed
CvInvoke.cvCircle( src, point_laser, 5, color, 1,
Emgu.CV.CvEnum.LINE_TYPE.EIGHT_CONNECTED, 0 );
CvInvoke.cvLine( src, point_left, point_right, color, 1,
Emgu.CV.CvEnum.LINE_TYPE.EIGHT_CONNECTED, 0 );
CvInvoke.cvLine( src, point_top, point_bottom, color, 1,
Emgu.CV.CvEnum.LINE_TYPE.EIGHT_CONNECTED, 0 );
}
Developer: hemantyadavgit, Project: ARDroneObstacleAvoidance, Lines: 28, Source: LaserDetect.cs
Example 4: GetScalar
private static MCvScalar GetScalar(double hue, double sat, double value)
{
var scalar = new MCvScalar();
scalar.V0 = hue;
scalar.V1 = sat;
scalar.V2 = value;
return scalar;
}
Developer: neutmute, Project: PiCamCV, Lines: 8, Source: ThresholdSettings.cs
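A sketch of how such an HSV scalar pair is typically consumed, here with CvInvoke.InRange. Since GetScalar is private, this assumes the code runs inside the same class; the file name and hue/saturation/value bounds are arbitrary:

using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Structure;

// Threshold a BGR frame to a green hue band in HSV space.
using (Mat frame = CvInvoke.Imread("frame.png"))
using (Mat hsv = new Mat())
using (Mat mask = new Mat())
{
    CvInvoke.CvtColor(frame, hsv, ColorConversion.Bgr2Hsv);
    MCvScalar lower = GetScalar(35, 80, 80);   // lower hue/sat/value bound
    MCvScalar upper = GetScalar(85, 255, 255); // upper bound
    using (ScalarArray saLower = new ScalarArray(lower))
    using (ScalarArray saUpper = new ScalarArray(upper))
        CvInvoke.InRange(hsv, saLower, saUpper, mask);
}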
Example 5: cvWarpAffine
/// <summary>
/// Transforms source image using the specified matrix
/// </summary>
/// <param name="src">Source image</param>
/// <param name="dst">Destination image</param>
/// <param name="mapMatrix">2x3 transformation matrix</param>
/// <param name="flags"> flags </param>
/// <param name="fillval">A value used to fill outliers</param>
#if ANDROID
public static void cvWarpAffine(
IntPtr src,
IntPtr dst,
IntPtr mapMatrix,
int flags,
MCvScalar fillval)
{
cvWarpAffine(src, dst, mapMatrix, flags, fillval.v0, fillval.v1, fillval.v2, fillval.v3);
}
Developer: KaganRoman, Project: Eval, Lines: 18, Source: CvInvokeImgproc.cs
Example 6: CudaLaplacianFilter
/// <summary>
/// Create a Laplacian filter.
/// </summary>
/// <param name="ksize">Either 1 or 3</param>
/// <param name="scale">Optional scale. Use 1.0 for default</param>
/// <param name="borderType">The border type.</param>
/// <param name="borderValue">The border value.</param>
public CudaLaplacianFilter(
DepthType srcDepth, int srcChannels,
DepthType dstDepth, int dstChannels,
int ksize = 1, double scale = 1.0,
CvEnum.BorderType borderType = BorderType.Default, MCvScalar borderValue = new MCvScalar())
{
_ptr = CudaInvoke.cudaCreateLaplacianFilter(
CvInvoke.MakeType(srcDepth, srcChannels), CvInvoke.MakeType(dstDepth, dstChannels),
ksize, scale, borderType, ref borderValue);
}
Developer: reidblomquist, Project: emgucv, Lines: 17, Source: CudaLaplacianFilter.cs
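A usage sketch, assuming a CUDA-enabled Emgu CV 3.x build where GpuMat.Upload/Download and the filter's Apply method are available; the file names are placeholders:

using Emgu.CV;
using Emgu.CV.Cuda;
using Emgu.CV.CvEnum;
using Emgu.CV.Structure;

using (Mat bgr = CvInvoke.Imread("input.png"))
using (Mat gray = new Mat())
using (GpuMat gpuSrc = new GpuMat())
using (GpuMat gpuDst = new GpuMat())
using (CudaLaplacianFilter laplacian = new CudaLaplacianFilter(
           DepthType.Cv8U, 1, DepthType.Cv8U, 1,
           ksize: 3, scale: 1.0,
           borderType: BorderType.Default, borderValue: new MCvScalar()))
{
    CvInvoke.CvtColor(bgr, gray, ColorConversion.Bgr2Gray);
    gpuSrc.Upload(gray);             // copy to device memory
    laplacian.Apply(gpuSrc, gpuDst); // run the Laplacian on the GPU
    using (Mat result = new Mat())
    {
        gpuDst.Download(result);     // copy back to host memory
        CvInvoke.Imwrite("laplacian.png", result);
    }
}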
Example 7: DrawMeasurements
public void DrawMeasurements( int x, int y, double dist, double pfc,
MCvScalar color, Image<Bgr, Byte> src )
{
// Measurement text content, position, and font
string text_size, text_posn, text_pfc, text_dist;
Point point_size, point_posn, point_pfc, point_dist;
MCvFont font = new MCvFont( Emgu.CV.CvEnum.FONT.CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5 );
// Fill size string
text_size = "Size (pix): ";
text_size += Convert.ToString( CvInvoke.cvGetSize( src ).Width );
text_size += ", ";
text_size += Convert.ToString( CvInvoke.cvGetSize( src ).Height );
// Start position, pfc, and distance strings
text_posn = "Position (pix): ";
text_pfc = "PFC (pix): ";
text_dist = "Distance (cm): ";
// If the laser point WAS found
if ( ( x > 0 ) && ( y > 0 ) )
{
// Fill position string
text_posn += Convert.ToString( x );
text_posn += ", ";
text_posn += Convert.ToString( y );
// Fill pfc string
text_pfc += Convert.ToString( pfc );
// Fill distance string
text_dist += String.Format( "{0:F1}", dist );
}
// If the laser pointer was NOT found
else
{
// Fill measurement strings with NULL readings
text_posn += "NULL, NULL";
text_pfc += "NULL";
text_dist += "NULL";
}
// Initialize text position
point_size = new Point( 10, 400 );
point_posn = new Point( 10, 420 );
point_pfc = new Point( 10, 440 );
point_dist = new Point( 10, 460 );
// Draw text on image
CvInvoke.cvPutText( src, text_size, point_size, ref font, color );
CvInvoke.cvPutText( src, text_posn, point_posn, ref font, color );
CvInvoke.cvPutText( src, text_pfc, point_pfc, ref font, color );
CvInvoke.cvPutText( src, text_dist, point_dist, ref font, color );
}
Developer: hemantyadavgit, Project: ARDroneObstacleAvoidance, Lines: 54, Source: LaserDetect.cs
Example 8: CudaLinearFilter
/// <summary>
/// Create a Gpu LinearFilter
/// </summary>
/// <param name="kernel">Convolution kernel, single-channel floating point matrix (e.g. Emgu.CV.Matrix). If you want to apply different kernels to different channels, split the gpu image into separate color planes and process them individually</param>
/// <param name="anchor">The anchor of the kernel that indicates the relative position of a filtered point within the kernel. The anchor shoud lie within the kernel. The special default value (-1,-1) means that it is at the kernel center</param>
/// <param name="borderType">Border type. Use REFLECT101 as default.</param>
/// <param name="borderValue">The border value</param>
public CudaLinearFilter(
DepthType srcDepth, int srcChannels,
DepthType dstDepth, int dstChannels,
IInputArray kernel,
System.Drawing.Point anchor,
CvEnum.BorderType borderType = BorderType.Default, MCvScalar borderValue = new MCvScalar())
{
using (InputArray iaKernel = kernel.GetInputArray())
_ptr = CudaInvoke.cudaCreateLinearFilter(
CvInvoke.MakeType(srcDepth, srcChannels), CvInvoke.MakeType(dstDepth, dstChannels),
iaKernel, ref anchor, borderType, ref borderValue);
}
Developer: reidblomquist, Project: emgucv, Lines: 19, Source: CudaLinearFilter.cs
Example 9: DrawCircle
public static void DrawCircle(IInputOutputArray image,
Point center,
int radius,
MCvScalar color,
int thickness = 1,
LineType lineType = LineType.EightConnected,
int shift = 0)
{
using (InputOutputArray array = image.GetInputOutputArray())
{
cveCircle(array, ref center, radius, ref color, thickness, lineType, shift);
}
}
Developer: KalinovDmitri, Project: OpenCV, Lines: 13, Source: CvInvokeDrawing.cs
Example 10: DrawLine
public static void DrawLine(IInputOutputArray image,
Point start,
Point end,
MCvScalar color,
int thickness = 1,
LineType lineType = LineType.EightConnected,
int shift = 0)
{
using (InputOutputArray array = image.GetInputOutputArray())
{
cveLine(array, ref start, ref end, ref color, thickness, lineType, shift);
}
}
Developer: KalinovDmitri, Project: OpenCV, Lines: 13, Source: CvInvokeDrawing.cs
Example 11: DrawEllipse
public static void DrawEllipse(IInputOutputArray image,
RotatedRect box,
MCvScalar color,
int thickness = 1,
LineType lineType = LineType.EightConnected,
int shift = 0)
{
int width = (int)Math.Round(box.Size.Height * 0.5F);
int height = (int)Math.Round(box.Size.Width * 0.5F);
Size axesSize = new Size(width, height);
Point center = Point.Round(box.Center);
DrawEllipse(image, center, axesSize, box.Angle, 0.0D, 360.0D, color, thickness, lineType, shift);
}
Developer: KalinovDmitri, Project: OpenCV, Lines: 14, Source: CvInvokeDrawing.cs
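A combined usage sketch for the DrawCircle, DrawLine and DrawEllipse wrappers from Examples 9-11, assuming they are in scope (e.g. called from the same class); the canvas size, shapes and output file are arbitrary:

using System.Drawing;
using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Structure;

using (Mat canvas = new Mat(480, 640, DepthType.Cv8U, 3))
{
    canvas.SetTo(new MCvScalar(0, 0, 0)); // black background
    DrawCircle(canvas, new Point(320, 240), 50, new MCvScalar(0, 255, 0), 2);
    DrawLine(canvas, new Point(0, 0), new Point(639, 479), new MCvScalar(0, 0, 255));
    DrawEllipse(canvas, new RotatedRect(new PointF(320, 240), new SizeF(200, 100), 30f),
        new MCvScalar(255, 0, 0), 2);
    CvInvoke.Imwrite("drawing.png", canvas);
}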
Example 12: DrawMatches
/// <summary>
/// Draw the matched keypoints between the model image and the observed image.
/// </summary>
/// <param name="modelImage">The model image</param>
/// <param name="modelKeypoints">The keypoints in the model image</param>
/// <param name="observerdImage">The observed image</param>
/// <param name="observedKeyPoints">The keypoints in the observed image</param>
/// <param name="matchColor">The color for the match correspondence lines</param>
/// <param name="singlePointColor">The color for highlighting the keypoints</param>
/// <param name="mask">The mask for the matches. Use null for all matches.</param>
/// <param name="flags">The drawing type</param>
/// <param name="result">The image where model and observed image is displayed side by side. Matches are drawn as indicated by the flag</param>
/// <param name="matches">Matches. Each matches[i] is k or less matches for the same query descriptor.</param>
public static void DrawMatches(
IInputArray modelImage, VectorOfKeyPoint modelKeypoints,
IInputArray observerdImage, VectorOfKeyPoint observedKeyPoints,
VectorOfVectorOfDMatch matches,
IInputOutputArray result,
MCvScalar matchColor, MCvScalar singlePointColor,
IInputArray mask = null,
KeypointDrawType flags = KeypointDrawType.Default)
{
using (InputArray iaModelImage = modelImage.GetInputArray())
using (InputArray iaObserverdImage = observerdImage.GetInputArray())
using (InputOutputArray ioaResult = result.GetInputOutputArray())
using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
CvInvoke.drawMatchedFeatures(iaObserverdImage, observedKeyPoints, iaModelImage,
modelKeypoints, matches, ioaResult, ref matchColor, ref singlePointColor, iaMask , flags);
}
Developer: reidblomquist, Project: emgucv, Lines: 29, Source: Features2DTracker.cs
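A hypothetical end-to-end call of the DrawMatches wrapper above, assuming the standard Emgu CV 3.x feature classes (ORBDetector, BFMatcher) and placeholder image files; the match colors are supplied as MCvScalar values:

using Emgu.CV;
using Emgu.CV.Features2D;
using Emgu.CV.Structure;
using Emgu.CV.Util;

using (Mat model = CvInvoke.Imread("model.png"))
using (Mat observed = CvInvoke.Imread("scene.png"))
using (Mat result = new Mat())
using (VectorOfKeyPoint modelKp = new VectorOfKeyPoint())
using (VectorOfKeyPoint observedKp = new VectorOfKeyPoint())
using (Mat modelDesc = new Mat())
using (Mat observedDesc = new Mat())
using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
using (ORBDetector orb = new ORBDetector())
using (BFMatcher matcher = new BFMatcher(DistanceType.Hamming))
{
    orb.DetectAndCompute(model, null, modelKp, modelDesc, false);
    orb.DetectAndCompute(observed, null, observedKp, observedDesc, false);
    matcher.Add(modelDesc);
    matcher.KnnMatch(observedDesc, matches, 2, null); // 2 nearest neighbours per descriptor
    DrawMatches(model, modelKp, observed, observedKp, matches, result,
        new MCvScalar(0, 255, 0),  // match lines in green
        new MCvScalar(255, 0, 0)); // unmatched keypoints in blue
    CvInvoke.Imwrite("matches.png", result);
}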
Example 13: FindFaces
public List<FaceScored> FindFaces(Emgu.CV.Image<Emgu.CV.Structure.Bgr, byte> image, CascadeClassifier cascadeClassifierFace, CascadeClassifier cascadeClassifierEye)
{
List<FaceScored> currentFaces = new List<FaceScored>();
using (Image<Gray, Byte> gray = image.Convert<Gray, Byte>())
{
gray._EqualizeHist();
Size minFaceSize = new Size(minSizeFace, minSizeFace);
Size maxFaceSize = new Size(maxSizeFace, maxSizeFace);
Size minEyeSize = new Size(minSizeEye, minSizeEye);
Size maxEyeSize = new Size(maxSizeEye, maxSizeEye);
Rectangle[] facesDetected = cascadeClassifierFace.DetectMultiScale(gray, scaleFace, neighborsFace, minFaceSize, maxFaceSize);
foreach (Rectangle f in facesDetected)
{
if (f.Width<35)
break;
gray.ROI = f;
Rectangle[] eyesDetected = cascadeClassifierEye.DetectMultiScale(gray, scaleEye, neighborsEye, minEyeSize, maxEyeSize);
if (eyesDetected.Count() >0){
FaceScored faceModel = new FaceScored();
faceModel.FaceImage = gray.Bitmap;
faceModel.FaceImageFullColr = image.GetSubRect(f).Bitmap;
faceModel.Height = faceModel.FaceImage.Height;
faceModel.Width = faceModel.FaceImage.Width;
faceModel.EyesCount = eyesDetected.Count();
Gray avgf = new Gray();
MCvScalar avstd = new MCvScalar();
gray.AvgSdv(out avgf, out avstd);
faceModel.StdDev = avstd.V0;
currentFaces.Add(faceModel);
if(currentFaces.Count%5==0)
Console.WriteLine("FaceDetect Add every 5 faceModel" + faceModel.Width);
break;
}
gray.ROI = Rectangle.Empty;
}
}
return currentFaces;
}
Developer: kmacpher67, Project: PlantLifeAnimation, Lines: 42, Source: FaceDetection.cs
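A hypothetical call of FindFaces, assuming the containing class is named FaceDetection (inferred from the source file name, so an assumption) and that the stock OpenCV Haar cascade XML files are available on disk:

using System;
using System.Collections.Generic;
using Emgu.CV;
using Emgu.CV.Structure;

FaceDetection detector = new FaceDetection(); // class name assumed from FaceDetection.cs
CascadeClassifier faceCascade = new CascadeClassifier("haarcascade_frontalface_default.xml");
CascadeClassifier eyeCascade = new CascadeClassifier("haarcascade_eye.xml");
using (Image<Bgr, byte> frame = new Image<Bgr, byte>("people.jpg"))
{
    List<FaceScored> faces = detector.FindFaces(frame, faceCascade, eyeCascade);
    Console.WriteLine("Found {0} face(s) with detectable eyes", faces.Count);
    foreach (FaceScored face in faces)
        Console.WriteLine("  {0}x{1}, grey-level std-dev {2:F1}", face.Width, face.Height, face.StdDev);
}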
Example 14: DrawContours
public static void DrawContours(IInputOutputArray image,
IInputArray contours,
int contourIdx,
MCvScalar color,
int thickness = 1,
LineType lineType = LineType.EightConnected,
IInputArray hierarchy = null,
int maxLevel = int.MaxValue,
Point offset = default(Point))
{
using (InputOutputArray imageArray = image.GetInputOutputArray())
{
using (InputArray contoursArray = contours.GetInputArray())
{
using (InputArray hierarchyArray = (hierarchy != null) ? hierarchy.GetInputArray() : EmptyArray<InputArray>.Value)
{
cveDrawContours(imageArray, contoursArray, contourIdx, ref color, thickness, lineType, hierarchyArray, maxLevel, ref offset);
}
}
}
}
Developer: KalinovDmitri, Project: OpenCV, Lines: 21, Source: CvInvokeDrawing.cs
Example 15: ApplyFilter
public void ApplyFilter(Mat src)
{
CvInvoke.CvtColor(src, src, ColorConversion.Bgr2Hsv);
Mat threshold = new Mat(src.Height, src.Width, src.Depth, src.NumberOfChannels);
MCvScalar min = new MCvScalar(m_hmin, m_smin, m_vmin);
MCvScalar max = new MCvScalar(m_hmax, m_smax, m_vmax);
CvInvoke.InRange(src, new ScalarArray(min), new ScalarArray(max), threshold);
Mat element = CvInvoke.GetStructuringElement(ElementShape.Rectangle, new Size(3,3), Point.Empty);
CvInvoke.Erode(threshold, threshold, element, Point.Empty, 1, BorderType.Constant, new MCvScalar(1.0f));
CvInvoke.Canny(threshold, threshold, 100, 255);
VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint();
Mat hierarchy = new Mat();
CvInvoke.FindContours(threshold, contours, hierarchy, RetrType.Tree, ChainApproxMethod.ChainApproxSimple, Point.Empty);
Mat draw = new Mat(src.Height, src.Width, src.Depth, 1);
draw.SetTo(new MCvScalar(0.0));
int i = 0;
//Debug.Log("CONTOURS");
var contoursArray = contours.ToArrayOfArray();
foreach(Point[] contour in contoursArray)
{
CvInvoke.DrawContours(draw, contours, i, new MCvScalar(255.0), 1, LineType.EightConnected, null, int.MaxValue, Point.Empty);
double a = CvInvoke.ContourArea(new VectorOfPoint(contour));
//Debug.Log("Contour: " + a);
i++;
}
//Emgu.CV.UI.ImageViewer.Show(draw, "test");
if(m_onFrame != null) m_onFrame.Invoke(draw);
}
Developer: AndyGates, Project: TrackMyBalls, Lines: 39, Source: ThresholdFilter.cs
Example 16: Dilate
/// <summary>
/// Dilates the source image using the specified structuring element that determines the shape of a pixel neighborhood over which the maximum is taken
/// The function supports in-place operation. Dilation can be applied several times (iterations). In the case of a color image, each channel is processed independently
/// </summary>
/// <param name="src">Source image</param>
/// <param name="dst">Destination image</param>
/// <param name="element">Structuring element used for erosion. If it is IntPtr.Zero, a 3x3 rectangular structuring element is used</param>
/// <param name="iterations">Number of times erosion is applied</param>
/// <param name="borderType">Pixel extrapolation method</param>
/// <param name="borderValue">Border value in case of a constant border </param>
/// <param name="anchor">Position of the anchor within the element; default value (-1, -1) means that the anchor is at the element center.</param>
public static void Dilate(IInputArray src, IOutputArray dst, IInputArray element, Point anchor, int iterations, CvEnum.BorderType borderType, MCvScalar borderValue)
{
using (InputArray iaSrc = src.GetInputArray())
using (OutputArray oaDst = dst.GetOutputArray())
using (InputArray iaElement = element == null ? InputArray.GetEmpty() : element.GetInputArray())
cveDilate(iaSrc, oaDst, iaElement, ref anchor, iterations, borderType, ref borderValue);
}
Developer: Warren-GH, Project: emgucv, Lines: 18, Source: CvInvokeImgproc.cs
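A short usage sketch: dilating a binary mask with a 5x5 rectangular kernel, two iterations. The MCvScalar argument supplies the constant value used outside the image border; the file names are placeholders:

using System.Drawing;
using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Structure;

using (Mat mask = CvInvoke.Imread("mask.png"))
using (Mat dilated = new Mat())
using (Mat kernel = CvInvoke.GetStructuringElement(ElementShape.Rectangle, new Size(5, 5), new Point(-1, -1)))
{
    CvInvoke.Dilate(mask, dilated, kernel, new Point(-1, -1), 2,
        BorderType.Constant, new MCvScalar(0)); // constant black border outside the image
    CvInvoke.Imwrite("mask_dilated.png", dilated);
}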
Example 17: cveRemap
private static extern void cveRemap(IntPtr src, IntPtr dst, IntPtr map1, IntPtr map2, CvEnum.Inter interpolation, CvEnum.BorderType borderMode, ref MCvScalar borderValue);
Developer: Warren-GH, Project: emgucv, Lines: 1, Source: CvInvokeImgproc.cs
Example 18: cveRectangle
private static extern void cveRectangle(IntPtr img, ref Rectangle rect, ref MCvScalar color, int thickness, CvEnum.LineType lineType, int shift);
Developer: Warren-GH, Project: emgucv, Lines: 1, Source: CvInvokeImgproc.cs
Example 19: Rectangle
/// <summary>
/// Draws a rectangle specified by a CvRect structure
/// </summary>
/// /// <param name="img">Image</param>
/// <param name="rect">The rectangle to be drawn</param>
/// <param name="color">Line color </param>
/// <param name="thickness">Thickness of lines that make up the rectangle. Negative values make the function to draw a filled rectangle.</param>
/// <param name="lineType">Type of the line</param>
/// <param name="shift">Number of fractional bits in the point coordinates</param>
public static void Rectangle(IInputOutputArray img, Rectangle rect, MCvScalar color, int thickness = 1, CvEnum.LineType lineType = CvEnum.LineType.EightConnected, int shift = 0)
{
using (InputOutputArray ioaImg = img.GetInputOutputArray())
cveRectangle(ioaImg, ref rect, ref color, thickness, lineType, shift);
}
Developer: Warren-GH, Project: emgucv, Lines: 14, Source: CvInvokeImgproc.cs
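A usage sketch for the Rectangle wrapper: one outlined and one filled rectangle on a blank canvas, with the colors passed as MCvScalar values (canvas size, coordinates and output file are arbitrary):

using System.Drawing;
using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Structure;

using (Mat canvas = new Mat(240, 320, DepthType.Cv8U, 3))
{
    canvas.SetTo(new MCvScalar(255, 255, 255)); // white background
    CvInvoke.Rectangle(canvas, new Rectangle(20, 20, 120, 80),
        new MCvScalar(0, 0, 255), 2);           // red outline, 2 px thick
    CvInvoke.Rectangle(canvas, new Rectangle(180, 60, 100, 100),
        new MCvScalar(0, 255, 0), -1);          // negative thickness = filled green rectangle
    CvInvoke.Imwrite("rectangles.png", canvas);
}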
Example 20: cvePolylines
private static extern void cvePolylines(
IntPtr img, IntPtr pts,
[MarshalAs(CvInvoke.BoolMarshalType)]
bool isClosed,
ref MCvScalar color,
int thickness, CvEnum.LineType lineType, int shift);
Developer: Warren-GH, Project: emgucv, Lines: 6, Source: CvInvokeImgproc.cs
Note: The MCvScalar class examples in this article were collected from GitHub, MSDocs and other source-code and documentation platforms. The snippets are taken from open-source projects contributed by their authors; copyright remains with the original authors, and redistribution or reuse should follow the license of the corresponding project. Do not republish without permission.