This article collects typical usage examples of the Emgu.CV.Mat class in C#. If you have been wondering what the C# Mat class is for, how to use it, or where to find examples, the curated class code examples below may help.
The Mat class belongs to the Emgu.CV namespace. Twenty code examples of the Mat class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C# code examples.
Example 1: EdgeDetectionWindow
public EdgeDetectionWindow(Mat sourceImage)
{
    _sourceImage = sourceImage;
    InitializeComponent();
    DataContext = this;
    BitmapSource = MainWindow.ToBitmapSource(sourceImage);
}
Developer ID: chovik, Project: master-thesis, Lines: 7, Source: EdgeDetectionWindow.xaml.cs
Example 2: image1_Initialized
private void image1_Initialized(object sender, EventArgs e)
{
    Mat image = new Mat(100, 400, DepthType.Cv8U, 3); // 100 x 400, 8-bit, 3 channels
    image.SetTo(new Bgr(255, 255, 255).MCvScalar); // white background
    CvInvoke.PutText(image, "Hello, world", new System.Drawing.Point(10, 50), Emgu.CV.CvEnum.FontFace.HersheyPlain, 3.0, new Bgr(255.0, 0.0, 0.0).MCvScalar); // blue text (Bgr channel order)
    image1.Source = BitmapSourceConvert.ToBitmapSource(image);
}
Developer ID: reidblomquist, Project: emgucv, Lines: 8, Source: Window1.xaml.cs
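The BitmapSourceConvert helper used above belongs to the Emgu WPF sample rather than the library itself. Below is a minimal sketch of an equivalent converter, under the assumption that round-tripping through an in-memory PNG is acceptable; the sample's own version may convert via an HBitmap instead:
public static class BitmapSourceConvert
{
    public static System.Windows.Media.Imaging.BitmapSource ToBitmapSource(Mat image)
    {
        using (Emgu.CV.Util.VectorOfByte buffer = new Emgu.CV.Util.VectorOfByte())
        {
            CvInvoke.Imencode(".png", image, buffer); // serialize the Mat to PNG bytes in memory
            using (System.IO.MemoryStream stream = new System.IO.MemoryStream(buffer.ToArray()))
            {
                var decoder = new System.Windows.Media.Imaging.PngBitmapDecoder(
                    stream,
                    System.Windows.Media.Imaging.BitmapCreateOptions.PreservePixelFormat,
                    System.Windows.Media.Imaging.BitmapCacheOption.OnLoad); // OnLoad lets the stream be disposed safely
                return decoder.Frames[0]; // the decoded frame as a WPF BitmapSource
            }
        }
    }
}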
Example 3: ImageGrabbedHandler
public override void ImageGrabbedHandler(object sender, EventArgs e)
{
    using (var matCaptured = new Mat())
    {
        CameraCapture.Retrieve(matCaptured); // retrieve the grabbed frame, then discard it (no-op runner)
    }
}
Developer ID: neutmute, Project: PiCamCV, Lines: 7, Source: NoopRunner.cs
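PiCamCV's CameraCapture wrapper is not shown on this page; with plain Emgu.CV the same grab-then-retrieve pattern looks roughly like the sketch below (the capture class is named VideoCapture in recent Emgu releases, Capture in older ones):
var capture = new VideoCapture(0); // open the default camera (device 0)
capture.ImageGrabbed += (s, args) =>
{
    using (var matCaptured = new Mat())
    {
        capture.Retrieve(matCaptured); // copy the grabbed frame into the Mat
        // process matCaptured here; it is disposed when the using block exits
    }
};
capture.Start(); // begin grabbing frames and raising ImageGrabbed events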
Example 4: FindHomography
/// <summary>
/// Finds the perspective transformation H = ||h_ij|| between the source and the destination planes.
/// </summary>
/// <param name="srcPoints">Point coordinates in the original plane</param>
/// <param name="dstPoints">Point coordinates in the destination plane</param>
/// <param name="homography">The output 3x3 homography matrix</param>
/// <param name="method">FindHomography method</param>
/// <param name="ransacReprojThreshold">
/// The maximum allowed reprojection error to treat a point pair as an inlier.
/// The parameter is only used in RANSAC-based homography estimation.
/// E.g. if dstPoints coordinates are measured in pixels with pixel-accurate precision, it makes sense to set this parameter somewhere in the range of 1 to 3.
/// </param>
/// <param name="mask">Optional output mask set by a robust method (RANSAC or LMedS). Note that the input mask values are ignored.</param>
public static void FindHomography(
    PointF[] srcPoints,
    PointF[] dstPoints,
    IOutputArray homography,
    CvEnum.HomographyMethod method,
    double ransacReprojThreshold = 3,
    IOutputArray mask = null)
{
    GCHandle srcHandle = GCHandle.Alloc(srcPoints, GCHandleType.Pinned);
    GCHandle dstHandle = GCHandle.Alloc(dstPoints, GCHandleType.Pinned);
    try
    {
        // Wrap the pinned PointF arrays as n x 2 single-channel 32-bit float matrices (row step = 8 bytes)
        using (Mat srcPointMatrix = new Mat(srcPoints.Length, 2, DepthType.Cv32F, 1, srcHandle.AddrOfPinnedObject(), 8))
        using (Mat dstPointMatrix = new Mat(dstPoints.Length, 2, DepthType.Cv32F, 1, dstHandle.AddrOfPinnedObject(), 8))
        {
            CvInvoke.FindHomography(srcPointMatrix, dstPointMatrix, homography, method, ransacReprojThreshold, mask);
        }
    }
    finally
    {
        srcHandle.Free();
        dstHandle.Free();
    }
}
Developer ID: Warren-GH, Project: emgucv, Lines: 40, Source: CvInvokeCalib3d.cs
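A hedged usage sketch for the overload above, with made-up point coordinates; four correspondences are the minimum required by the default least-squares method:
PointF[] src = { new PointF(0, 0), new PointF(100, 0), new PointF(100, 100), new PointF(0, 100) };
PointF[] dst = { new PointF(12, 8), new PointF(118, 15), new PointF(110, 120), new PointF(5, 105) };
using (Mat homography = new Mat())
{
    CvInvoke.FindHomography(src, dst, homography, CvEnum.HomographyMethod.Default);
    // homography now holds the 3x3 transform; pass it to CvInvoke.WarpPerspective to warp an image
}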
Example 5: ProcessImage
private void ProcessImage(Mat image)
{
    Stopwatch watch = Stopwatch.StartNew(); // time the detection process
    List<Mat> stopSignList = new List<Mat>();
    List<Rectangle> stopSignBoxList = new List<Rectangle>();
    _stopSignDetector.DetectStopSign(image, stopSignList, stopSignBoxList);
    watch.Stop(); // stop the timer
    processTimeLabel.Text = String.Format("Stop Sign Detection time: {0} milliseconds", watch.Elapsed.TotalMilliseconds);
    panel1.Controls.Clear();
    Point startPoint = new Point(10, 10);
    for (int i = 0; i < stopSignList.Count; i++)
    {
        Rectangle rect = stopSignBoxList[i];
        AddLabelAndImage(
            ref startPoint,
            String.Format("Stop Sign [{0},{1}]:", rect.Location.X + rect.Width / 2, rect.Location.Y + rect.Height / 2), // label each sign with its center point
            stopSignList[i]);
        CvInvoke.Rectangle(image, rect, new Bgr(Color.Aquamarine).MCvScalar, 2);
    }
    imageBox1.Image = image;
}
Developer ID: reidblomquist, Project: emgucv, Lines: 27, Source: TrafficSignRecognitionForm.cs
Example 6: processFrameAndUpdateGUI
///////////////////////////////////////////////////////////////////////////////////////////
void processFrameAndUpdateGUI(object sender, EventArgs arg) {
    Mat imgOriginal;
    imgOriginal = capWebcam.QueryFrame();
    if (imgOriginal == null) {
        MessageBox.Show("unable to read frame from webcam" + Environment.NewLine + Environment.NewLine + "exiting program");
        Environment.Exit(0);
        return;
    }
    Mat imgGrayscale = new Mat(imgOriginal.Size, DepthType.Cv8U, 1);
    Mat imgBlurred = new Mat(imgOriginal.Size, DepthType.Cv8U, 1);
    Mat imgCanny = new Mat(imgOriginal.Size, DepthType.Cv8U, 1);
    CvInvoke.CvtColor(imgOriginal, imgGrayscale, ColorConversion.Bgr2Gray);
    CvInvoke.GaussianBlur(imgGrayscale, imgBlurred, new Size(5, 5), 1.5);
    CvInvoke.Canny(imgBlurred, imgCanny, 100, 200);
    ibOriginal.Image = imgOriginal;
    ibCanny.Image = imgCanny;
}
Developer ID: RaveenAthapaththu, Project: OpenCV_3_Windows_10_Installation_Tutorial, Lines: 26, Source: CannyWebcam.cs
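The capWebcam field and the event wiring are outside this snippet; a sketch of the setup this handler assumes, in the same tutorial's style (the form-load handler name is hypothetical):
Capture capWebcam = null; // class is named VideoCapture in newer Emgu releases
private void frmMain_Load(object sender, EventArgs e)
{
    try
    {
        capWebcam = new Capture(0); // open the default webcam
    }
    catch (Exception ex)
    {
        MessageBox.Show("unable to open webcam: " + ex.Message);
        return;
    }
    Application.Idle += processFrameAndUpdateGUI; // run the handler whenever the UI is idle
}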
Example 7: Main
static void Main(string[] args)
{
    MCvPoint3D32f[] _points;
    Mat _left = CvInvoke.Imread("imL.png", ImreadModes.Color);
    Mat _right = CvInvoke.Imread("imR.png", ImreadModes.Color);
    Mat disparityMap = new Mat();
    Stopwatch watch = Stopwatch.StartNew();
    UMat leftGray = new UMat();
    UMat rightGray = new UMat();
    CvInvoke.CvtColor(_left, leftGray, ColorConversion.Bgr2Gray);
    CvInvoke.CvtColor(_right, rightGray, ColorConversion.Bgr2Gray);
    Mat points = new Mat();
    Computer3DPointsFromStereoPair(leftGray, rightGray, disparityMap, points);
    watch.Stop();
    long disparityComputationTime = watch.ElapsedMilliseconds;
    Mat pointsArray = points.Reshape(points.NumberOfChannels, points.Rows * points.Cols);
    Mat colorArray = _left.Reshape(_left.NumberOfChannels, _left.Rows * _left.Cols);
    Mat colorArrayFloat = new Mat();
    colorArray.ConvertTo(colorArrayFloat, DepthType.Cv32F);
    WCloud cloud = new WCloud(pointsArray, colorArray);
    Emgu.CV.Viz3d v = new Emgu.CV.Viz3d("Simple stereo reconstruction");
    WText wtext = new WText("3d point cloud", new System.Drawing.Point(20, 20), 20, new MCvScalar(255, 255, 255));
    WCoordinateSystem wCoordinate = new WCoordinateSystem(1.0);
    v.ShowWidget("text", wtext);
    //v.ShowWidget("coordinate", wCoordinate);
    v.ShowWidget("cloud", cloud);
    v.Spin();
}
Developer ID: neutmute, Project: emgucv, Lines: 31, Source: Program.cs
Example 8: ProcessFrame
/// <summary>
/// Frame processing function, called on the ImageGrabbed event
/// </summary>
protected override void ProcessFrame(object sender, EventArgs e)
{
    // Unsubscribe to stop receiving events while this frame is handled
    _capture.ImageGrabbed -= ProcessFrame;
    try
    {
        // Get the frame from the camera
        Mat f = new Mat();
        _capture.Retrieve(f, 0);
        Frame frame = new Frame();
        frame.Timestamp = DateTime.Now;
        frame.Image = new Image<Gray, byte>(f.Bitmap);
        Data = frame;
        NotifyAll();
    }
    catch (Exception ex)
    {
        Log.Print(String.Format("Failed to deal with frame. Reason: {0}", ex.Message), eCategory.Error, LogTag.IMAGE);
    }
    finally
    {
        // Subscribe back to receive events
        _capture.ImageGrabbed += ProcessFrame;
    }
}
Developer ID: djjosse, Project: Foosbot, Lines: 30, Source: FrameStreamer.cs
Example 9: ProcessFrame
void ProcessFrame(object sender, EventArgs e)
{
    Mat frame = _cameraCapture.QueryFrame();
    Mat smoothedFrame = new Mat();
    CvInvoke.GaussianBlur(frame, smoothedFrame, new Size(3, 3), 1); // filter out noise
    //frame._SmoothGaussian(3);
    #region use the BG/FG detector to find the foreground mask
    Mat foregroundMask = new Mat();
    _fgDetector.Apply(smoothedFrame, foregroundMask);
    #endregion
    CvBlobs blobs = new CvBlobs();
    _blobDetector.Detect(foregroundMask.ToImage<Gray, byte>(), blobs);
    blobs.FilterByArea(100, int.MaxValue); // keep only blobs of at least 100 pixels
    float scale = (frame.Width + frame.Height) / 2.0f; // average frame dimension, used to scale the tracker's distance threshold
    _tracker.Update(blobs, 0.01 * scale, 5, 5);
    foreach (var pair in _tracker)
    {
        CvTrack b = pair.Value;
        CvInvoke.Rectangle(frame, b.BoundingBox, new MCvScalar(255.0, 255.0, 255.0), 2);
        CvInvoke.PutText(frame, b.Id.ToString(), new Point((int)Math.Round(b.Centroid.X), (int)Math.Round(b.Centroid.Y)), FontFace.HersheyPlain, 1.0, new MCvScalar(255.0, 255.0, 255.0));
    }
    imageBox1.Image = frame;
    imageBox2.Image = foregroundMask;
}
Developer ID: neutmute, Project: emgucv, Lines: 29, Source: VideoSurveilance.cs
Example 10: ProcessFrame
private void ProcessFrame(object sender, EventArgs arg)
{
    Mat frame = new Mat(); // matrix to hold the captured picture
    capture.Retrieve(frame, 0); // retrieve the picture into the matrix
    if (frame.IsEmpty)
        return; // nothing was captured
    Image<Bgr, byte> image = frame.ToImage<Bgr, byte>();
    FaceNo = 0;
    Image<Gray, byte> grayFrame = frame.ToImage<Gray, byte>(); // grayscale copy for detection
    faces = cascade.DetectMultiScale(grayFrame, 1.1, 2, new Size(30, 30));
    Bitmap BitmapInput = grayFrame.ToBitmap();
    Bitmap ExtractedFace;
    Graphics FaceCanvas;
    //countTable.Text = faces.Count().ToString();
    if (faces.Count() > 0)
    {
        foreach (var face in faces)
        {
            image.Draw(face, new Bgr(Color.Blue), 1); // draw a rectangle around each detected face
            ExtractedFace = new Bitmap(face.Width, face.Height);
            FaceCanvas = Graphics.FromImage(ExtractedFace);
            FaceCanvas.DrawImage(BitmapInput, 0, 0, face, GraphicsUnit.Pixel); // crop the face region out of the gray frame
            ExtFaces.Add(ExtractedFace);
            FaceNo++;
        }
    }
    imageBox1.Image = image; // display the annotated image in the imageBox
}
Developer ID: elattar, Project: FaceAttendanceSystem, Lines: 30, Source: VideoCapturing.cs
Example 11: ViewDidLoad
public override void ViewDidLoad()
{
    base.ViewDidLoad();
    ButtonText = "Detect Stop Sign";
    OnButtonClick +=
        delegate
        {
            using (Mat stopSignModel = new Mat("stop-sign-model.png"))
            using (Mat image = new Mat("stop-sign.jpg"))
            {
                Stopwatch watch = Stopwatch.StartNew(); // time the detection process
                List<Mat> stopSignList = new List<Mat>();
                List<Rectangle> stopSignBoxList = new List<Rectangle>();
                StopSignDetector detector = new StopSignDetector(stopSignModel);
                detector.DetectStopSign(image, stopSignList, stopSignBoxList);
                watch.Stop(); // stop the timer
                foreach (Rectangle rect in stopSignBoxList)
                {
                    CvInvoke.Rectangle(image, rect, new MCvScalar(0, 0, 255), 2);
                }
                Size frameSize = FrameSize;
                using (Mat resized = new Mat())
                {
                    CvInvoke.ResizeForFrame(image, resized, frameSize);
                    MessageText = String.Format("Detection time: {0} milliseconds", watch.Elapsed.TotalMilliseconds);
                    SetImage(resized);
                }
            }
        };
}
Developer ID: neutmute, Project: emgucv, Lines: 34, Source: TrafficSignRecognitionDialogViewController.cs
Example 12: edge
private static Mat edge(Mat I)
{
    Mat E = new Mat();
    CvInvoke.CvtColor(I, E, ColorConversion.Bgr2Gray); // convert to grayscale first
    CvInvoke.Canny(E, E, 40, 80, 3); // Canny with low/high thresholds 40/80 and a 3x3 aperture
    return E;
}
Developer ID: inlmouse, Project: FaceUnitTest, Lines: 7, Source: LowPoly.cs
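A short usage sketch for the edge helper above; the file names are hypothetical:
Mat input = CvInvoke.Imread("building.jpg", ImreadModes.Color); // Bgr input, as edge expects
Mat edges = edge(input);
CvInvoke.Imwrite("building-edges.png", edges); // writes the single-channel 8-bit edge map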
Example 13: ImageGrabbedHandler
public override void ImageGrabbedHandler(object sender, EventArgs e)
{
    if (_transmitTask == null || _transmitTask.IsCompleted)
    {
        using (var matCaptured = new Mat())
        {
            CameraCapture.Retrieve(matCaptured);
            var bgrImage = matCaptured.ToImage<Bgr, byte>();
            WriteText(bgrImage, 30, DateTime.Now.ToString("HH:mm:ss tt"));
            imageBoxCaptured.Image = bgrImage;
            IImageTransmitter transmitter = null;
            if (radBsonImage.Checked)
            {
                transmitter = _imageTransmitter;
            }
            if (radBsonJpeg.Checked)
            {
                transmitter = _jpegTransmitter;
            }
            if (transmitter != null)
            {
                _transmitTask = transmitter.Transmit(bgrImage);
            }
        }
    }
}
Developer ID: neutmute, Project: PiCamCV, Lines: 29, Source: ServerProcessingControl.cs
Example 14: Computer3DPointsFromStereoPair
/// <summary>
/// Given the left and right image, compute the disparity map and the 3D point cloud.
/// </summary>
/// <param name="left">The left image</param>
/// <param name="right">The right image</param>
/// <param name="outputDisparityMap">The left disparity map</param>
/// <param name="points">The 3D point cloud within a [-0.5, 0.5] cube</param>
private static void Computer3DPointsFromStereoPair(IInputArray left, IInputArray right, Mat outputDisparityMap, Mat points)
{
    Size size;
    using (InputArray ia = left.GetInputArray())
        size = ia.GetSize();
    using (StereoBM stereoSolver = new StereoBM())
    {
        stereoSolver.Compute(left, right, outputDisparityMap);
        float scale = Math.Max(size.Width, size.Height);
        // Construct a simple Q matrix; if you have a matrix from cvStereoRectify, you should use that instead
        using (Matrix<double> q = new Matrix<double>(
            new double[,]
            {
                {1.0, 0.0, 0.0, -size.Width / 2},  // shift the x origin to the image center
                {0.0, -1.0, 0.0, size.Height / 2}, // shift the y origin to the image center and flip it upside down
                {0.0, 0.0, -1.0, 0.0},             // multiply the z value by -1.0
                {0.0, 0.0, 0.0, scale}             // scale the object's coordinates into a [-0.5, 0.5] cube
            }))
        {
            CvInvoke.ReprojectImageTo3D(outputDisparityMap, points, q, false, DepthType.Cv32F);
        }
        //points = PointCollection.ReprojectImageTo3D(outputDisparityMap, q);
    }
}
Developer ID: neutmute, Project: emgucv, Lines: 36, Source: Program.cs
Example 15: OnCreate
protected override void OnCreate(Bundle bundle)
{
    base.OnCreate(bundle);
    OnImagePicked += (sender, image) =>
    {
        if (image == null)
            return;
        using (Mat stopSignModel = new Mat(Assets, "stop-sign-model.png"))
        {
            Stopwatch watch = Stopwatch.StartNew(); // time the detection process
            List<Mat> stopSignList = new List<Mat>();
            List<Rectangle> stopSignBoxList = new List<Rectangle>();
            StopSignDetector detector = new StopSignDetector(stopSignModel);
            detector.DetectStopSign(image, stopSignList, stopSignBoxList);
            watch.Stop(); // stop the timer
            SetMessage(String.Format("Detection time: {0} milliseconds", watch.Elapsed.TotalMilliseconds));
            foreach (Rectangle rect in stopSignBoxList)
                CvInvoke.Rectangle(image, rect, new Bgr(System.Drawing.Color.Red).MCvScalar, 2);
            SetImageBitmap(image.ToBitmap());
            image.Dispose();
        }
    };
    OnButtonClick += (sender, args) =>
    {
        PickImage("stop-sign.jpg");
    };
}
Developer ID: neutmute, Project: emgucv, Lines: 34, Source: TrafficSignRecognitionActivity.cs
Example 16: PeakPattern
public PeakPattern(Mat inImage, double highThresh = 180.0)
{
    ImageHeight = inImage.Height;
    ImageWidth = inImage.Width;
    highThreshold = highThresh;
    // Should test a smaller bounding box than the peak-finding threshold.
    textRect = ProcessingTools.findTextEdge<Bgr, double>(inImage, new double[] { highThreshold, highThreshold, highThreshold });
    Mat croppedImage = new Mat(inImage, textRect);
    // Sample three horizontal lines at 1/4, 1/2 and 3/4 of the cropped text region
    resultList.Add(ProcessingTools.testLine<Bgr, double>(croppedImage, new double[] { highThreshold, highThreshold, highThreshold },
        textRect.Height / 4));
    resultList.Add(ProcessingTools.testLine<Bgr, double>(croppedImage, new double[] { highThreshold, highThreshold, highThreshold },
        textRect.Height / 2));
    resultList.Add(ProcessingTools.testLine<Bgr, double>(croppedImage, new double[] { highThreshold, highThreshold, highThreshold },
        (int)(textRect.Height * 0.75)));
}
Developer ID: DrCognito, Project: imageProcessingTestBed, Lines: 31, Source: PeakPattern.cs
Example 17: Solve
public Image<Gray, byte> Solve(Image<Gray, byte> left, Image<Gray, byte> right)
{
    var size = left.Size;
    using (var leftGpu = new GpuMat(left.Rows, left.Cols, DepthType.Cv16S, 1))
    using (var rightGpu = new GpuMat(left.Rows, left.Cols, DepthType.Cv16S, 1))
    using (var disparityGpu = new GpuMat(left.Rows, left.Cols, DepthType.Cv16S, 1))
    using (var filteredDisparityGpu = new GpuMat(left.Rows, left.Cols, DepthType.Cv16S, 1))
    using (var filteredDisparity16S = new Mat(size, DepthType.Cv16S, 1))
    using (var filteredDisparity8U = new Mat(size, DepthType.Cv8U, 1))
    {
        leftGpu.Upload(left.Mat);
        rightGpu.Upload(right.Mat);
        algorithm.FindStereoCorrespondence(leftGpu, rightGpu, disparityGpu);
        filter.Apply(disparityGpu, leftGpu, filteredDisparityGpu);
        filteredDisparityGpu.Download(filteredDisparity16S);
        CvInvoke.MinMaxLoc(filteredDisparity16S, ref min, ref max, ref minPosition, ref maxPosition);
        filteredDisparity16S.ConvertTo(filteredDisparity8U, DepthType.Cv8U, 255.0 / (Max - Min));
        return new Image<Gray, byte>(filteredDisparity8U.Bitmap);
    }
}
Developer ID: rachwal, Project: RTM-Tools, Lines: 27, Source: GPUDisparitySolver.cs
Example 18: DenoiseTVL1
/// <summary>
/// The primal-dual algorithm is an algorithm for solving special types of variational problems (that is, finding a function to minimize some functional).
/// As image denoising, in particular, may be seen as a variational problem, the primal-dual algorithm can be used to perform denoising, and this is exactly what is implemented here.
/// </summary>
/// <param name="observations">This array should contain one or more noised versions of the image that is to be restored.</param>
/// <param name="result">Here the denoised image will be stored. There is no need to pre-allocate storage space, as it will be allocated automatically if necessary.</param>
/// <param name="lambda">Corresponds to the regularization weight lambda in the variational formulation. As it is enlarged, smooth (blurred) images are treated more favorably than detailed (but maybe more noised) ones. Roughly speaking, as it becomes smaller, the result will be more blurred, but more severe outliers will be removed.</param>
/// <param name="niters">Number of iterations that the algorithm will run. More iterations give better results, but it is hard to quantify this precisely, so just use the default and increase it if the results are poor.</param>
public static void DenoiseTVL1(Mat[] observations, Mat result, double lambda, int niters)
{
    using (Util.VectorOfMat vm = new Util.VectorOfMat(observations))
    {
        cveDenoiseTVL1(vm, result, lambda, niters);
    }
}
Developer ID: reidblomquist, Project: emgucv, Lines: 16, Source: CvInvokeOptim.cs
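A hedged usage sketch: restoring an image from three noisy grayscale captures of the same scene (file names hypothetical; the observations should be 8-bit single-channel):
Mat[] observations =
{
    CvInvoke.Imread("noisy0.png", ImreadModes.Grayscale),
    CvInvoke.Imread("noisy1.png", ImreadModes.Grayscale),
    CvInvoke.Imread("noisy2.png", ImreadModes.Grayscale)
};
Mat denoised = new Mat();
CvInvoke.DenoiseTVL1(observations, denoised, 1.0, 30); // OpenCV's customary default lambda and iteration count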
Example 19: Run
static void Run()
{
    Mat image = new Mat("lena.jpg", LoadImageType.Color); // read the file as an 8-bit Bgr image
    long detectionTime;
    List<Rectangle> faces = new List<Rectangle>();
    List<Rectangle> eyes = new List<Rectangle>();
    // The CUDA cascade classifier doesn't seem to be able to load "haarcascade_frontalface_default.xml" in this release,
    // so the CUDA module is disabled for now
    bool tryUseCuda = false;
    bool tryUseOpenCL = true;
    DetectFace.Detect(
        image, "haarcascade_frontalface_default.xml", "haarcascade_eye.xml",
        faces, eyes,
        tryUseCuda,
        tryUseOpenCL,
        out detectionTime);
    foreach (Rectangle face in faces)
        CvInvoke.Rectangle(image, face, new Bgr(Color.Red).MCvScalar, 2);
    foreach (Rectangle eye in eyes)
        CvInvoke.Rectangle(image, eye, new Bgr(Color.Blue).MCvScalar, 2);
    // display the image
    ImageViewer.Show(image, String.Format(
        "Completed face and eye detection using {0} in {1} milliseconds",
        (tryUseCuda && CudaInvoke.HasCuda) ? "GPU"
        : (tryUseOpenCL && CvInvoke.HaveOpenCLCompatibleGpuDevice) ? "OpenCL"
        : "CPU",
        detectionTime));
}
Developer ID: reidblomquist, Project: emgucv, Lines: 32, Source: Program.cs
Example 20: OnImageAvailable
protected void OnImageAvailable(Mat image)
{
    EventHandler<ImageAvailableEventArgs> handler = ImageAvailable; // copy to a local so the raise is thread-safe
    if (handler != null)
    {
        handler(this, new ImageAvailableEventArgs(image));
    }
}
Developer ID: Alovel, Project: OpenCV-Face-andmore-Tracker, Lines: 8, Source: ImageProcessor.cs
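The ImageAvailableEventArgs type is not shown on this page; below is a minimal sketch consistent with the constructor call above (the project's real definition may differ):
public class ImageAvailableEventArgs : EventArgs
{
    public Mat Image { get; private set; }
    public ImageAvailableEventArgs(Mat image)
    {
        Image = image; // the frame passed to OnImageAvailable
    }
}
// Subscribers then receive each frame, e.g.: processor.ImageAvailable += (s, e) => Display(e.Image); (Display is hypothetical)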
Note: The Emgu.CV.Mat class examples in this article are collected from GitHub/MSDocs and other source-code and documentation platforms, and the snippets were selected from open-source projects contributed by many developers. Copyright of the source code remains with the original authors; refer to each project's License before distributing or using it, and do not repost without permission.