This article compiles typical usage examples of the C# Mat class: what the Mat class is for, how to use it, and what working code looks like. The curated examples below may help.
20 Mat class code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C# code examples.
Example 1: StopSignDetector
public StopSignDetector(IInputArray stopSignModel)
{
    _detector = new SURF(500);
    using (Mat redMask = new Mat())
    {
        GetRedPixelMask(stopSignModel, redMask);
        _modelKeypoints = new VectorOfKeyPoint();
        _modelDescriptors = new Mat();
        _detector.DetectAndCompute(redMask, null, _modelKeypoints, _modelDescriptors, false);
        if (_modelKeypoints.Size == 0)
            throw new Exception("No image feature has been found in the stop sign model");
    }
    _modelDescriptorMatcher = new BFMatcher(DistanceType.L2);
    _modelDescriptorMatcher.Add(_modelDescriptors);
    _octagon = new VectorOfPoint(
        new Point[]
        {
            new Point(1, 0),
            new Point(2, 0),
            new Point(3, 1),
            new Point(3, 2),
            new Point(2, 3),
            new Point(1, 3),
            new Point(0, 2),
            new Point(0, 1)
        });
}
Developer: reidblomquist | Project: emgucv | Lines: 30 | Source: StopSignDetector.cs
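Note that the _octagon contour built above is not consumed inside the constructor; it serves as a reference shape when the detector later filters candidate contours. A minimal, hedged sketch of such a comparison follows (the IsOctagonLike helper, its 0.1 threshold, and the ContoursMatchType.I3 metric are illustrative assumptions, not taken from the original source):

// Illustrative helper: judge whether a candidate contour is roughly octagonal
// by comparing it against the reference octagon built in the constructor.
private bool IsOctagonLike(VectorOfPoint candidateContour, double maxDissimilarity = 0.1)
{
    // MatchShapes returns a dissimilarity score; lower means more octagon-like.
    double score = CvInvoke.MatchShapes(candidateContour, _octagon, ContoursMatchType.I3);
    return score <= maxDissimilarity; // assumed threshold, tune per application
}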
Example 2: Start
// Use this for initialization
void Start()
{
    String[] textureNames = new string[] { "stitch1", "stitch2", "stitch3", "stitch4" };
    Mat[] imgs = new Mat[textureNames.Length];
    Mat tmp = new Mat ();
    for (int i = 0; i < textureNames.Length; i++) {
        Texture2D tex = Resources.Load<Texture2D> (textureNames [i]);
        imgs [i] = new Mat ();
        TextureConvert.Texture2dToOutputArray (tex, tmp);
        CvInvoke.Flip (tmp, tmp, FlipType.Vertical);
        CvInvoke.CvtColor (tmp, imgs [i], ColorConversion.Bgra2Bgr);
        if (imgs [i].IsEmpty)
            Debug.Log ("Image " + i + " is empty");
        else
            Debug.Log ("Image " + i + " is " + imgs [i].NumberOfChannels + " channels " + imgs [i].Width + "x" + imgs [i].Height);
    }
    Emgu.CV.Stitching.Stitcher stitcher = new Emgu.CV.Stitching.Stitcher (false);
    Mat result = new Mat ();
    using (VectorOfMat vms = new VectorOfMat (imgs))
        stitcher.Stitch (vms, result);
    //CvInvoke.Flip(result, result, FlipType.Vertical);
    Texture2D texture = TextureConvert.InputArrayToTexture2D (result, FlipType.Vertical);
    this.GetComponent<GUITexture> ().texture = texture;
    Size s = result.Size;
    this.GetComponent<GUITexture> ().pixelInset = new Rect (-s.Width / 2, -s.Height / 2, s.Width, s.Height);
}
Developer: neutmute | Project: emgucv | Lines: 30 | Source: Stitch.cs
Example 3: MatchBySurf
private void MatchBySurf(Mat src1, Mat src2)
{
    var gray1 = new Mat();
    var gray2 = new Mat();
    Cv2.CvtColor(src1, gray1, ColorConversion.BgrToGray);
    Cv2.CvtColor(src2, gray2, ColorConversion.BgrToGray);
    var surf = new SURF(500, 4, 2, true);
    // Detect the keypoints and generate their descriptors using SURF
    KeyPoint[] keypoints1, keypoints2;
    var descriptors1 = new MatOfFloat();
    var descriptors2 = new MatOfFloat();
    surf.Run(gray1, null, out keypoints1, descriptors1);
    surf.Run(gray2, null, out keypoints2, descriptors2);
    // Match descriptor vectors
    var bfMatcher = new BFMatcher(NormType.L2, false);
    var flannMatcher = new FlannBasedMatcher();
    DMatch[] bfMatches = bfMatcher.Match(descriptors1, descriptors2);
    DMatch[] flannMatches = flannMatcher.Match(descriptors1, descriptors2);
    // Draw matches
    var bfView = new Mat();
    Cv2.DrawMatches(gray1, keypoints1, gray2, keypoints2, bfMatches, bfView);
    var flannView = new Mat();
    Cv2.DrawMatches(gray1, keypoints1, gray2, keypoints2, flannMatches, flannView);
    using (new Window("SURF matching (by BFMatcher)", WindowMode.AutoSize, bfView))
    using (new Window("SURF matching (by FlannBasedMatcher)", WindowMode.AutoSize, flannView))
    {
        Cv2.WaitKey();
    }
}
Developer: 0sv | Project: opencvsharp | Lines: 35 | Source: SiftSurfSample.cs
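Match keeps only the single best candidate per query descriptor, which lets ambiguous matches through. A common refinement, not part of this sample, is Lowe's ratio test via KnnMatch; a minimal sketch against the same matcher and descriptor mats (the 0.75 ratio is a conventional assumption, and System.Linq is required):

// Take the two nearest neighbours per query descriptor and keep a match
// only when the best is clearly better than the runner-up.
DMatch[][] knnMatches = bfMatcher.KnnMatch(descriptors1, descriptors2, 2);
DMatch[] goodMatches = knnMatches
    .Where(m => m.Length == 2 && m[0].Distance < 0.75f * m[1].Distance)
    .Select(m => m[0])
    .ToArray();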
Example 4: Run
public void Run()
{
    var dst = new Mat(FilePath.Lenna, LoadMode.Color);
    var gray = new Mat(FilePath.Lenna, LoadMode.GrayScale);
    StarDetector detector = new StarDetector(45);
    KeyPoint[] keypoints = detector.Run(gray);
    if (keypoints != null)
    {
        var color = new Scalar(0, 255, 0);
        foreach (KeyPoint kpt in keypoints)
        {
            float r = kpt.Size / 2;
            Cv2.Circle(dst, kpt.Pt, (int)r, color, 1, LineType.Link8, 0);
            Cv2.Line(dst,
                new Point2f(kpt.Pt.X + r, kpt.Pt.Y + r),
                new Point2f(kpt.Pt.X - r, kpt.Pt.Y - r),
                color, 1, LineType.Link8, 0);
            Cv2.Line(dst,
                new Point2f(kpt.Pt.X - r, kpt.Pt.Y + r),
                new Point2f(kpt.Pt.X + r, kpt.Pt.Y - r),
                color, 1, LineType.Link8, 0);
        }
    }
    using (new Window("StarDetector features", dst))
    {
        Cv2.WaitKey();
    }
}
Developer: healtech | Project: opencvsharp | Lines: 31 | Source: StarDetectorSample.cs
Example 5: Start
// Use this for initialization
void Start()
{
    Texture2D imgTexture = Resources.Load ("chessboard") as Texture2D;
    Mat imgMat = new Mat (imgTexture.height, imgTexture.width, CvType.CV_8UC3);
    Utils.texture2DToMat (imgTexture, imgMat);
    Debug.Log ("imgMat dst ToString " + imgMat.ToString ());
    Mat grayMat = new Mat ();
    Imgproc.cvtColor (imgMat, grayMat, Imgproc.COLOR_RGB2GRAY);
    Imgproc.Canny (grayMat, grayMat, 50, 200);
    Mat lines = new Mat ();
    Imgproc.HoughLinesP (grayMat, lines, 1, Mathf.PI / 180, 50, 50, 10);
    // Debug.Log ("lines toString " + lines.ToString ());
    // Debug.Log ("lines dump " + lines.dump ());
    // Each detected line is stored as four ints: x1, y1, x2, y2.
    int[] linesArray = new int[lines.cols () * lines.rows () * lines.channels ()];
    lines.get (0, 0, linesArray);
    for (int i = 0; i < linesArray.Length; i = i + 4) {
        Core.line (imgMat, new Point (linesArray [i + 0], linesArray [i + 1]), new Point (linesArray [i + 2], linesArray [i + 3]), new Scalar (255, 0, 0), 2);
    }
    Texture2D texture = new Texture2D (imgMat.cols (), imgMat.rows (), TextureFormat.RGBA32, false);
    Utils.matToTexture2D (imgMat, texture);
    gameObject.GetComponent<Renderer> ().material.mainTexture = texture;
}
Developer: prlosana | Project: OpenCVForUnity | Lines: 34 | Source: HoughLinesPSample.cs
Example 6: Start
// Use this for initialization
void Start()
{
    // Read the left and right images
    Texture2D texLeft = Resources.Load ("tsukuba_l") as Texture2D;
    Texture2D texRight = Resources.Load ("tsukuba_r") as Texture2D;
    Mat imgLeft = new Mat (texLeft.height, texLeft.width, CvType.CV_8UC1);
    Mat imgRight = new Mat (texRight.height, texRight.width, CvType.CV_8UC1);
    Utils.texture2DToMat (texLeft, imgLeft);
    Utils.texture2DToMat (texRight, imgRight);
    // or:
    // Mat imgLeft = Imgcodecs.imread (Utils.getFilePath ("tsukuba_l.png"), Imgcodecs.IMREAD_GRAYSCALE);
    // Mat imgRight = Imgcodecs.imread (Utils.getFilePath ("tsukuba_r.png"), Imgcodecs.IMREAD_GRAYSCALE);
    Mat imgDisparity16S = new Mat (imgLeft.rows (), imgLeft.cols (), CvType.CV_16S);
    Mat imgDisparity8U = new Mat (imgLeft.rows (), imgLeft.cols (), CvType.CV_8UC1);
    if (imgLeft.empty () || imgRight.empty ()) {
        Debug.Log ("Error reading images ");
    }
    StereoBM sbm = StereoBM.create (16, 15);
    sbm.compute (imgLeft, imgRight, imgDisparity16S);
    // normalize to CvType.CV_8U
    Core.normalize (imgDisparity16S, imgDisparity8U, 0, 255, Core.NORM_MINMAX, CvType.CV_8U);
    Texture2D texture = new Texture2D (imgDisparity8U.cols (), imgDisparity8U.rows (), TextureFormat.RGBA32, false);
    Utils.matToTexture2D (imgDisparity8U, texture);
    gameObject.GetComponent<Renderer> ().material.mainTexture = texture;
}
Developer: EnoxSoftware | Project: OpenCVForUnity | Lines: 34 | Source: StereoBMSample.cs
Example 7: Run
public void Run()
{
    var capture = new VideoCapture();
    capture.Set(CaptureProperty.FrameWidth, 640);
    capture.Set(CaptureProperty.FrameHeight, 480);
    capture.Open(-1);
    if (!capture.IsOpened())
        throw new Exception("capture initialization failed");
    var fs = FrameSource.CreateCameraSource(-1);
    var sr = SuperResolution.CreateBTVL1();
    sr.SetInput(fs);
    using (var normalWindow = new Window("normal"))
    using (var srWindow = new Window("super resolution"))
    {
        var normalFrame = new Mat();
        var srFrame = new Mat();
        while (true)
        {
            capture.Read(normalFrame);
            sr.NextFrame(srFrame);
            if (normalFrame.Empty() || srFrame.Empty())
                break;
            normalWindow.ShowImage(normalFrame);
            srWindow.ShowImage(srFrame);
            Cv2.WaitKey(100);
        }
    }
}
Developer: 0sv | Project: opencvsharp | Lines: 30 | Source: SuperResolutionSample.cs
Example 8: ConvertToGrayScale
public static Mat ConvertToGrayScale(Mat mat)
{
    Mat grayMat = new Mat();
    Cv2.CvtColor(mat, grayMat, ColorConversion.RgbToGray);
    return grayMat;
}
Developer: Muraad | Project: DynamoOpenCV | Lines: 7 | Source: OpenCv.cs
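A minimal usage sketch for the helper above (the file names are placeholders, and the static class name OpenCv is assumed from the source file name):

using (Mat src = new Mat("input.png", LoadMode.Color))
using (Mat gray = OpenCv.ConvertToGrayScale(src))
{
    Cv2.ImWrite("gray.png", gray); // write the single-channel result to disk
}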
Example 9: HDR
private static void HDR()
{
    var hdr = CalibrateDebevec.Create();
    Mat[] images = new Mat[3];
    images[0] = Cv2.ImRead(@"data\lenna.png", ImreadModes.AnyColor);
    images[1] = Cv2.ImRead(@"data\lenna.png", ImreadModes.AnyColor);
    images[2] = Cv2.ImRead(@"data\lenna.png", ImreadModes.AnyColor);
    float[] speeds = new float[3];
    speeds[0] = 1;
    speeds[1] = 1;
    speeds[2] = 1;
    Mat dst = new Mat();
    hdr.Process(images, dst, speeds);
    dst.ToString();
    for (int i = 0; i < Math.Max(dst.Rows, dst.Cols); i++)
    {
        Console.WriteLine(dst.At<float>(i));
    }
}
Developer: JiphuTzu | Project: opencvsharp | Lines: 25 | Source: Program.cs
Example 10: ToBitmapGrayScale
public void ToBitmapGrayScale()
{
    Mat img = new Mat(FilePath.Lenna511, LoadMode.GrayScale); // width % 4 != 0
    Bitmap bitmap = BitmapConverter2.ToBitmap(img);
    // Bitmap bitmap = img.ToBitmap();
    using (var form = new Form())
    using (var pb = new PictureBox())
    {
        pb.Image = bitmap;
        var size = new System.Drawing.Size(bitmap.Width, bitmap.Height);
        pb.ClientSize = size;
        form.ClientSize = size;
        form.Controls.Add(pb);
        form.KeyPreview = true;
        form.KeyDown += (sender, args) =>
        {
            if (args.KeyCode.HasFlag(Keys.Enter))
                ((Form)sender).Close();
        };
        form.Text = "Grayscale Mat to Bitmap Test";
        form.ShowDialog();
    }
}
Developer: jorik041 | Project: opencvsharp | Lines: 26 | Source: MatToBitmap.cs
Example 11: usingCppInterface1
private static void usingCppInterface1()
{
    // Cv2.ImRead
    using (var src = new Mat(@"..\..\Images\Penguin.Png", LoadMode.AnyDepth | LoadMode.AnyColor))
    using (var dst = new Mat())
    {
        src.CopyTo(dst);
        for (var y = 0; y < src.Height; y++)
        {
            for (var x = 0; x < src.Width; x++)
            {
                var pixel = src.Get<Vec3b>(y, x);
                var newPixel = new Vec3b
                {
                    Item0 = (byte)(255 - pixel.Item0), // B
                    Item1 = (byte)(255 - pixel.Item1), // G
                    Item2 = (byte)(255 - pixel.Item2)  // R
                };
                dst.Set(y, x, newPixel);
            }
        }
        // [Cpp] Accessing Pixel
        // https://github.com/shimat/opencvsharp/wiki/%5BCpp%5D-Accessing-Pixel
        //Cv2.NamedWindow();
        //Cv2.ImShow();
        using (new Window("C++ Interface: Src", image: src))
        using (new Window("C++ Interface: Dst", image: dst))
        {
            Cv2.WaitKey(0);
        }
    }
}
Developer: kauser-cse-buet | Project: OpenCVSharp-Samples | Lines: 35 | Source: Program.cs
Example 12: Update
// Update is called once per frame
void Update () {
    cap.Read (frame);
    if (!frame.Empty ()) {
        // assume this part of the frame contains only background
        smoothed_img = frame.Blur (new Size (5, 5));
        frame_hsv = frame.CvtColor (ColorConversionCodes.BGR2HSV);
        Scalar lb = new Scalar (0, 0, 50);
        Scalar ub = new Scalar (180, 70, 180);
        // The scraped snippet never assigned 'thresh'; an InRange call on the
        // HSV bounds above is the assumed missing step.
        thresh = frame_hsv.InRange (lb, ub);
        Mat disc = Cv2.GetStructuringElement (MorphShapes.Ellipse, new Size (7, 7));
        Cv2.MorphologyEx (thresh, thresh, MorphTypes.Close, disc, null, 3);
        contours = Cv2.FindContoursAsMat (thresh, RetrievalModes.List, ContourApproximationModes.ApproxSimple);
        mask = new Mat (thresh.Size (), thresh.Type (), Scalar.All (0));
        Cv2.Merge (new Mat[]{ mask, mask, mask }, mask);
        Cv2.BitwiseAnd (mask, frame, mask);
        //Cv2.Merge(new Mat[]{frame_backproj,frame_backproj,frame_backproj},frame_backproj);
        tex.LoadImage (smoothed_img.ToBytes (".png", new int[]{ 0 }));
    }
}
Developer: orlitany | Project: coffeeUnderTheSea | Lines: 36 | Source: segment_simple.cs
Example 13: ProcessImage
private void ProcessImage(IInputOutputArray image)
{
    Stopwatch watch = Stopwatch.StartNew(); // time the detection process
    List<IInputOutputArray> licensePlateImagesList = new List<IInputOutputArray>();
    List<IInputOutputArray> filteredLicensePlateImagesList = new List<IInputOutputArray>();
    List<RotatedRect> licenseBoxList = new List<RotatedRect>();
    List<string> words = _licensePlateDetector.DetectLicensePlate(
        image,
        licensePlateImagesList,
        filteredLicensePlateImagesList,
        licenseBoxList);
    watch.Stop(); // stop the timer
    processTimeLabel.Text = String.Format("License Plate Recognition time: {0} milliseconds", watch.Elapsed.TotalMilliseconds);
    panel1.Controls.Clear();
    Point startPoint = new Point(10, 10);
    for (int i = 0; i < words.Count; i++)
    {
        Mat dest = new Mat();
        CvInvoke.VConcat(licensePlateImagesList[i], filteredLicensePlateImagesList[i], dest);
        AddLabelAndImage(
            ref startPoint,
            String.Format("License: {0}", words[i]),
            dest);
        PointF[] verticesF = licenseBoxList[i].GetVertices();
        Point[] vertices = Array.ConvertAll(verticesF, Point.Round);
        using (VectorOfPoint pts = new VectorOfPoint(vertices))
            CvInvoke.Polylines(image, pts, true, new Bgr(Color.Red).MCvScalar, 2);
    }
}
Developer: reidblomquist | Project: emgucv | Lines: 34 | Source: LicensePlateRecognitionForm.cs
Example 14: hammDistMarker
/// <summary>
/// Computes the Hamming distance between the detected marker bits and a marker design.
/// </summary>
/// <returns>The Hamming distance (0 means a perfect match).</returns>
/// <param name="bits">Detected marker bits.</param>
/// <param name="markerDesign">Reference marker design.</param>
public static int hammDistMarker (Mat bits, byte[,] markerDesign)
{
    int dist = 0;
    int size = markerDesign.GetLength (0);
    byte[] b = new byte[size * size];
    bits.get (0, 0, b);
    for (int y = 0; y < size; y++) {
        int sum = 0;
        for (int x = 0; x < size; x++) {
            sum += (b [y * size + x] == markerDesign [y, x]) ? 0 : 1;
        }
        dist += sum;
    }
    return dist;
}
Developer: wlstks7 | Project: MarkerBasedARSample | Lines: 30 | Source: Marker.cs
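A hedged usage sketch: compare thresholded marker bits against a known design. The 5x5 design below is illustrative, and bits is assumed to be a CV_8UC1 Mat of the same size produced earlier in the marker pipeline:

// Illustrative 5x5 design; 1 = white cell, 0 = black cell.
byte[,] design = new byte[5, 5] {
    { 1, 0, 1, 0, 1 },
    { 0, 1, 0, 1, 0 },
    { 1, 0, 1, 0, 1 },
    { 0, 1, 0, 1, 0 },
    { 1, 0, 1, 0, 1 }
};
int dist = Marker.hammDistMarker (bits, design);
if (dist == 0) {
    Debug.Log ("Marker bits match the design exactly");
}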
Example 15: DetectFace
/// <summary>
/// Detects faces in the sample image and draws an ellipse around each detected face.
/// </summary>
/// <param name="cascade">The Haar cascade classifier used for detection.</param>
/// <returns>A copy of the source image with the detected faces marked.</returns>
private Mat DetectFace(CascadeClassifier cascade)
{
    Mat result;
    using (var src = new Mat(FilePath.Image.Yalta, LoadMode.Color))
    using (var gray = new Mat())
    {
        result = src.Clone();
        Cv2.CvtColor(src, gray, ColorConversion.BgrToGray, 0);
        // Detect faces
        Rect[] faces = cascade.DetectMultiScale(
            gray, 1.08, 2, HaarDetectionType.ScaleImage, new Size(30, 30));
        // Render all detected faces
        foreach (Rect face in faces)
        {
            var center = new Point
            {
                X = (int)(face.X + face.Width * 0.5),
                Y = (int)(face.Y + face.Height * 0.5)
            };
            var axes = new Size
            {
                Width = (int)(face.Width * 0.5),
                Height = (int)(face.Height * 0.5)
            };
            Cv2.Ellipse(result, center, axes, 0, 0, 360, new Scalar(255, 0, 255), 4);
        }
    }
    return result;
}
Developer: 0sv | Project: opencvsharp | Lines: 37 | Source: FaceDetection.cs
Example 16: detect
public List<Point[]> detect (Mat im, float scaleFactor, int minNeighbours, OpenCVForUnity.Size minSize)
{
    // convert image to greyscale
    Mat gray = null;
    if (im.channels () == 1) {
        gray = im;
    } else {
        gray = new Mat ();
        Imgproc.cvtColor (im, gray, Imgproc.COLOR_RGBA2GRAY);
    }
    using (Mat equalizeHistMat = new Mat ())
    using (MatOfRect faces = new MatOfRect ()) {
        Imgproc.equalizeHist (gray, equalizeHistMat);
        detector.detectMultiScale (equalizeHistMat, faces, scaleFactor, minNeighbours, 0
            | Objdetect.CASCADE_FIND_BIGGEST_OBJECT
            | Objdetect.CASCADE_SCALE_IMAGE, minSize, new Size ());
        if (faces.rows () < 1) {
            return new List<Point[]> ();
        }
        return convertMatOfRectToPoints (faces);
    }
}
Developer: mosnyder | Project: facerace | Lines: 29 | Source: FaceDetector.cs
Example 17: FindHomography
/// <summary>
/// Finds the perspective transformation H = ||h_ij|| between the source and the destination planes.
/// The result is written to the <paramref name="homography"/> output array.
/// </summary>
/// <param name="srcPoints">Point coordinates in the original plane</param>
/// <param name="dstPoints">Point coordinates in the destination plane</param>
/// <param name="homography">The output 3x3 homography matrix</param>
/// <param name="method">FindHomography method</param>
/// <param name="ransacReprojThreshold">
/// The maximum allowed reprojection error to treat a point pair as an inlier.
/// The parameter is only used in RANSAC-based homography estimation.
/// E.g. if dstPoints coordinates are measured in pixels with pixel-accurate precision, it makes sense to set this parameter somewhere in the range ~1..3.
/// </param>
/// <param name="mask">Optional output mask set by a robust method (CV_RANSAC or CV_LMEDS). Note that the input mask values are ignored.</param>
public static void FindHomography(
    PointF[] srcPoints,
    PointF[] dstPoints,
    IOutputArray homography,
    CvEnum.HomographyMethod method,
    double ransacReprojThreshold = 3,
    IOutputArray mask = null)
{
    GCHandle srcHandle = GCHandle.Alloc(srcPoints, GCHandleType.Pinned);
    GCHandle dstHandle = GCHandle.Alloc(dstPoints, GCHandleType.Pinned);
    try
    {
        using (Mat srcPointMatrix = new Mat(srcPoints.Length, 2, DepthType.Cv32F, 1, srcHandle.AddrOfPinnedObject(), 8))
        using (Mat dstPointMatrix = new Mat(dstPoints.Length, 2, DepthType.Cv32F, 1, dstHandle.AddrOfPinnedObject(), 8))
        {
            CvInvoke.FindHomography(srcPointMatrix, dstPointMatrix, homography, method, ransacReprojThreshold, mask);
        }
    }
    finally
    {
        srcHandle.Free();
        dstHandle.Free();
    }
}
Developer: neutmute | Project: emgucv | Lines: 40 | Source: CvInvokeCalib3d.cs
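A minimal usage sketch for the wrapper above, mapping the corners of a 100x100 square onto a skewed quadrilateral (the point values are illustrative, and HomographyMethod.Ransac is assumed from Emgu's CvEnum):

PointF[] srcCorners =
{
    new PointF(0, 0), new PointF(100, 0),
    new PointF(100, 100), new PointF(0, 100)
};
PointF[] dstCorners =
{
    new PointF(10, 5), new PointF(95, 0),
    new PointF(100, 110), new PointF(0, 90)
};
using (Mat homography = new Mat())
{
    CvInvoke.FindHomography(srcCorners, dstCorners, homography, CvEnum.HomographyMethod.Ransac, 3);
    // homography now holds the 3x3 transform (check IsEmpty in case estimation failed).
}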
Example 18: FlannColoredModelPoints
public FlannColoredModelPoints(List<Tuple<CvPoint3D64f, CvColor>> modelPoints, IndexParams indexParams, SearchParams searchParams, double colorScale)
{
    _modelPoints = modelPoints;
    _modelMat = new CvMat(_modelPoints.Count, 6, MatrixType.F32C1);
    unsafe
    {
        float* modelArr = _modelMat.DataSingle;
        foreach (var tuple in _modelPoints)
        {
            *(modelArr++) = (float)tuple.Item1.X;
            *(modelArr++) = (float)tuple.Item1.Y;
            *(modelArr++) = (float)tuple.Item1.Z;
            *(modelArr++) = (float)(tuple.Item2.R * colorScale / 255);
            *(modelArr++) = (float)(tuple.Item2.G * colorScale / 255);
            *(modelArr++) = (float)(tuple.Item2.B * colorScale / 255);
        }
    }
    _colorScale = colorScale;
    _modelDataMat = new Mat(_modelMat);
    _indexParam = indexParams;
    _searchParam = searchParams;
    _indexParam.IsEnabledDispose = false;
    _searchParam.IsEnabledDispose = false;
    _flannIndex = new Index(_modelDataMat, _indexParam);
}
Developer: guozanhua | Project: KinectMotionCapture | Lines: 26 | Source: ColoredIterativePointMatching.cs
Example 19: Start
// Use this for initialization
void Start()
{
    Texture2D inputTexture = Resources.Load ("lena") as Texture2D;
    Mat inputMat = new Mat (inputTexture.height, inputTexture.width, CvType.CV_8UC4);
    Utils.texture2DToMat (inputTexture, inputMat);
    Debug.Log ("inputMat dst ToString " + inputMat.ToString ());
    Mat src_mat = new Mat (4, 1, CvType.CV_32FC2);
    Mat dst_mat = new Mat (4, 1, CvType.CV_32FC2);
    src_mat.put (0, 0, 0.0, 0.0, inputMat.rows (), 0.0, 0.0, inputMat.cols (), inputMat.rows (), inputMat.cols ());
    dst_mat.put (0, 0, 0.0, 0.0, inputMat.rows (), 200.0, 0.0, inputMat.cols (), inputMat.rows (), inputMat.cols () - 200.0);
    Mat perspectiveTransform = Imgproc.getPerspectiveTransform (src_mat, dst_mat);
    Mat outputMat = inputMat.clone ();
    Imgproc.warpPerspective (inputMat, outputMat, perspectiveTransform, new Size (inputMat.rows (), inputMat.cols ()));
    Texture2D outputTexture = new Texture2D (outputMat.cols (), outputMat.rows (), TextureFormat.RGBA32, false);
    Utils.matToTexture2D (outputMat, outputTexture);
    gameObject.GetComponent<Renderer> ().material.mainTexture = outputTexture;
}
Developer: ygx2011 | Project: OpenCVForUnity | Lines: 27 | Source: WrapPerspectiveSample.cs
Example 20: Start
// Use this for initialization
void Start()
{
    rgbMat = new Mat ();
    capture = new VideoCapture ();
    capture.open (Utils.getFilePath ("768x576_mjpeg.mjpeg"));
    if (capture.isOpened ()) {
        Debug.Log ("capture.isOpened() true");
    } else {
        Debug.Log ("capture.isOpened() false");
    }
    Debug.Log ("CAP_PROP_FORMAT: " + capture.get (Videoio.CAP_PROP_FORMAT));
    Debug.Log ("CV_CAP_PROP_PREVIEW_FORMAT: " + capture.get (Videoio.CV_CAP_PROP_PREVIEW_FORMAT));
    Debug.Log ("CAP_PROP_POS_MSEC: " + capture.get (Videoio.CAP_PROP_POS_MSEC));
    Debug.Log ("CAP_PROP_POS_FRAMES: " + capture.get (Videoio.CAP_PROP_POS_FRAMES));
    Debug.Log ("CAP_PROP_POS_AVI_RATIO: " + capture.get (Videoio.CAP_PROP_POS_AVI_RATIO));
    Debug.Log ("CAP_PROP_FRAME_COUNT: " + capture.get (Videoio.CAP_PROP_FRAME_COUNT));
    Debug.Log ("CAP_PROP_FPS: " + capture.get (Videoio.CAP_PROP_FPS));
    Debug.Log ("CAP_PROP_FRAME_WIDTH: " + capture.get (Videoio.CAP_PROP_FRAME_WIDTH));
    Debug.Log ("CAP_PROP_FRAME_HEIGHT: " + capture.get (Videoio.CAP_PROP_FRAME_HEIGHT));
    // rgbMat, capture, texture, frameWidth and frameHeight are class fields of this
    // sample; frameWidth/frameHeight are assumed to be initialized elsewhere
    // (e.g. 768x576 to match the clip above).
    texture = new Texture2D ((int)(frameWidth), (int)(frameHeight), TextureFormat.RGBA32, false);
    gameObject.transform.localScale = new Vector3 ((float)frameWidth, (float)frameHeight, 1);
    float widthScale = (float)Screen.width / (float)frameWidth;
    float heightScale = (float)Screen.height / (float)frameHeight;
    if (widthScale < heightScale) {
        Camera.main.orthographicSize = ((float)frameWidth * (float)Screen.height / (float)Screen.width) / 2;
    } else {
        Camera.main.orthographicSize = (float)frameHeight / 2;
    }
    gameObject.GetComponent<Renderer> ().material.mainTexture = texture;
}
Developer: ygx2011 | Project: OpenCVForUnity | Lines: 36 | Source: HOGDescriptorSample.cs
Note: The Mat class examples in this article are compiled from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. For redistribution and use, please refer to the corresponding project's license; do not reproduce without permission.