C# ColorImagePoint Code Examples


This article collects typical usage examples of ColorImagePoint in C#. If you have been wondering what ColorImagePoint is for, how to use it, or what working examples look like, the curated code samples below may help.



The ColorImagePoint type belongs to the Microsoft.Kinect namespace (Kinect for Windows SDK v1). A total of 20 code examples of the type are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C# code samples.
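
Before the examples, a minimal hedged sketch of how a ColorImagePoint is typically produced in Kinect SDK 1.x may help orient readers: a tracked skeleton joint is mapped into color-image pixel coordinates via the sensor's CoordinateMapper. The sensor and skeleton names below are illustrative assumptions, not taken from any of the examples.

 // Sketch only: assumes an initialized KinectSensor (sensor) and a tracked Skeleton (skeleton).
 ColorImagePoint MapHeadToColor(KinectSensor sensor, Skeleton skeleton)
 {
     SkeletonPoint head = skeleton.Joints[JointType.Head].Position;

     // The returned X/Y are pixel coordinates in the requested color-image format.
     return sensor.CoordinateMapper.MapSkeletonPointToColorPoint(
         head, ColorImageFormat.RgbResolution640x480Fps30);
 }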

Example 1: adjustHandPosition

 private void adjustHandPosition(FrameworkElement element, ColorImagePoint hand, Double half_shoulder_width)
 {
     element.Width = half_shoulder_width;
     element.Height = half_shoulder_width;
     Canvas.SetLeft(element, hand.X - element.Width / 2);
     Canvas.SetTop(element, hand.Y - element.Height / 2);
 }
Developer: Kilen, Project: intelligent_mirror, Lines: 7, Source: MainWindow.xaml.cs


Example 2: CameraPosition

 private void CameraPosition(FrameworkElement element, ColorImagePoint point)
 {
     //Divide by 2 for width and height so point is right in the middle
     // instead of in top/left corner
     Canvas.SetLeft(element, point.X - element.Width / 2);
     Canvas.SetTop(element, point.Y - element.Height / 2);
 }
Developer: samgooi4189, Project: Meme_Skeletal_Detection, Lines: 7, Source: MainWindow.xaml.cs


Example 3: ConvertDepthColor

        /// <summary>
        /// Convert depth data into a color-coded image
        /// </summary>
        /// <param name="kinect"></param>
        /// <param name="depthFrame"></param>
        /// <returns></returns>
        private byte[] ConvertDepthColor( KinectSensor kinect, DepthImageFrame depthFrame )
        {
            ColorImageStream colorStream = kinect.ColorStream;
            DepthImageStream depthStream = kinect.DepthStream;

            // Get the per-pixel data from the depth camera
            short[] depthPixel = new short[depthFrame.PixelDataLength];
            depthFrame.CopyPixelDataTo( depthPixel );

            // Get the RGB-camera coordinates that correspond to the depth-camera coordinates (registration)
            ColorImagePoint[] colorPoint = new ColorImagePoint[depthFrame.PixelDataLength];
            kinect.MapDepthFrameToColorFrame( depthStream.Format, depthPixel,
                colorStream.Format, colorPoint );

            byte[] depthColor = new byte[depthFrame.PixelDataLength * Bgr32BytesPerPixel];
            for ( int index = 0; index < depthPixel.Length; index++ ) {
                // Extract the player ID and the distance from the depth-camera data
                int player = depthPixel[index] & DepthImageFrame.PlayerIndexBitmask;
                int distance = depthPixel[index] >> DepthImageFrame.PlayerIndexBitmaskWidth;

                // The mapped coordinates can exceed the frame size, so clamp to the smaller value
                int x = Math.Min( colorPoint[index].X, colorStream.FrameWidth - 1 );
                int y = Math.Min( colorPoint[index].Y, colorStream.FrameHeight - 1 );
                int colorIndex = ((y * depthFrame.Width) + x) * Bgr32BytesPerPixel;

                if ( player != 0 ) {
                    depthColor[colorIndex] = 255;
                    depthColor[colorIndex + 1] = 255;
                    depthColor[colorIndex + 2] = 255;
                }
                else {
                    // Not supported: 0-40 cm
                    if ( distance == depthStream.UnknownDepth ) {
                        depthColor[colorIndex] = 0;
                        depthColor[colorIndex + 1] = 0;
                        depthColor[colorIndex + 2] = 255;
                    }
                    // Too near: 40 cm-80 cm (default mode)
                    else if ( distance == depthStream.TooNearDepth ) {
                        depthColor[colorIndex] = 0;
                        depthColor[colorIndex + 1] = 255;
                        depthColor[colorIndex + 2] = 0;
                    }
                    // Too far: beyond 3 m (Near mode) / 4 m (Default mode), up to 8 m
                    else if ( distance == depthStream.TooFarDepth ) {
                        depthColor[colorIndex] = 255;
                        depthColor[colorIndex + 1] = 0;
                        depthColor[colorIndex + 2] = 0;
                    }
                    // Valid depth data
                    else {
                        depthColor[colorIndex] = 0;
                        depthColor[colorIndex + 1] = 255;
                        depthColor[colorIndex + 2] = 255;
                    }
                }
            }

            return depthColor;
        }
Developer: hatsunea, Project: KinectSDKBook4CS, Lines: 66, Source: MainWindow.xaml.cs


Example 4: AddLine

        private void AddLine(ColorImagePoint p1, ColorImagePoint p2)
        {
            Line myLine = new Line();
            myLine.Stroke = System.Windows.Media.Brushes.Black;

            myLine.X1 = p1.X;
            myLine.X2 = p2.X;
            myLine.Y1 = p1.Y;
            myLine.Y2 = p2.Y;
            myLine.StrokeThickness = 1;
            cvs.Children.Add(myLine);
        }
Developer: yolandax, Project: 799, Lines: 12, Source: MainMenu.xaml.cs


Example 5: CameraPosition

        private void CameraPosition(FrameworkElement element, ColorImagePoint point)
        {
            Canvas.SetLeft(element, point.X - element.Width / 2);
            Canvas.SetTop(element, point.Y - element.Height / 2);

            // Check if you're choosing any of the choices
            if (element.Name.Equals("rightEllipse"))
            {
                if(Canvas.GetLeft(element) > clickLeftBorder)
                {
                    Console.WriteLine("You clicked");
                    switch (greenIndex)
                    {
                        case 0:
                            clickLabel.Content = "Bottom box clicked";
                            break;
                        case 1:
                            clickLabel.Content = "Top box clicked";
                            break;
                        case 2:
                            clickLabel.Content = "Middle box clicked";
                            break;
                    }
                }
            }
            else if (element.Name.Equals("leftEllipse"))
            {
                if (Canvas.GetLeft(element) < rollRightBorder)
                {
                    Console.WriteLine("You be rollin");

                    if (rolling == false)
                    {
                        rollTimer.Start();
                        rolling = true;
                    }
                }
                else
                {
                    if(rolling == true)
                    {
                        rollTimer.Stop();
                        rolling = false;
                    }
                }
            }
        }
Developer: Crumble, Project: TDDD57, Lines: 47, Source: MainWindow.xaml.cs


Example 6: BackgroundMask

        /// <summary>
        /// Show only the player (background removal)
        /// </summary>
        /// <param name="colorFrame"></param>
        /// <param name="depthFrame"></param>
        /// <returns></returns>
        private byte[] BackgroundMask( KinectSensor kinect,
            ColorImageFrame colorFrame, DepthImageFrame depthFrame )
        {
            ColorImageStream colorStream = kinect.ColorStream;
            DepthImageStream depthStream = kinect.DepthStream;

            // Get the per-pixel data from the RGB camera
            byte[] colorPixel = new byte[colorFrame.PixelDataLength];
            colorFrame.CopyPixelDataTo( colorPixel );

            // Get the per-pixel data from the depth camera
            short[] depthPixel = new short[depthFrame.PixelDataLength];
            depthFrame.CopyPixelDataTo( depthPixel );

            // Get the RGB-camera coordinates that correspond to the depth-camera coordinates (registration)
            ColorImagePoint[] colorPoint = new ColorImagePoint[depthFrame.PixelDataLength];
            kinect.MapDepthFrameToColorFrame( depthStream.Format, depthPixel,
                colorStream.Format, colorPoint );

            // Output buffer (initialized to white (255,255,255))
            byte[] outputColor = new byte[colorPixel.Length];
            for ( int i = 0; i < outputColor.Length; i += Bgr32BytesPerPixel ) {
                outputColor[i] = 255;
                outputColor[i + 1] = 255;
                outputColor[i + 2] = 255;
            }

            for ( int index = 0; index < depthPixel.Length; index++ ) {
                // Get the player index
                int player = depthPixel[index] & DepthImageFrame.PlayerIndexBitmask;

                // The mapped coordinates can exceed the frame size, so clamp to the smaller value
                int x = Math.Min( colorPoint[index].X, colorStream.FrameWidth - 1 );
                int y = Math.Min( colorPoint[index].Y, colorStream.FrameHeight - 1 );
                int colorIndex = ((y * depthFrame.Width) + x) * Bgr32BytesPerPixel;

                // Only where a player was detected, use the RGB camera image
                if ( player != 0 ) {
                    outputColor[colorIndex] = colorPixel[colorIndex];
                    outputColor[colorIndex + 1] = colorPixel[colorIndex + 1];
                    outputColor[colorIndex + 2] = colorPixel[colorIndex + 2];
                }
            }

            return outputColor;
        }
Developer: hatsunea, Project: KinectSDKBook4CS, Lines: 52, Source: MainWindow.xaml.cs


Example 7: CheckFacePosition

        /// <summary>
        /// Get the face position
        /// </summary>
        /// <param name="headPosition">Color-image coordinates of the skeleton's head</param>
        /// <returns>Face rectangle</returns>
        private Rect CheckFacePosition( ColorImagePoint headPosition )
        {
            // Size of the region to crop
            int snipWidth = 200;
            int snipHeight = 200;

            // Rect to return (initialized to the skeleton head position and the size of image2)
            Rect reRect = new Rect(headPosition.X, headPosition.Y,
                                   image2.Width, image2.Height);

            storage.Clear();
            openCVGrayImage.ResetROI();          // Reset the ROI because this is sometimes called with an ROI still set

            openCVImage.CopyFrom( outputImage );                                        // WriteableBitmap -> IplImage
            Cv.CvtColor( openCVImage, openCVGrayImage, ColorConversion.BgrToGray );     // Convert the image to grayscale
            Cv.EqualizeHist( openCVGrayImage, openCVGrayImage );                        // Equalize the histogram

            // Face detection
            try {
                // Crop the image around the head
                var snipImage = SnipFaceImage( openCVGrayImage, headPosition, snipWidth, snipHeight );

                if ( snipImage != null ) {
                    CvSeq<CvAvgComp> faces = Cv.HaarDetectObjects( snipImage, cascade, storage );

                    // If a face was detected
                    if ( faces.Total > 0 ) {
                        reRect.X = faces[0].Value.Rect.X + (headPosition.X - snipWidth / 2);
                        reRect.Y = faces[0].Value.Rect.Y + (headPosition.Y - snipHeight / 2);
                        reRect.Width = faces[0].Value.Rect.Width;
                        reRect.Height = faces[0].Value.Rect.Height;
                    }
                }
            }
            catch ( Exception ) { }

            return reRect;
        }
Developer: hatsunea, Project: KinectSDKBook4CS, Lines: 43, Source: MainWindow.xaml.cs


Example 8: IsSteady

        /// <summary>
        /// Check whether the point has been steady (not moving)
        /// </summary>
        /// <param name="skeletonFrame"></param>
        /// <param name="point"></param>
        /// <returns></returns>
        bool IsSteady( SkeletonFrame skeletonFrame, ColorImagePoint point )
        {
            var currentPoint = new FramePoint()
            {
                Point = point,
                TimeStamp = skeletonFrame.Timestamp,
            };

            // If "milliseconds" has elapsed since the base point, the point is considered steady
            if ( (currentPoint.TimeStamp - basePoint.TimeStamp) > milliseconds ) {
                basePoint = currentPoint;
                return true;
            }

            // If the point has moved more than "threshold", update basePoint and start measuring again
            if ( Math.Abs( currentPoint.Point.X - basePoint.Point.X ) > threshold
                || Math.Abs( currentPoint.Point.Y - basePoint.Point.Y ) > threshold ) {

                // The point moved, so shift the base point to the new position and restart the measurement
                basePoint = currentPoint;
            }

            return false;
        }
Developer: hatsunea, Project: KinectSDKBook4CS, Lines: 30, Source: MainWindow.xaml.cs


Example 9: IsKnownPoint

 //
 // Summary:
 //     Tests whether the ColorImagePoint has a known value.
 //
 // Parameters:
 //   colorImagePoint:
 //     The ColorImagePoint to test.
 //
 // Returns:
 //     Returns true if the ColorImagePoint has a known value, false otherwise.
 public static bool IsKnownPoint(ColorImagePoint colorImagePoint);
Developer: Styrna, Project: TKinect, Lines: 11, Source: Microsoft.Kinect.KinectSensor.cs
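
A hedged usage sketch for IsKnownPoint, assuming a KinectSensor field named sensor and the static overload shown above; DrawJoint is a hypothetical helper used only for illustration. Joints whose mapped color coordinates are not known (for example, when the mapping fails) are simply skipped.

 // Sketch only: sensor is an initialized KinectSensor field; DrawJoint is a hypothetical helper.
 private void DrawJointIfKnown(Joint joint)
 {
     ColorImagePoint point = sensor.CoordinateMapper.MapSkeletonPointToColorPoint(
         joint.Position, ColorImageFormat.RgbResolution640x480Fps30);

     // Draw only joints whose mapped color coordinates are valid.
     if (KinectSensor.IsKnownPoint(point))
     {
         DrawJoint(point.X, point.Y);
     }
 }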


Example 10: DepthToColorCallback

        /// <summary>
        /// Callback to help with mapping depth pixel to color pixel data. Uses Kinect sensor's MapDepthToColorImagePoint to 
        /// do the conversion
        /// </summary>
        /// <returns>
        /// The depth to color callback.
        /// </returns>
        private int DepthToColorCallback(
            uint depthFrameWidth,
            uint depthFrameHeight,
            uint colorFrameWidth,
            uint colorFrameHeight,
            float zoomFactor,
            Point viewOffset,
            int depthX,
            int depthY,
            ushort depthZ,
            out int colorX,
            out int colorY)
        {
            int retCode = 0;
            colorX = 0;
            colorY = 0;

            if (this.sensor != null)
            {
                var colorPoint = new ColorImagePoint();
                try
                {
                    DepthImagePoint depthImagePoint = new DepthImagePoint()
                    {
                        X = depthX,
                        Y = depthY,
                        Depth = depthZ,
                    };

                    colorPoint = this.sensor.CoordinateMapper.MapDepthPointToColorPoint(
                        this.sensor.DepthStream.Format,
                        depthImagePoint,
                        this.sensor.ColorStream.Format);
                }
                catch (InvalidOperationException e)
                {
                    string traceStr = string.Format(
                        CultureInfo.CurrentCulture,
                        "Exception on MapDepthToColorImagePoint while translating depth point({0},{1},{2}). Exception={3}",
                        depthX,
                        depthY,
                        depthZ,
                        e.Message);
                    Trace.WriteLineIf(this.traceLevel >= TraceLevel.Error, traceStr, TraceCategory);

                    retCode = -1;
                }

                colorX = colorPoint.X;
                colorY = colorPoint.Y;
            }
            else
            {
                retCode = -1;
            }

            return retCode;
        }
Developer: hameleon-ed, Project: dx11-vvvv, Lines: 65, Source: FaceTracker.cs


Example 11: ConvertDepthColor

        /// <summary>
        /// Convert depth data into a color-coded image
        /// </summary>
        /// <param name="kinect"></param>
        /// <param name="depthFrame"></param>
        /// <returns></returns>
        private byte[] ConvertDepthColor( KinectSensor kinect, DepthImageFrame depthFrame )
        {
            ColorImageStream colorStream = kinect.ColorStream;
            DepthImageStream depthStream = kinect.DepthStream;

            // Get the per-pixel data from the depth camera
            short[] depthPixel = new short[depthFrame.PixelDataLength];
            depthFrame.CopyPixelDataTo( depthPixel );

            // Get the RGB-camera coordinates that correspond to the depth-camera coordinates (registration)
            ColorImagePoint[] colorPoint = new ColorImagePoint[depthFrame.PixelDataLength];
            kinect.MapDepthFrameToColorFrame( depthStream.Format, depthPixel,
                colorStream.Format, colorPoint );

            byte[] depthColor = new byte[depthFrame.PixelDataLength * Bgr32BytesPerPixel];
            for ( int index = 0; index < depthPixel.Length; index++ ) {
                // Extract the player ID and the distance from the depth-camera data
                int player = depthPixel[index] & DepthImageFrame.PlayerIndexBitmask;
                int distance = depthPixel[index] >> DepthImageFrame.PlayerIndexBitmaskWidth;

                // The mapped coordinates can exceed the frame size, so clamp to the smaller value
                int x = Math.Min( colorPoint[index].X, colorStream.FrameWidth - 1 );
                int y = Math.Min( colorPoint[index].Y, colorStream.FrameHeight - 1 );
                int colorIndex = ((y * depthFrame.Width) + x) * Bgr32BytesPerPixel;

                // If this pixel belongs to a player
                if ( player != 0 ) {
                    // Color only the enabled players
                    if ( enablePlayer[player] ) {
                        depthColor[colorIndex] = playerColor[player].B;
                        depthColor[colorIndex + 1] = playerColor[player].G;
                        depthColor[colorIndex + 2] = playerColor[player].R;
                    }
                }
            }

            return depthColor;
        }
Developer: hatsunea, Project: KinectSDKBook4CS, Lines: 44, Source: MainWindow.xaml.cs


Example 12: CameraPosition

 private void CameraPosition(FrameworkElement element, ColorImagePoint point)
 {
     Console.Out.WriteLine(point.X.ToString());
     Canvas.SetLeft(element, point.X - element.Width / 2);
     Canvas.SetTop(element, point.Y - element.Height / 2);
 }
Developer: nayak16, Project: Kinect-Experimentation-, Lines: 6, Source: MainWindow.xaml.cs


Example 13: SensorAllFramesReady

        private byte[][] SensorAllFramesReady(object sender, AllFramesReadyEventArgs e)
        {
            bool depthReceived = false;
            bool colorReceived = false;

            DepthImagePixel[] depthPixels;
            byte[] colorPixels;
            ColorImagePoint[] colorCoordinates;
            int colorToDepthDivisor;
            byte[] greenScreenPixelData;

            // Allocate space to put the color pixels we'll create
            depthPixels = new DepthImagePixel[this.kinectSensor.DepthStream.FramePixelDataLength];
            colorPixels = new byte[this.kinectSensor.ColorStream.FramePixelDataLength];
            greenScreenPixelData = new byte[this.kinectSensor.DepthStream.FramePixelDataLength];
            colorCoordinates = new ColorImagePoint[this.kinectSensor.DepthStream.FramePixelDataLength];

            int colorWidth = this.kinectSensor.ColorStream.FrameWidth;
            int colorHeight = this.kinectSensor.ColorStream.FrameHeight;
            colorToDepthDivisor = colorWidth / 640;

            byte[][] results = new byte[2][]; // kinectSensor.DepthStream.FramePixelDataLength];

            DepthImageFormat DepthFormat = DepthImageFormat.Resolution640x480Fps30;
            ColorImageFormat ColorFormat = ColorImageFormat.RgbResolution640x480Fps30;

            using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
            {
                if (null != depthFrame)
                {
                    // Copy the pixel data from the image to a temporary array
                    depthFrame.CopyDepthImagePixelDataTo(depthPixels);
                    depthReceived = true;
                }
            }

            using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
            {
                if (null != colorFrame)
                {
                    // Copy the pixel data from the image to a temporary array
                    this.outputColorBitmap = new WriteableBitmap(640, 480, 96, 96, PixelFormats.Bgr32, null);
                    colorFrame.CopyPixelDataTo(colorPixels);
                    colorReceived = true;
                }
            }

            if (true == depthReceived)
            {
                this.kinectSensor.CoordinateMapper.MapDepthFrameToColorFrame(
                    DepthFormat,
                    depthPixels,
                    ColorFormat,
                    colorCoordinates);

                Array.Clear(greenScreenPixelData, 0, greenScreenPixelData.Length);

                // loop over each row and column of the depth
                for (int y = 0; y < 480; ++y)
                {
                    for (int x = 0; x < 640; ++x)
                    {
                        // calculate index into depth array
                        int depthIndex = x + (y * 640);

                        DepthImagePixel depthPixel = depthPixels[depthIndex];

                        int player = depthPixel.PlayerIndex;

                        // if we're tracking a player for the current pixel, do green screen
                        if (player > 0)
                        {
                            // retrieve the depth to color mapping for the current depth pixel
                            ColorImagePoint colorImagePoint = colorCoordinates[depthIndex];

                            // scale color coordinates to depth resolution
                            int colorInDepthX = colorImagePoint.X / colorToDepthDivisor;
                            int colorInDepthY = colorImagePoint.Y / colorToDepthDivisor;

                            // make sure the depth pixel maps to a valid point in color space
                            if (colorInDepthX > 0 && colorInDepthX < 640 && colorInDepthY >= 0 && colorInDepthY < 480)
                            {
                                // calculate index into the green screen pixel array
                                int greenScreenIndex = colorInDepthX + (colorInDepthY * 640);

                                // set opaque
                                greenScreenPixelData[greenScreenIndex] = 33;

                                // compensate for depth/color not corresponding exactly by setting the pixel
                                // to the left to opaque as well
                                greenScreenPixelData[greenScreenIndex - 1] = 33;
                            }
                        }
                    }
                }
            }

            if (true == colorReceived)
            {
                // Write the pixel data into our bitmap
//......... part of the code is omitted here .........
Developer: robinj, Project: parse-client, Lines: 101, Source: BasicTracker.xaml.cs


Example 14: SaveBuffer

        /// <summary>
        /// Save the Kinect images into a buffer
        /// </summary>
        /// <param name="kinectDevice"></param>
        /// <param name="colorFrame"></param>
        /// <param name="depthFrame"></param>
        private void SaveBuffer(ColorImageFrame colorFrame, DepthImageFrame depthFrame, SkeletonFrame skeletonFrame)
        {
            if (kinectDevice == null || depthFrame == null || colorFrame == null || skeletonFrame == null) return;

            ColorImageStream colorStream = kinectDevice.ColorStream;
            DepthImageStream depthStream = kinectDevice.DepthStream;
            screenImageStride = kinectDevice.DepthStream.FrameWidth * colorFrame.BytesPerPixel;

            int colorStride = colorFrame.BytesPerPixel * colorFrame.Width; // 4 x image width

            int ImageIndex = 0;

            depthFrame.CopyPixelDataTo(_depthPixelData);
            colorFrame.CopyPixelDataTo(_colorPixelData);

            ColorImagePoint[] colorPoint = new ColorImagePoint[depthFrame.PixelDataLength];
            short[] depthPixel = new short[depthFrame.PixelDataLength];
            kinectDevice.MapDepthFrameToColorFrame(depthFrame.Format, depthPixel, colorFrame.Format, colorPoint);

            byte[] byteRoom = new byte[depthFrame.Height * screenImageStride];
            byte[] bytePlayer = new byte[depthFrame.Height * screenImageStride];

            double[] depth = new double[depthFrame.Height * screenImageStride];
            int[] playerIndexArray = new int[depthFrame.Height * screenImageStride];

            for (int depthY = 0; depthY < depthFrame.Height; depthY++)
            {
                for (int depthX = 0; depthX < depthFrame.Width; depthX++, ImageIndex += colorFrame.BytesPerPixel)
                {
                    //ImageIndex += colorFrame.BytesPerPixel;
                    int depthPixelIndex = depthX + (depthY * depthFrame.Width);

                    int playerIndex = _depthPixelData[depthPixelIndex] & DepthImageFrame.PlayerIndexBitmask; // Get the player ID

                    int x = Math.Min(colorPoint[depthPixelIndex].X, colorStream.FrameWidth - 1);
                    int y = Math.Min(colorPoint[depthPixelIndex].Y, colorStream.FrameHeight - 1);

                    int colorPixelIndex = (x * colorFrame.BytesPerPixel) + (y * colorStride);

                    if (playerIndex != 0)
                    {
                        bytePlayer[ImageIndex] = _colorPixelData[colorPixelIndex];           //Blue
                        bytePlayer[ImageIndex + 1] = _colorPixelData[colorPixelIndex + 1];   //Green
                        bytePlayer[ImageIndex + 2] = _colorPixelData[colorPixelIndex + 2];   //Red
                        bytePlayer[ImageIndex + 3] = 0xFF;                             //Alpha

                        // Get the depth of this pixel
                        depth[ImageIndex] = _depthPixelData[depthPixelIndex] >> DepthImageFrame.PlayerIndexBitmaskWidth;
                        playerIndexArray[ImageIndex] = playerIndex;
                    }
                    else
                    {
                        byteRoom[ImageIndex] = _colorPixelData[colorPixelIndex];           //Blue
                        byteRoom[ImageIndex + 1] = _colorPixelData[colorPixelIndex + 1];   //Green
                        byteRoom[ImageIndex + 2] = _colorPixelData[colorPixelIndex + 2];   //Red
                        byteRoom[ImageIndex + 3] = 0xFF;                             //Alpha
                    }

                }
            }

            // Save the player data into the ring buffer
            ringbuf.save_framedata(ref bytePlayer);
            //ringbuf.save_depthdata(depth);
            ringbuf.save_playerIndexdata(playerIndexArray);
            ringbuf.set_nextframe();

            //// Write the bytes out to a bitmap
            //_room_bitmap.WritePixels(_screenImageRect, byteRoom, screenImageStride, 0);
            //room_image.Source = _room_bitmap;

            RenderScreen2();
        }
Developer: sugasaki, Project: kinect-etc, Lines: 79, Source: MainWindow.xaml.cs


Example 15: MoveToCameraPosition

 private void MoveToCameraPosition(FrameworkElement element, ColorImagePoint point)
 {
     Canvas.SetLeft(element, point.X );
     Canvas.SetTop(element, point.Y );
 }
Developer: Cocotus, Project: kinect, Lines: 5, Source: NoddingWindow.xaml.cs


Example 16: kinectPlayerImage

        private void kinectPlayerImage(ColorImageFrame colorFrame, DepthImageFrame depthFrame)
        {
            if (colorFrame == null || depthFrame == null) { return;  }

            // Image color index
            const int BlueIndex = 0;
            const int GreenIndex = 1;
            const int RedIndex = 2;
            const int AlphaIndex = 3;

            // Get color image
            byte[] colorPixels = new Byte[colorFrame.PixelDataLength];
            colorFrame.CopyPixelDataTo(colorPixels);

            // Get depth image
            short[] rawDepthData = new short[depthFrame.PixelDataLength];
            depthFrame.CopyPixelDataTo(rawDepthData);

            // Create array to hold depth mapping data.
            ColorImagePoint[] _mappedDepthLocations = new ColorImagePoint[depthFrame.PixelDataLength];
            kinectSensorChooser.Kinect.MapDepthFrameToColorFrame(DepthImageFormat.Resolution640x480Fps30, rawDepthData, ColorImageFormat.RgbResolution640x480Fps30, _mappedDepthLocations);

            // Each index in depth array is equal to 4 pixels in color array (B, G, R, A)
            for (int depthIndex = 0, colorIndex = 0;
                depthIndex < rawDepthData.Length && colorIndex < colorPixels.Length;
                depthIndex++, colorIndex += 4)
            {
                // Get the player (requires skeleton tracking enabled for values)
                int player = rawDepthData[depthIndex] & DepthImageFrame.PlayerIndexBitmask;
                if (player > 0)
                {
                    // This pixel belongs to a tracked player; make the matching color pixel opaque
                    ColorImagePoint point = _mappedDepthLocations[depthIndex];
                    if ((point.X >= 0 && point.X < colorFrame.Width) && (point.Y >= 0 && point.Y < colorFrame.Height))
                    {
                        int baseIndex = (point.Y * colorFrame.Width + point.X) * 4;
                        colorPixels[baseIndex + AlphaIndex] = 255;
                    }
                }
            }

            // Update the image
            int stride = colorFrame.Width * 4; // (B,G,R,Empty)
            playerBitmap.WritePixels(new Int32Rect(0, 0, playerBitmap.PixelWidth, playerBitmap.PixelHeight), colorPixels, stride, 0);
            playerImage.Source = playerBitmap;
        }
Developer: cableman, Project: kinect_samples, Lines: 46, Source: MainWindow.xaml.cs


Example 17: C

 public ColorImagePoint[] C()
 {
     ColorImagePoint[] a = new ColorImagePoint[640 * 480];
     this.sensor.CoordinateMapper.MapDepthFrameToColorFrame(DepthImageFormat.Resolution640x480Fps30, this.depthPixels, ColorImageFormat.RgbResolution640x480Fps30, a);
     return a;
 }
Developer: sarsadsl, Project: fall_down_detect_with_chart, Lines: 6, Source: MainWindow.xaml.cs


Example 18: OpticalCamouflage

        /// <summary>
        /// Optical camouflage (hide the player behind the background image)
        /// </summary>
        /// <param name="kinect"></param>
        /// <param name="colorFrame"></param>
        /// <param name="depthFrame"></param>
        /// <returns></returns>
        private byte[] OpticalCamouflage( KinectSensor kinect,
            ColorImageFrame colorFrame, DepthImageFrame depthFrame )
        {
            ColorImageStream colorStream = kinect.ColorStream;
            DepthImageStream depthStream = kinect.DepthStream;

            // Get the per-pixel data from the RGB camera
            byte[] colorPixel = new byte[colorFrame.PixelDataLength];
            colorFrame.CopyPixelDataTo( colorPixel );

            // If no background has been captured yet, save the current frame as the background
            if ( backPixel == null ) {
                backPixel = new byte[colorFrame.PixelDataLength];
                Array.Copy( colorPixel, backPixel, backPixel.Length );
            }

            // Get the per-pixel data from the depth camera
            short[] depthPixel = new short[depthFrame.PixelDataLength];
            depthFrame.CopyPixelDataTo( depthPixel );

            // Get the RGB-camera coordinates that correspond to the depth-camera coordinates (registration)
            ColorImagePoint[] colorPoint = new ColorImagePoint[depthFrame.PixelDataLength];
            kinect.MapDepthFrameToColorFrame( depthStream.Format, depthPixel,
                colorStream.Format, colorPoint );

            // Output buffer (initialized to the RGB camera image)
            byte[] outputColor = new byte[colorPixel.Length];
            Array.Copy( colorPixel, outputColor, outputColor.Length );

            for ( int index = 0; index < depthPixel.Length; index++ ) {
                // Get the player index
                int player = depthPixel[index] & DepthImageFrame.PlayerIndexBitmask;

                // The mapped coordinates can exceed the frame size, so clamp to the smaller value
                int x = Math.Min( colorPoint[index].X, colorStream.FrameWidth - 1 );
                int y = Math.Min( colorPoint[index].Y, colorStream.FrameHeight - 1 );
                int colorIndex = ((y * depthFrame.Width) + x) * Bgr32BytesPerPixel;

                // Where a player was detected, use the background image instead
                if ( player != 0 ) {
                    outputColor[colorIndex] = backPixel[colorIndex];
                    outputColor[colorIndex + 1] = backPixel[colorIndex + 1];
                    outputColor[colorIndex + 2] = backPixel[colorIndex + 2];
                }
            }

            return outputColor;
        }
Developer: hatsunea, Project: KinectSDKBook4CS, Lines: 55, Source: MainWindow.xaml.cs


Example 19: GetROI

        void GetROI(Skeleton user, DepthImageFrame depthFrame , ColorImageFrame color_frame = null)
        {

            
            // Map skeleton to Depth 
            DepthImagePoint rightHandPoint = 
                _sensor.CoordinateMapper.MapSkeletonPointToDepthPoint(user.Joints[JointType.HandRight].Position, DepthImageFormat.Resolution640x480Fps30);

            DepthImagePoint rightWristPoint =
                _sensor.CoordinateMapper.MapSkeletonPointToDepthPoint(user.Joints[JointType.WristRight].Position, DepthImageFormat.Resolution640x480Fps30);

            int hand_depth = (rightHandPoint.Depth>rightWristPoint.Depth)?rightHandPoint.Depth:rightWristPoint.Depth+10; // hand depth used for segmenting out the hand


            //*********************************** Map The depth Image to color Image to align the color image************************************************************************

            DepthImagePixel[] depthImagePixels = new DepthImagePixel[depthFrame.PixelDataLength];
            depthFrame.CopyDepthImagePixelDataTo(depthImagePixels);

            short[] rawDepthData = new short[depthFrame.PixelDataLength];
            depthFrame.CopyPixelDataTo(rawDepthData);

            ColorImagePoint[] mapped_depth_locations = new ColorImagePoint[depthFrame.PixelDataLength];

            _sensor.CoordinateMapper.MapDepthFrameToColorFrame(DepthImageFormat.Resolution640x480Fps30, depthImagePixels, ColorImageFormat.RgbResolution640x480Fps30, mapped_depth_locations);
            byte[] aligned_colorPixels = new byte[color_frame.PixelDataLength];  // creating a byte array for storing the aligned pixel values

            byte[] original_colorPixels = new byte[color_frame.PixelDataLength];
            color_frame.CopyPixelDataTo(original_colorPixels);
            int aligned_image_index = 0;
            //int hand_baseindex = rightHandPoint.Y*640 + rightHandPoint.X;
            for (int i = 0; i < mapped_depth_locations.Length; i++)
            {
                
                int depth = rawDepthData[i] >> DepthImageFrame.PlayerIndexBitmaskWidth;
                //Console.WriteLine(depth);
                ColorImagePoint point = mapped_depth_locations[i];
                
                if ((point.X >= 0 && point.X < 640) && (point.Y >= 0 && point.Y < 480))
                {
                    int baseIndex = (point.Y * 640 + point.X) * 4;
                    if (depth < hand_depth && depth != -1)
                    {
                        
                        aligned_colorPixels[aligned_image_index] = original_colorPixels[baseIndex];
                        aligned_colorPixels[aligned_image_index + 1] = original_colorPixels[baseIndex + 1];
                        aligned_colorPixels[aligned_image_index + 2] = original_colorPixels[baseIndex + 2];
                        aligned_colorPixels[aligned_image_index + 3] = 0;
                    }
                    else
                    {
                        aligned_colorPixels[aligned_image_index] = 0;
                        aligned_colorPixels[aligned_image_index + 1] = 0;
                        aligned_colorPixels[aligned_image_index + 2] = 0;
                        aligned_colorPixels[aligned_image_index + 3] = 0;
                    }
                } 
                aligned_image_index = aligned_image_index + 4;

                // *************************** Now modify the contents of this aligned_colorBitmap using the depth information ***************************************************
            }

            //***********************************************************************************************************************************************************************

            int threshold = 20;
            
            int hand_length = 3 * Math.Max(Math.Abs(rightHandPoint.X - rightWristPoint.X), Math.Abs(rightHandPoint.Y - rightWristPoint.Y));

          //  int hand_length = (int)Math.Sqrt((rightHandPoint.X - rightWristPoint.X) ^ 2 + (rightHandPoint.Y - rightWristPoint.Y) ^ 2);
           
            int hand_length_old = hand_length;
            //****************************Low pass filter for hand_length*********************************

            if (Math.Abs(hand_length - hand_length_old) > threshold)
                hand_length = hand_length_old;

            //************************************************************************************************

           // Console.WriteLine(hand_length);
            int top_left_X_depth = rightHandPoint.X - hand_length;
            int top_left_Y_depth = rightHandPoint.Y - hand_length;
            int top_left_Z_depth = rightHandPoint.Depth;


            top_left_X_depth = (top_left_X_depth<0)? 0 : top_left_X_depth;
            top_left_Y_depth = (top_left_Y_depth<0)? 0 : top_left_Y_depth;

            DepthImagePoint top_left = new DepthImagePoint();
            top_left.X = top_left_X_depth;
//......... part of the code is omitted here .........
Developer: varun-invent, Project: M.Tech-Project, Lines: 101, Source: MainWindow.xaml.cs


Example 20: MapDepthFrameToColorFrame

 public void MapDepthFrameToColorFrame(DepthImageFormat depthImageFormat, short[] depthPixelData, ColorImageFormat colorImageFormat, ColorImagePoint[] colorCoordinates);
Developer: Styrna
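
A hedged usage sketch of MapDepthFrameToColorFrame, assuming an initialized KinectSensor named kinect and a DepthImageFrame named depthFrame taken from a frame-ready event handler (both names are illustrative). The output array needs one ColorImagePoint per depth pixel; the call fills in the matching color coordinates, as the mapping examples above also show.

 // Sketch only: kinect and depthFrame are assumed to come from the surrounding event handler.
 short[] depthPixel = new short[depthFrame.PixelDataLength];
 depthFrame.CopyPixelDataTo(depthPixel);

 // One ColorImagePoint per depth pixel; each element's X/Y gives the matching color coordinate.
 ColorImagePoint[] colorPoint = new ColorImagePoint[depthFrame.PixelDataLength];
 kinect.MapDepthFrameToColorFrame(kinect.DepthStream.Format, depthPixel,
     kinect.ColorStream.Format, colorPoint);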
