This article collects typical usage examples of the C# KinectSensor class. If you have been wondering what KinectSensor is for, how to use it, or what it looks like in real code, the curated class examples below should help.
Twenty KinectSensor code examples follow, sorted by popularity by default. You can upvote the ones you like or find useful; your feedback helps the system recommend better C# examples.
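As a quick orientation before the examples, here is a minimal sketch of the common KinectSensor lifecycle. It assumes the Kinect for Windows SDK 2.0 API that most of the examples below use (field names are illustrative; the exact namespace varies between the Unity, WPF, and WinRT examples):

// Minimal KinectSensor lifecycle sketch (Kinect for Windows SDK 2.0).
KinectSensor sensor = KinectSensor.GetDefault();   // the single supported sensor
if (sensor != null)
{
    if (!sensor.IsOpen)
    {
        sensor.Open();   // start streaming; IsAvailable flips to true once a device is connected
    }

    // ... open readers from sensor.ColorFrameSource, sensor.BodyFrameSource, etc. ...

    sensor.Close();      // release the device when finished
}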
Example 1: Start
// Use this for initialization
void Start()
{
    // Set up the Kinect connection
    sensor = KinectSensor.GetDefault();
    if (sensor != null)
    {
        bodyReader = sensor.BodyFrameSource.OpenReader();
        // faceFrameSources = new FaceFrameSource[bodyCount];
        // faceFrameReaders = new FaceFrameReader[bodyCount];
        //
        // for (int i = 0; i < bodyCount; ++i)
        // {
        //     faceFrameSources[i] = FaceFrameSource.Create(sensor, 0, FaceFrameFeatures.LookingAway);
        //     faceFrameReaders[i] = faceFrameSources[i].OpenReader();
        // }
        //
        // faceFrameResults = new FaceFrameResult[bodyCount];
        if (!sensor.IsOpen)
            sensor.Open();
    }
    else
        Debug.Log("No Kinect sensor found. Check connections / power.");
}
Developer: JimVincent, Project: SensoryDataProject, Lines: 27, Source: KinectManager_Body.cs
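The snippet opens bodyReader but never consumes frames. In Unity the usual pattern is to poll the reader each tick in Update(); a minimal sketch, assuming a Body[] field named bodies alongside the fields above (the method body is an assumption, not part of the original project):

void Update()
{
    if (bodyReader == null) return;

    using (var frame = bodyReader.AcquireLatestFrame())
    {
        if (frame == null) return;   // no new frame this tick

        if (bodies == null)
            bodies = new Body[sensor.BodyFrameSource.BodyCount];

        frame.GetAndRefreshBodyData(bodies);   // refresh the tracked bodies in place
    }
}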
Example 2: Start
// Use this for initialization
void Start()
{
    // Note: this example uses the Kinect v1 SDK API (KinectSensors / SkeletonStream).
    // Find a Kinect sensor
    KinectSensorCollection kinectSensors = KinectSensor.KinectSensors;
    if (kinectSensors.Count == 0)
    {
        this.sensor = null;
        throw new UnityException("Could not find a Kinect sensor.");
    }
    // Enable the skeleton stream
    this.sensor = kinectSensors[0];
    this.sensor.SkeletonStream.Enable();
    if (!this.sensor.SkeletonStream.IsEnabled)
    {
        throw new UnityException("Sensor could not be enabled.");
    }
    // Create the input processor
    this.inputProcessor = new SeatedInfoProcessor();
    //inputProcessor = new InputProcessor(this.sensor.CoordinateMapper, DepthImageFormat.Resolution320x240Fps30);
    this.InputInfo = null;
}
Developer: DavidIllidge, Project: Portfolio, Lines: 26, Source: KinectInputController.cs
Example 3: kinect_IsAvailableChanged
void kinect_IsAvailableChanged( KinectSensor sender, IsAvailableChangedEventArgs args )
{
    // Kinect was connected
    if ( args.IsAvailable ) {
        // Set up color on first connection
        if ( colorFrameReader == null ) {
            // Create the color frame description (BGRA format)
            colorFrameDesc = kinect.ColorFrameSource.CreateFrameDescription( ColorImageFormat.Bgra );
            colorBitmap = new WriteableBitmap( colorFrameDesc.Width, colorFrameDesc.Height );
            colorBuffer = new byte[colorFrameDesc.Width * colorFrameDesc.Height * colorFrameDesc.BytesPerPixel];

            // Open the color reader
            colorFrameReader = kinect.ColorFrameSource.OpenReader();
            colorFrameReader.FrameArrived += colorFrameReader_FrameArrived;
        }

        ImageColor.Source = colorBitmap;
        TextStatus.Text = "Kinect connected";
    }
    // Kinect was unplugged
    else {
        // Clear the image
        ImageColor.Source = null;
        TextStatus.Text = "Kinect disconnected";
    }
}
Developer: kaorun55, Project: Kinect-for-Windows-SDK-v2.0-Samples, Lines: 31, Source: MainPage.xaml.cs
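The colorFrameReader_FrameArrived handler wired above is not shown. A sketch of its assumed body, following the WinRT color-frame pattern and the fields defined in the example (the original project's handler may differ):

void colorFrameReader_FrameArrived( ColorFrameReader sender, ColorFrameArrivedEventArgs args )
{
    using ( var colorFrame = args.FrameReference.AcquireFrame() ) {
        if ( colorFrame == null ) {
            return;
        }

        // Convert the raw frame to BGRA and push it into the bitmap's pixel buffer
        colorFrame.CopyConvertedFrameDataToArray( colorBuffer, ColorImageFormat.Bgra );
        colorBuffer.CopyTo( colorBitmap.PixelBuffer );
        colorBitmap.Invalidate();   // request a redraw
    }
}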
Example 4: OnNavigatedTo
protected override void OnNavigatedTo( NavigationEventArgs e )
{
    base.OnNavigatedTo( e );
    try {
        // Open the Kinect
        kinect = KinectSensor.GetDefault();
        if ( kinect == null ) {
            throw new Exception( "Cannot open the Kinect" );
        }
        kinect.Open();

        // Create the color frame description (BGRA format)
        colorFrameDesc = kinect.ColorFrameSource.CreateFrameDescription( ColorImageFormat.Bgra );
        colorBitmap = new WriteableBitmap( colorFrameDesc.Width, colorFrameDesc.Height );
        ImageColor.Source = colorBitmap;
        colorBuffer = new byte[colorFrameDesc.Width * colorFrameDesc.Height * colorFrameDesc.BytesPerPixel];

        // Open the color reader
        colorFrameReader = kinect.ColorFrameSource.OpenReader();
        colorFrameReader.FrameArrived += colorFrameReader_FrameArrived;
    }
    catch ( Exception ex ) {
        MessageDialog dlg = new MessageDialog( ex.Message );
        dlg.ShowAsync();
    }
}
Developer: kaorun55, Project: Kinect-for-Windows-SDK-v2.0-Samples, Lines: 30, Source: MainPage.xaml.cs
Example 5: MainPage
public MainPage()
{
    InitializeComponent();

    _sensor = KinectSensor.GetDefault();
    if (_sensor != null)
    {
        _sensor.Open();

        _bodies = new Body[_sensor.BodyFrameSource.BodyCount];

        _colorReader = _sensor.ColorFrameSource.OpenReader();
        _colorReader.FrameArrived += ColorReader_FrameArrived;
        _bodyReader = _sensor.BodyFrameSource.OpenReader();
        _bodyReader.FrameArrived += BodyReader_FrameArrived;

        // 2) Initialize the face source with the desired features
        _faceSource = new FaceFrameSource(_sensor, 0, FaceFrameFeatures.BoundingBoxInColorSpace |
                                                      FaceFrameFeatures.FaceEngagement |
                                                      FaceFrameFeatures.Glasses |
                                                      FaceFrameFeatures.Happy |
                                                      FaceFrameFeatures.LeftEyeClosed |
                                                      FaceFrameFeatures.MouthOpen |
                                                      FaceFrameFeatures.PointsInColorSpace |
                                                      FaceFrameFeatures.RightEyeClosed);
        _faceReader = _faceSource.OpenReader();
        _faceReader.FrameArrived += FaceReader_FrameArrived;
    }
}
Developer: jeremyjohnston, Project: kinect-2-face-basics, Lines: 30, Source: MainPage.xaml.cs
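The features requested above surface in FaceReader_FrameArrived. A sketch of how such a handler typically reads them via the Kinect face result API (the original handler is not shown; this body is an assumption):

void FaceReader_FrameArrived(FaceFrameReader sender, FaceFrameArrivedEventArgs args)
{
    using (var frame = args.FrameReference.AcquireFrame())
    {
        if (frame == null || frame.FaceFrameResult == null) return;

        var result = frame.FaceFrameResult;

        // Each requested feature maps to a DetectionResult (Yes / No / Maybe / Unknown)
        DetectionResult happy = result.FaceProperties[FaceProperty.Happy];
        DetectionResult glasses = result.FaceProperties[FaceProperty.WearingGlasses];

        // Bounding box in color-space pixels, usable for drawing an overlay
        RectI box = result.FaceBoundingBoxInColorSpace;
    }
}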
Example 6: MainPage
public MainPage()
{
    InitializeComponent();
    _navigationHelper = new NavigationHelper(this);

    _sensor = KinectSensor.GetDefault();
    if (_sensor != null)
    {
        _sensor.Open();
        _reader = _sensor.OpenMultiSourceFrameReader(FrameSourceTypes.Color | FrameSourceTypes.Depth | FrameSourceTypes.Infrared | FrameSourceTypes.Body);
        _reader.MultiSourceFrameArrived += Reader_MultiSourceFrameArrived;
        _gestureController = new GestureController();
        _gestureController.GestureRecognized += GestureController_GestureRecognized;

        // Initialize the gesture detection objects for our gestures
        // (kept inside the null check so _sensor is never dereferenced when no sensor is present)
        this.gestureDetectorList = new List<GestureDetector>();

        // Create a gesture detector for each body (6 bodies => 6 detectors)
        int maxBodies = this._sensor.BodyFrameSource.BodyCount;
        for (int i = 0; i < maxBodies; ++i)
        {
            GestureResultView result = new GestureResultView(i, false, false, 0.0f);
            GestureDetector detector = new GestureDetector(this._sensor, result);
            result.PropertyChanged += GestureResult_PropertyChanged;
            this.gestureDetectorList.Add(detector);
        }
    }
}
Developer: akashdeepgoel, Project: Identified-Flying-Object, Lines: 35, Source: MainPage.xaml.cs
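Reader_MultiSourceFrameArrived is referenced but not shown. A sketch of its assumed skeleton: each source is unpacked from the composite frame and can independently be null (how the bodies are fed to the GestureController depends on that library and is omitted here):

void Reader_MultiSourceFrameArrived(MultiSourceFrameReader sender, MultiSourceFrameArrivedEventArgs e)
{
    MultiSourceFrame multiFrame = e.FrameReference.AcquireFrame();
    if (multiFrame == null) return;

    using (BodyFrame bodyFrame = multiFrame.BodyFrameReference.AcquireFrame())
    {
        if (bodyFrame != null)
        {
            var bodies = new Body[bodyFrame.BodyCount];
            bodyFrame.GetAndRefreshBodyData(bodies);
            // ... hand the tracked bodies to _gestureController / gestureDetectorList ...
        }
    }
}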
Example 7: Start
void Start ()
{
    _Sensor = KinectSensor.GetDefault();
    if (_Sensor != null)
    {
        _Reader = _Sensor.OpenMultiSourceFrameReader(FrameSourceTypes.Color | FrameSourceTypes.Depth);

        var colorFrameDesc = _Sensor.ColorFrameSource.CreateFrameDescription(ColorImageFormat.Rgba);
        ColorWidth = colorFrameDesc.Width;
        ColorHeight = colorFrameDesc.Height;
        _ColorTexture = new Texture2D(colorFrameDesc.Width, colorFrameDesc.Height, TextureFormat.RGBA32, false);
        _ColorData = new byte[colorFrameDesc.BytesPerPixel * colorFrameDesc.LengthInPixels];

        var depthFrameDesc = _Sensor.DepthFrameSource.FrameDescription;
        _DepthData = new ushort[depthFrameDesc.LengthInPixels];
        _DepthTexture = new Texture2D(depthFrameDesc.Width, depthFrameDesc.Height, TextureFormat.ARGB32, false);

        if (!_Sensor.IsOpen)
        {
            _Sensor.Open();
        }
    }
}
Developer: ly774508966, Project: Kinect-Puppets-Unity, Lines: 26, Source: MultiSourceManager.cs
Example 8: GestureDetector
/// <summary>
/// Initializes a new instance of the GestureDetector class along with the gesture frame source and reader
/// </summary>
/// <param name="kinectSensor">Active sensor to initialize the VisualGestureBuilderFrameSource object with</param>
/// <param name="kinectManager">KinectManager associated with this detector</param>
public GestureDetector(KinectSensor kinectSensor, KinectManager kinectManager)
{
    if (kinectSensor == null)
    {
        throw new ArgumentNullException("kinectSensor");
    }

    // create the vgb source. The associated body tracking ID will be set when a valid body frame arrives from the sensor.
    this.vgbFrameSource = VisualGestureBuilderFrameSource.Create(kinectSensor, 0);
    this.vgbFrameSource.TrackingIdLost += this.Source_TrackingIdLost;

    // open the reader for the vgb frames
    this.vgbFrameReader = this.vgbFrameSource.OpenReader();
    if (this.vgbFrameReader != null)
    {
        this.vgbFrameReader.IsPaused = true;
        this.vgbFrameReader.FrameArrived += this.Reader_GestureFrameArrived;
    }

    // load the shooting gesture from the gesture database
    using (VisualGestureBuilderDatabase database = VisualGestureBuilderDatabase.Create(this.gestureDatabase))
    {
        // we could load all available gestures in the database with a call to vgbFrameSource.AddGestures(database.AvailableGestures),
        // but for this program, we only want to track one discrete gesture from the database, so we load it by name
        foreach (Gesture gesture in database.AvailableGestures)
        {
            if (gesture.Name.Equals(this.shootingGestureName))
            {
                this.vgbFrameSource.AddGesture(gesture);
            }
        }
    }
}
Developer: oliverfei, Project: KinectShooting, Lines: 38, Source: GestureDetector.cs
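Reader_GestureFrameArrived, wired above, is where the discrete gesture results arrive. A sketch of its typical shape against the Visual Gesture Builder API (the original handler is not shown; the body below is an assumption):

void Reader_GestureFrameArrived(object sender, VisualGestureBuilderFrameArrivedEventArgs e)
{
    using (var frame = e.FrameReference.AcquireFrame())
    {
        if (frame == null || frame.DiscreteGestureResults == null) return;

        foreach (Gesture gesture in this.vgbFrameSource.Gestures)
        {
            DiscreteGestureResult result;
            if (frame.DiscreteGestureResults.TryGetValue(gesture, out result) && result != null)
            {
                // result.Detected and result.Confidence drive the application logic
            }
        }
    }
}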
Example 9: OnNavigatedTo
protected override void OnNavigatedTo( NavigationEventArgs e )
{
    base.OnNavigatedTo( e );
    try {
        // Open the Kinect
        kinect = KinectSensor.GetDefault();
        if ( kinect == null ) {
            throw new Exception( "Cannot open the Kinect" );
        }
        kinect.Open();

        // Get the infrared frame description
        infraredFrameDesc = kinect.InfraredFrameSource.FrameDescription;

        // Create buffers for rendering the image
        infraredBitmapBuffer = new byte[infraredFrameDesc.LengthInPixels * 4];
        infraredBitmap = new WriteableBitmap(
            infraredFrameDesc.Width, infraredFrameDesc.Height );
        ImageInfrared.Source = infraredBitmap;
        infraredBuffer = new ushort[infraredFrameDesc.LengthInPixels];

        // Open the infrared frame reader
        infraredFrameReader = kinect.InfraredFrameSource.OpenReader();
        infraredFrameReader.FrameArrived += infraredFrameReader_FrameArrived;
    }
    catch ( Exception ex ) {
        MessageDialog dlg = new MessageDialog( ex.Message );
        dlg.ShowAsync();
    }
}
Developer: noa99kee, Project: K4W2-Book, Lines: 33, Source: MainPage.xaml.cs
Example 10: OnNavigatedTo
protected override void OnNavigatedTo( NavigationEventArgs e )
{
    base.OnNavigatedTo( e );
    try {
        // Open the Kinect
        kinect = KinectSensor.GetDefault();
        kinect.Open();

        // Get the depth frame description for display
        depthFrameDesc = kinect.DepthFrameSource.FrameDescription;

        // Open the depth reader
        depthFrameReader = kinect.DepthFrameSource.OpenReader();
        depthFrameReader.FrameArrived += depthFrameReader_FrameArrived;

        // Buffers for display
        depthBitmap = new WriteableBitmap( depthFrameDesc.Width,
                                           depthFrameDesc.Height );
        ImageDepth.Source = depthBitmap;
        depthBuffer = new ushort[depthFrameDesc.LengthInPixels];
        depthBitmapBuffer = new byte[depthFrameDesc.LengthInPixels * 4];
        depthPoint = new Point( depthFrameDesc.Width / 2,
                                depthFrameDesc.Height / 2 );
    }
    catch ( Exception ex ) {
        MessageDialog dlg = new MessageDialog( ex.Message );
        dlg.ShowAsync();
    }
}
Developer: noa99kee, Project: K4W2-Book, Lines: 32, Source: MainPage.xaml.cs
Example 11: Start
// Use this for initialization
void Start () {
    print("START");

    /* Get the Kinect sensor and start reading data */
    // Get the default Kinect sensor
    sensor = KinectSensor.GetDefault();
    if (sensor != null)
    {
        // We have a sensor connected
        print("SENSOR CONNECTED");

        // Open the connection / start reading the data
        reader = sensor.BodyFrameSource.OpenReader();
        if (!sensor.IsOpen)
        {
            sensor.Open();
        }
    }
    else
    {
        print("NO KINECT CONNECTED");
    }
    print(sensor);
}
Developer: SamNChiet, Project: KinectVR-Hackathon, Lines: 30, Source: BodyManager.cs
Example 12: MainPage
public MainPage()
{
    // one sensor is currently supported
    this.kinectSensor = KinectSensor.GetDefault();

    SetupCurrentDisplay(DEFAULT_DISPLAYFRAMETYPE);

    this.multiSourceFrameReader =
        this.kinectSensor.OpenMultiSourceFrameReader(
            FrameSourceTypes.Infrared
            | FrameSourceTypes.Color
            | FrameSourceTypes.Depth);
    this.multiSourceFrameReader.MultiSourceFrameArrived +=
        this.Reader_MultiSourceFrameArrived;

    // set IsAvailableChanged event notifier
    this.kinectSensor.IsAvailableChanged += this.Sensor_IsAvailableChanged;

    // use the window object as the view model in this simple example
    this.DataContext = this;

    // open the sensor
    this.kinectSensor.Open();

    this.InitializeComponent();
}
Developer: horace-hou, Project: Kinect-Project, Lines: 27, Source: mainpage.xaml.cs
Example 13: Start
// Use this for initialization
void Start () {
    mySensor = KinectSensor.GetDefault();
    if (mySensor != null)
    {
        // Total array of data representing a single rendered frame
        colorFrameData = new byte[colorWidth * colorHeight * bytes_per_pixel];
        backgroundTex = new Texture2D(colorWidth, colorHeight, TextureFormat.BGRA32, false);

        if (!mySensor.IsOpen)
        {
            mySensor.Open();
        }
        msFrameReader = mySensor.OpenMultiSourceFrameReader(FrameSourceTypes.Color);

        // Rendering user as part of the Unity scene background via the main camera
        Rect cameraRect = Camera.main.pixelRect;
        float rectHeight = cameraRect.height;
        float rectWidth = cameraRect.width;

        if (rectWidth > rectHeight)
            rectWidth = rectHeight * colorWidth / colorHeight;
        else
            rectHeight = rectWidth * colorHeight / colorWidth;

        float foregroundOfsX = (cameraRect.width - rectWidth) / 2;
        float foregroundOfsY = (cameraRect.height - rectHeight) / 2;
        foregroundImgRect = new Rect(foregroundOfsX, foregroundOfsY, rectWidth, rectHeight);
        foregroundGuiRect = new Rect(foregroundOfsX, cameraRect.height - foregroundOfsY, rectWidth, -rectHeight);
    }
} // End of Start()
Developer: iEmily, Project: nutra-ninja, Lines: 35, Source: MyColorDataDisplay.cs
Example 14: FaceTracker
/// <summary>
/// Initializes a new instance of the FaceTracker class from a reference to the Kinect device.
/// </summary>
/// <param name="sensor">Reference to the Kinect sensor instance</param>
public FaceTracker(KinectSensor sensor)
{
    // Note: this example targets the Kinect v1 SDK (ColorStream/DepthStream API).
    if (sensor == null) {
        throw new ArgumentNullException("sensor");
    }
    if (!sensor.ColorStream.IsEnabled) {
        throw new InvalidOperationException("Color stream is not enabled yet.");
    }
    if (!sensor.DepthStream.IsEnabled) {
        throw new InvalidOperationException("Depth stream is not enabled yet.");
    }

    this.operationMode = OperationMode.Kinect;
    this.coordinateMapper = sensor.CoordinateMapper;
    this.initializationColorImageFormat = sensor.ColorStream.Format;
    this.initializationDepthImageFormat = sensor.DepthStream.Format;

    var newColorCameraConfig = new CameraConfig(
        (uint)sensor.ColorStream.FrameWidth,
        (uint)sensor.ColorStream.FrameHeight,
        sensor.ColorStream.NominalFocalLengthInPixels,
        FaceTrackingImageFormat.FTIMAGEFORMAT_UINT8_B8G8R8X8);
    var newDepthCameraConfig = new CameraConfig(
        (uint)sensor.DepthStream.FrameWidth,
        (uint)sensor.DepthStream.FrameHeight,
        sensor.DepthStream.NominalFocalLengthInPixels,
        FaceTrackingImageFormat.FTIMAGEFORMAT_UINT16_D13P3);

    this.Initialize(newColorCameraConfig, newDepthCameraConfig, IntPtr.Zero, IntPtr.Zero, this.DepthToColorCallback);
}
Developer: ushadow, Project: handinput, Lines: 35, Source: FaceTracker.cs
Example 15: MainPage
public MainPage()
{
    // one sensor is currently supported
    this.kinectSensor = KinectSensor.GetDefault();

    // get the infraredFrameDescription from the InfraredFrameSource
    FrameDescription infraredFrameDescription = this.kinectSensor.InfraredFrameSource.FrameDescription;

    // open the reader for the infrared frames
    this.infraredFrameReader = this.kinectSensor.InfraredFrameSource.OpenReader();

    // wire handler for frame arrival
    this.infraredFrameReader.FrameArrived += this.Reader_InfraredFrameArrived;

    // allocate space to put the pixels being received and converted
    this.infraredFrameData = new ushort[infraredFrameDescription.Width * infraredFrameDescription.Height];
    this.infraredPixels = new byte[infraredFrameDescription.Width * infraredFrameDescription.Height * BytesPerPixel];

    // create the bitmap to display
    this.bitmap = new WriteableBitmap(infraredFrameDescription.Width, infraredFrameDescription.Height);
    this.CurrentFrameDescription = infraredFrameDescription;

    // set IsAvailableChanged event notifier
    this.kinectSensor.IsAvailableChanged += this.Sensor_IsAvailableChanged;

    // use the window object as the view model in this simple example
    this.DataContext = this;

    // open the sensor
    this.kinectSensor.Open();

    this.InitializeComponent();
}
Developer: ruscles, Project: tutorial, Lines: 34, Source: MainPage.xaml.cs
Example 16: Start
// Use this for initialization
void Start()
{
    mySensor = KinectSensor.GetDefault();
    if (mySensor != null)
    {
        if (!mySensor.IsOpen)
        {
            mySensor.Open();
        }
        ninjaTex = new Texture2D(colorWidth, colorHeight, TextureFormat.BGRA32, false);
        msFrameReader = mySensor.OpenMultiSourceFrameReader(FrameSourceTypes.Body | FrameSourceTypes.Depth |
                                                            FrameSourceTypes.BodyIndex);

        // There has to be a more efficient way of tracking these (i.e. using OOP)
        rightHandQueue_X = new Queue<float>();
        rightHandQueue_Y = new Queue<float>();
        rightHandQueue_T = new Queue<float>();
        leftHandQueue_X = new Queue<float>();
        leftHandQueue_Y = new Queue<float>();
        leftHandQueue_T = new Queue<float>();
        rightFootQueue_X = new Queue<float>();
        rightFootQueue_Y = new Queue<float>();
        rightFootQueue_T = new Queue<float>();
        leftFootQueue_X = new Queue<float>();
        leftFootQueue_Y = new Queue<float>();
        leftFootQueue_T = new Queue<float>();

        /* Construct StreamWriter objects for collecting user data */
        sw_v = new StreamWriter("EMILY_V.txt");
        sw_t = new StreamWriter("EMILY_T.txt");
        sw_x = new StreamWriter("EMILY_X.txt");
        sw_y = new StreamWriter("EMILY_Y.txt");

        InitializeSlashRenderer();

        // Rendering user as part of the Unity scene background via the main camera
        Rect cameraRect = Camera.main.pixelRect;
        float rectHeight = cameraRect.height;
        float rectWidth = cameraRect.width;

        if (rectWidth > rectHeight)
            rectWidth = rectHeight * colorWidth / colorHeight;
        else
            rectHeight = rectWidth * colorHeight / colorWidth;

        float foregroundOfsX = (cameraRect.width - rectWidth) / 2;
        float foregroundOfsY = (cameraRect.height - rectHeight) / 2;
        foregroundImgRect = new Rect(foregroundOfsX, foregroundOfsY, rectWidth, rectHeight);
        foregroundGuiRect = new Rect(foregroundOfsX, cameraRect.height - foregroundOfsY, rectWidth, -rectHeight);
    }
}
Developer: iEmily, Project: nutra-ninja, Lines: 60, Source: JointGestureManager.cs
Example 17: Start
void Start () {
    _Sensor = KinectSensor.GetDefault();
    if (_Sensor != null) {
        _Reader = _Sensor.BodyFrameSource.OpenReader();
        if (!_Sensor.IsOpen)
            _Sensor.Open();
    }
}
Developer: patricio272, Project: VoxelVR, Lines: 8, Source: SkeletonManager.cs
Example 18: ZigInputKinectOne
public ZigInputKinectOne()
{
    _sensor = KinectSensor.GetDefault();
    _mapper = _sensor.CoordinateMapper;
    _depth = new KinectOneDepth(_sensor);
    _image = new KinectOneImage(_sensor);
    _labelMap = new KinectOneLabelMap(_sensor);
}
Developer: josephMG, Project: KinectOneForZigFu, Lines: 8, Source: ZigInputKinectOne.cs
Example 19: Start
void Start()
{
    _Sensor = KinectSensor.GetDefault();
    if (_Sensor != null)
    {
        _Reader = _Sensor.DepthFrameSource.OpenReader();
        _Data = new ushort[_Sensor.DepthFrameSource.FrameDescription.LengthInPixels];
    }
}
Developer: Ran4, Project: Blopper, Lines: 10, Source: DepthSourceManager.cs
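Note that unlike the other Unity examples, this one never calls _Sensor.Open(); presumably another component opens the sensor. Consuming the depth data then follows the usual polling pattern; a minimal sketch (the Update() below is an assumption, not part of the original file):

void Update()
{
    if (_Reader == null) return;

    using (var frame = _Reader.AcquireLatestFrame())
    {
        if (frame != null)
        {
            frame.CopyFrameDataToArray(_Data);   // raw depth values, in millimeters per pixel
        }
    }
}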
Example 20: MainPage
public MainPage()
{
    // one sensor is currently supported
    this.kinectSensor = KinectSensor.GetDefault();

    // open the sensor
    this.kinectSensor.Open();

    this.InitializeComponent();
}
Developer: kutabar, Project: tutorial, Lines: 10, Source: MainPage.xaml.cs
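None of the page-based examples show teardown. A minimal sketch of the matching cleanup, assuming the standard navigation override (hypothetical; a real project would likely dispose its frame readers here as well):

protected override void OnNavigatedFrom(NavigationEventArgs e)
{
    base.OnNavigatedFrom(e);

    if (this.kinectSensor != null)
    {
        this.kinectSensor.Close();   // stop streaming and release the device
        this.kinectSensor = null;
    }
}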
Note: The KinectSensor class examples in this article are collected from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their authors; copyright of the source code remains with the original authors. Refer to each project's license before redistributing or reusing the code; do not repost without permission.