
Kinect Fusion Basics-WPF C# Sample Code Walkthrough

Original post: http://feichizhongwu.blog.51cto.com/8606684/1361339

Fellow readers: this is my first technical blog post, so please forgive any shortcomings in either the writing or the technical content.

I have recently been studying the Kinect Fusion sample code that ships with the official Microsoft SDK to understand the overall program flow, and I am posting this short write-up so we can learn from each other. Since the project is fairly long, I have copied the code directly and added comments reflecting my own understanding, in the hope that it will be useful to others working in this area.
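Before diving into the listing, the overall call flow can be summarized as follows (the method names are the sample's own):

// WindowLoaded
//   -> enable the depth stream, allocate the image buffers, create the
//      Reconstruction volume, subscribe sensor.DepthFrameReady += SensorDepthFrameReady,
//      start the FPS timer, and call ResetReconstruction()
// SensorDepthFrameReady
//   -> copy the new DepthImagePixel[] and dispatch it to ProcessDepthData
// ProcessDepthData
//   -> DepthToDepthFloatFrame -> volume.ProcessFrame (camera tracking + integration)
//   -> CalculatePointCloud -> ShadePointCloud -> colorBitmap.WritePixels
//   -> after too many consecutive tracking failures: ResetReconstruction()
// FpsTimerTick
//   -> show processedFrameCount / FpsInterval in the status bar
// WindowClosing
//   -> stop the FPS timer and the sensor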



/// <summary>
/// This program is an event-driven C# WPF sample. The main handlers are
/// WindowLoaded, ProcessDepthData, ResetReconstruction, SensorDepthFrameReady,
/// and FpsTimerTick. The flow is: when the window is initialized, WindowLoaded
/// initializes the member variables, sets up the FPS timer, calls
/// ResetReconstruction once, and activates the Kinect to capture data. Each new
/// depth frame raises SensorDepthFrameReady, which hands the depth data to
/// ProcessDepthData for processing and for displaying the depth image. Once the
/// number of consecutive tracking failures exceeds the configured limit,
/// ResetReconstruction is called again. The application keeps running until
/// WindowClosing shuts it down.
/// </summary>

namespace Microsoft.Samples.Kinect.KinectFusionBasics
{
    using System;
    using System.Diagnostics;
    using System.IO;
    using System.Windows;
    using System.Windows.Media;
    using System.Windows.Media.Imaging;
    using System.Windows.Threading;
    using Microsoft.Kinect;
    using Microsoft.Kinect.Toolkit.Fusion;

    /// <summary>
    /// Interaction logic for MainWindow.xaml
    /// </summary>
    public partial class MainWindow : Window, IDisposable
    {

        /// <summary>
        /// Max tracking error count; we will reset the reconstruction if tracking errors
        /// reach this number.
        /// </summary>
        private const int MaxTrackingErrors = 100;

        /// <summary>
        /// If set true, will automatically reset the reconstruction when MaxTrackingErrors
        /// consecutive tracking failures have occurred. (Checked in ProcessDepthData.)
        /// </summary>
        private const bool AutoResetReconstructionWhenLost = false;

        /// <summary>
        /// The resolution of the depth image to be processed.
        /// DepthImageFormat is an enum with the values Undefined, Resolution640x480Fps30,
        /// Resolution320x240Fps30, and Resolution80x60Fps30.
        /// </summary>
        private const DepthImageFormat DepthImageResolution = DepthImageFormat.Resolution640x480Fps30;

        /// <summary>
        /// The seconds interval to calculate FPS.
        /// </summary>
        private const int FpsInterval = 10;

        // Adjusting the following constants changes the size of the reconstruction
        // volume, and with it the range that can be reconstructed.

        /// <summary>
        /// The reconstruction volume voxel density in voxels per meter (vpm), i.e. how
        /// many voxels (volumetric cells) fit into one meter:
        /// 1000mm / 256vpm = ~3.9mm/voxel.
        /// </summary>
        private const int VoxelsPerMeter = 256;

        /// <summary>
        /// The reconstruction volume voxel resolution in the X axis.
        /// At a setting of 256vpm the volume is 512 / 256 = 2m wide.
        /// </summary>
        private const int VoxelResolutionX = 512;

        /// <summary>
        /// The reconstruction volume voxel resolution in the Y axis.
        /// At a setting of 256vpm the volume is 384 / 256 = 1.5m high.
        /// </summary>
        private const int VoxelResolutionY = 384;

        /// <summary>
        /// The reconstruction volume voxel resolution in the Z axis.
        /// At a setting of 256vpm the volume is 512 / 256 = 2m deep.
        /// </summary>
        private const int VoxelResolutionZ = 512;

        /// <summary>
        /// The reconstruction volume processor type. This parameter sets whether AMP
        /// (GPU, real-time) or CPU processing is used. Note that CPU processing will
        /// likely be too slow for real-time processing.
        /// </summary>
        private const ReconstructionProcessor ProcessorType = ReconstructionProcessor.Amp;

        /// <summary>
        /// The zero-based device index to choose for reconstruction processing if the
        /// ReconstructionProcessor AMP option is selected.
        /// Here we automatically choose a device to use for processing by passing -1.
        /// </summary>
        private const int DeviceToUse = -1;

        /// <summary>
        /// Parameter to translate the reconstruction based on the minimum depth setting.
        /// When set to false, the reconstruction volume +Z axis starts at the camera lens
        /// and extends into the scene. Setting this true in the constructor will move the
        /// volume forward along +Z away from the camera by the minimum depth threshold to
        /// enable capture of very small reconstruction volumes by setting a non-identity
        /// world-volume transformation in the ResetReconstruction call.
        /// Small volumes should be shifted, as the Kinect hardware has a minimum sensing
        /// limit of ~0.35m, inside which no valid depth is returned, hence it is difficult
        /// to initialize and track robustly when the majority of a small volume is inside
        /// this distance.
        /// </summary>
        private bool translateResetPoseByMinDepthThreshold = true;

        /// <summary>
        /// Minimum depth distance threshold in meters. Depth pixels below this value will
        /// be returned as invalid (0). Min depth must be positive or 0.
        /// (FusionDepthProcessor.DefaultMinimumDepth = 0.35f.)
        /// </summary>
        private float minDepthClip = FusionDepthProcessor.DefaultMinimumDepth;

        /// <summary>
        /// Maximum depth distance threshold in meters. Depth pixels above this value will
        /// be returned as invalid (0). Max depth must be greater than 0.
        /// (FusionDepthProcessor.DefaultMaximumDepth = 8.0f.)
        /// </summary>
        private float maxDepthClip = FusionDepthProcessor.DefaultMaximumDepth;

        /// <summary>
        /// Active Kinect sensor.
        /// </summary>
        private KinectSensor sensor;

        /// <summary>
        /// Bitmap that will hold color information. Allocated in WindowLoaded, where it
        /// is also bound to the Image control.
        /// </summary>
        private WriteableBitmap colorBitmap;

        /// <summary>
        /// Intermediate storage for the depth data converted to color.
        /// </summary>
        private int[] colorPixels;

        /// <summary>
        /// Intermediate storage for the depth float data converted from the depth image frame.
        /// </summary>
        private FusionFloatImageFrame depthFloatBuffer;

        /// <summary>
        /// Intermediate storage for the point cloud data converted from the depth float image frame.
        /// </summary>
        private FusionPointCloudImageFrame pointCloudBuffer;

        /// <summary>
        /// Raycast shaded surface image.
        /// </summary>
        private FusionColorImageFrame shadedSurfaceColorFrame;

        /// <summary>
        /// The transformation between the world and camera view coordinate system.
        /// </summary>
        private Matrix4 worldToCameraTransform;

        /// <summary>
        /// The default transformation between the world and volume coordinate system.
        /// </summary>
        private Matrix4 defaultWorldToVolumeTransform;

        /// <summary>
        /// The Kinect Fusion volume.
        /// </summary>
        private Reconstruction volume;

        /// <summary>
        /// The timer to calculate FPS (a System.Windows.Threading.DispatcherTimer).
        /// Frame rate = processedFrameCount / FpsInterval.
        /// </summary>
        private DispatcherTimer fpsTimer;

        /// <summary>
        /// The count of frames processed within the FPS interval (FpsInterval = 10).
        /// </summary>
        private int processedFrameCount;

        /// <summary>
        /// The count of consecutive tracking errors.
        /// </summary>
        private int trackingErrorCount;

        /// <summary>
        /// The sensor depth frame data length.
        /// </summary>
        private int frameDataLength;

        /// <summary>
        /// Whether a depth frame is currently being processed.
        /// </summary>
        private bool processingFrame;

        /// <summary>
        /// Track whether Dispose has been called.
        /// </summary>
        private bool disposed;

        /// <summary>
        /// Initializes a new instance of the MainWindow class.
        /// </summary>
        public MainWindow()
        {
            this.InitializeComponent();
        }

        /// <summary>
        /// Finalizes an instance of the MainWindow class.
        /// This destructor will run only if the Dispose method does not get called.
        /// </summary>
        ~MainWindow()
        {
            this.Dispose(false);
        }

        /// <summary>
        /// Get the image size of fusion images and bitmap.
        /// Returns the size of the depth image. DepthImageResolution is the constant
        /// defined above, and GetImageSize is the private helper defined below; for
        /// DepthImageFormat.Resolution640x480Fps30 this returns Size(640, 480).
        /// </summary>
        public static Size ImageSize
        {
            get
            {
                return GetImageSize(DepthImageResolution);
            }
        }

        /// <summary>
        /// Dispose the allocated frame buffers and reconstruction.
        /// Works together with Dispose(bool disposing).
        /// </summary>
        public void Dispose()
        {
            this.Dispose(true);

            // This object will be cleaned up by the Dispose method.
            GC.SuppressFinalize(this);
        }

        /// <summary>
        /// Frees all memory associated with the FusionImageFrame.
        /// </summary>
        /// <param name="disposing">Whether the function was called from Dispose.</param>
        protected virtual void Dispose(bool disposing)
        {
            if (!this.disposed)
            {
                // depthFloatBuffer is the buffer holding the float depth data
                if (null != this.depthFloatBuffer)
                {
                    this.depthFloatBuffer.Dispose();
                }

                if (null != this.pointCloudBuffer)
                {
                    this.pointCloudBuffer.Dispose();
                }

                if (null != this.shadedSurfaceColorFrame)
                {
                    this.shadedSurfaceColorFrame.Dispose();
                }

                if (null != this.volume)
                {
                    this.volume.Dispose();
                }

                this.disposed = true;
            }
        }

        /// <summary>
        /// Get the depth image size from the input depth image format.
        /// Called by the ImageSize property above.
        /// </summary>
        /// <param name="imageFormat">The depth image format.</param>
        /// <returns>The width and height of the input depth image format.</returns>
        private static Size GetImageSize(DepthImageFormat imageFormat)
        {
            switch (imageFormat)
            {
                case DepthImageFormat.Resolution320x240Fps30:
                    return new Size(320, 240);

                case DepthImageFormat.Resolution640x480Fps30:
                    return new Size(640, 480);

                case DepthImageFormat.Resolution80x60Fps30:
                    return new Size(80, 60);
            }

            throw new ArgumentOutOfRangeException("imageFormat");
        }

        /// <summary>
        /// Execute startup tasks. This WPF window-loaded handler initializes the sensor,
        /// the reconstruction volume, the image buffers, and the FPS timer.
        /// </summary>
        /// <param name="sender">object sending the event</param>
        /// <param name="e">event arguments</param>
        private void WindowLoaded(object sender, RoutedEventArgs e)
        {
            // Look through all sensors and start the first connected one.
            // This requires that a Kinect is connected at the time of app startup.
            // To make your app robust against plug/unplug, it is recommended to use
            // KinectSensorChooser provided in Microsoft.Kinect.Toolkit.
            // KinectSensor.KinectSensors returns the collection of attached Kinect
            // devices; this loop picks the first one that is connected.
            foreach (var potentialSensor in KinectSensor.KinectSensors)
            {
                if (potentialSensor.Status == KinectStatus.Connected)
                {
                    this.sensor = potentialSensor;
                    break;
                }
            }

            // If there is no usable sensor, report it in the status bar and stop initializing.
            if (null == this.sensor)
            {
                this.statusBarText.Text = Properties.Resources.NoKinectReady;
                return;
            }

            // Turn on the depth stream to receive depth frames.
            // Enable() is overloaded; the parameterless DepthImageStream.Enable() uses
            // the default format DepthImageFormat.Resolution640x480Fps30.
            this.sensor.DepthStream.Enable(DepthImageResolution);

            // frameDataLength is the total number of depth pixels per frame
            this.frameDataLength = this.sensor.DepthStream.FramePixelDataLength;

            // Allocate space to put the color pixels we'll create
            this.colorPixels = new int[this.frameDataLength];

            // This is the bitmap we'll display on-screen.
            // WriteableBitmap(int pixelWidth, int pixelHeight,
            //     double dpiX, double dpiY, PixelFormat pixelFormat, BitmapPalette palette)
            this.colorBitmap = new WriteableBitmap(
                (int)ImageSize.Width,
                (int)ImageSize.Height,
                96.0,               // horizontal DPI
                96.0,               // vertical DPI
                PixelFormats.Bgr32, // pixel format
                null);              // palette (none)

            // Set the image we display to point to the bitmap where we'll put the image
            // data. Image is the name of the WPF image control.
            this.Image.Source = this.colorBitmap;

            // Add an event handler to be called whenever there is new depth frame data.
            // SensorDepthFrameReady is the overall processing framework: whenever the
            // next depth frame is ready, it is invoked to handle the new data.
            this.sensor.DepthFrameReady += this.SensorDepthFrameReady;

            // VoxelsPerMeter = 256; X: 2m wide, Y: 1.5m high, Z: 2m deep.
            // volParam holds the reconstruction volume parameters.
            var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);

            // Set the world-view transform to identity, so the world origin is the initial camera location.
            this.worldToCameraTransform = Matrix4.Identity;

            try
            {
                // This creates a volume cube with the Kinect at the center of the near
                // plane, and the volume directly in front of the Kinect.
                // ProcessorType = ReconstructionProcessor.Amp (GPU reconstruction),
                // DeviceToUse = -1 (auto-select).
                this.volume = Reconstruction.FusionCreateReconstruction(volParam, ProcessorType, DeviceToUse, this.worldToCameraTransform);

                this.defaultWorldToVolumeTransform = this.volume.GetCurrentWorldToVolumeTransform();

                // translateResetPoseByMinDepthThreshold = true: move the volume forward
                // along +Z away from the camera by the minimum depth threshold to enable
                // capture of very small reconstruction volumes by setting a non-identity
                // world-volume transformation in the ResetReconstruction call.
                if (this.translateResetPoseByMinDepthThreshold)
                {
                    this.ResetReconstruction();
                }
            }
            catch (InvalidOperationException ex)
            {
                this.statusBarText.Text = ex.Message;
                return;
            }
            catch (DllNotFoundException)
            {
                this.statusBarText.Text = Properties.Resources.MissingPrerequisite;
                return;
            }

            // Allocate the frame buffers.
            // Depth frames generated from the depth input
            this.depthFloatBuffer = new FusionFloatImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

            // Point cloud frames generated from the depth float input
            this.pointCloudBuffer = new FusionPointCloudImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

            // Create images to raycast the Reconstruction Volume
            this.shadedSurfaceColorFrame = new FusionColorImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

            // Start the sensor!
            try
            {
                this.sensor.Start();
            }
            catch (IOException ex)
            {
                // Device is in use
                this.sensor = null;
                this.statusBarText.Text = ex.Message;
                return;
            }
            catch (InvalidOperationException ex)
            {
                // Device is not valid, not supported or hardware feature unavailable
                this.sensor = null;
                this.statusBarText.Text = ex.Message;
                return;
            }

            // Set Near Mode by default
            try
            {
                this.sensor.DepthStream.Range = DepthRange.Near;
                checkBoxNearMode.IsChecked = true;
            }
            catch
            {
                // device not near mode capable
            }

            // Initialize and start the FPS timer.
            // FpsTimerTick is the tick handler; with FpsInterval = 10 the timer fires
            // every 10 seconds.
            this.fpsTimer = new DispatcherTimer();
            this.fpsTimer.Tick += new EventHandler(this.FpsTimerTick);
            this.fpsTimer.Interval = new TimeSpan(0, 0, FpsInterval); // hours, minutes, seconds
            this.fpsTimer.Start();

            // Reset the reconstruction
            this.ResetReconstruction();
        }

        /// <summary>
        /// Execute shutdown tasks
        /// </summary>
        /// <param name="sender">object sending the event</param>
        /// <param name="e">event arguments</param>
        private void WindowClosing(object sender, System.ComponentModel.CancelEventArgs e)
        {
            if (null != this.fpsTimer)
            {
                this.fpsTimer.Stop();
            }

            if (null != this.sensor)
            {
                this.sensor.Stop();
            }
        }

        /// <summary>
        /// Update the FPS reading in the status text bar. The frame rate is computed
        /// once every FpsInterval (10) seconds and shown in the status bar.
        /// </summary>
        /// <param name="sender">object sending the event</param>
        /// <param name="e">event arguments</param>
        private void FpsTimerTick(object sender, EventArgs e)
        {
            // Update the FPS reading
            this.statusBarText.Text = string.Format(
                System.Globalization.CultureInfo.InvariantCulture,
                Properties.Resources.Fps,
                (double)this.processedFrameCount / FpsInterval);

            // Reset the frame count; processedFrameCount is incremented in
            // ProcessDepthData each time a frame is processed successfully.
            this.processedFrameCount = 0;
        }

        /// <summary>
        /// Event handler for the Kinect sensor's DepthFrameReady event.
        /// Subscribed in WindowLoaded; invoked whenever a new depth frame is ready.
        /// </summary>
        /// <param name="sender">object sending the event</param>
        /// <param name="e">event arguments</param>
        private void SensorDepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
        {
            using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
            {
                if (depthFrame != null && !this.processingFrame)
                {
                    // frameDataLength (assigned in WindowLoaded) is the number of pixels per frame
                    var depthPixels = new DepthImagePixel[this.frameDataLength];

                    // Copy the pixel data from the image to a temporary array
                    depthFrame.CopyDepthImagePixelDataTo(depthPixels);

                    this.Dispatcher.BeginInvoke(
                        DispatcherPriority.Background,
                        (Action<DepthImagePixel[]>)((d) => { this.ProcessDepthData(d); }),
                        depthPixels);

                    // Mark that one frame will be processed; ProcessDepthData sets this
                    // back to false when it finishes.
                    this.processingFrame = true;
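                    // Note: depth frames that arrive while processingFrame is still true
                    // are skipped entirely (the using block above disposes them), so the
                    // pipeline drops frames rather than queueing work on the UI thread.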

                }
            }
        }

        /// <summary>
        /// Process the depth input. Called (via the dispatcher) from
        /// SensorDepthFrameReady, which supplies the DepthImagePixel[] argument.
        /// </summary>
        /// <param name="depthPixels">The depth data array to be processed</param>
        private void ProcessDepthData(DepthImagePixel[] depthPixels)
        {
            // Debug.Assert reports (with a message box) if the condition is false.
            // All three of these members are initialized in WindowLoaded.
            Debug.Assert(null != this.volume, "volume should be initialized");
            Debug.Assert(null != this.shadedSurfaceColorFrame, "shaded surface should be initialized");
            Debug.Assert(null != this.colorBitmap, "color bitmap should be initialized");

            try
            {
                // Convert the depth image frame to a depth float image frame:
                // converts Kinect depth frames in unsigned short format (depth occupies
                // the upper 13 of the 16 raw bits) to depth frames in float format,
                // representing distance from the camera in meters (parallel to the
                // optical center axis). depthFloatBuffer was allocated in WindowLoaded
                // but holds no data until this call.
                FusionDepthProcessor.DepthToDepthFloatFrame(
                    depthPixels,                              // input pixel array
                    (int)ImageSize.Width,
                    (int)ImageSize.Height,
                    this.depthFloatBuffer,                    // output float depth frame
                    FusionDepthProcessor.DefaultMinimumDepth, // 0.35f
                    FusionDepthProcessor.DefaultMaximumDepth, // 8.0f
                    false);
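                // (The final parameter is mirrorDepth; passing false leaves the depth
                // image unmirrored.)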

                // ProcessFrame handles each frame (on the GPU) and does two things:
                // 1. AlignDepthFloatToReconstruction: aligns the float depth frame
                //    (depthFloatBuffer) to the reconstruction volume;
                // 2. IntegrateFrame: fuses the frame into the volume.
                // After this call completes, if a visible output image of the
                // reconstruction is required, the user can call CalculatePointCloud and
                // then ShadePointCloud. The maximum image resolution supported in this
                // function is 640x480. ProcessFrame will first calculate the camera pose
                // and then integrate if tracking is successful.
                // Returns true on success; returns false if there is a problem aligning
                // the input depth and a useful transform cannot be computed.
                bool trackingSucceeded = this.volume.ProcessFrame(
                    this.depthFloatBuffer,                           // input float depth frame
                    FusionDepthProcessor.DefaultAlignIterationCount, // max alignment iterations
                    FusionDepthProcessor.DefaultIntegrationWeight,   // max integration weight
                    this.volume.GetCurrentWorldToCameraTransform()); // latest camera transform (updated below on success)
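                // (Sketch for illustration only, not part of the sample: the Toolkit also
                // exposes the two steps of ProcessFrame individually. Roughly, and with a
                // hypothetical alignmentEnergy local, the call above corresponds to:
                //
                //     float alignmentEnergy;
                //     bool aligned = this.volume.AlignDepthFloatToReconstruction(
                //         this.depthFloatBuffer,
                //         FusionDepthProcessor.DefaultAlignIterationCount,
                //         null,                // optional delta-from-reference image
                //         out alignmentEnergy,
                //         this.volume.GetCurrentWorldToCameraTransform());
                //     if (aligned)
                //     {
                //         this.volume.IntegrateFrame(
                //             this.depthFloatBuffer,
                //             FusionDepthProcessor.DefaultIntegrationWeight,
                //             this.volume.GetCurrentWorldToCameraTransform());
                //     }
                //
                // Check the Toolkit documentation for the exact overloads.)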

                // If camera tracking failed, no data integration or raycast for reference
                // point cloud will have taken place, and the internal camera pose will be
                // unchanged.
                if (!trackingSucceeded)
                {
                    // Record one more consecutive tracking failure
                    this.trackingErrorCount++;

                    // Show tracking error on status bar
                    this.statusBarText.Text = Properties.Resources.CameraTrackingFailed;
                }
                else
                {
                    // Tracking succeeded: update the world-to-camera transform and reset
                    // the consecutive error count.
                    Matrix4 calculatedCameraPose = this.volume.GetCurrentWorldToCameraTransform();

                    // Set the camera pose and reset tracking errors
                    this.worldToCameraTransform = calculatedCameraPose;
                    this.trackingErrorCount = 0;
                }

                // Once the maximum number of consecutive tracking errors is reached,
                // automatically reset the reconstruction (if enabled).
                if (AutoResetReconstructionWhenLost && !trackingSucceeded && this.trackingErrorCount == MaxTrackingErrors)
                {
                    // Auto Reset due to bad tracking
                    this.statusBarText.Text = Properties.Resources.ResetVolume;

                    // Automatically Clear Volume and reset tracking if tracking fails
                    this.ResetReconstruction();
                }

                // To display the reconstruction on the Image control we must calculate
                // the point cloud and then shade it.
                // CalculatePointCloud returns the 3D points and normals of the
                // zero-crossing dense surface, stored into pointCloudBuffer.
                this.volume.CalculatePointCloud(this.pointCloudBuffer, this.worldToCameraTransform);

                // Shade point cloud and render
                FusionDepthProcessor.ShadePointCloud(
                    this.pointCloudBuffer,        // input point cloud
                    this.worldToCameraTransform,
                    this.shadedSurfaceColorFrame, // output shaded frame
                    null);

                // colorPixels was allocated in WindowLoaded but holds no data until now
                this.shadedSurfaceColorFrame.CopyPixelDataTo(this.colorPixels);

                // Write the pixel data into our bitmap (colorBitmap was likewise created
                // in WindowLoaded)
                this.colorBitmap.WritePixels(
                    new Int32Rect(0, 0, this.colorBitmap.PixelWidth, this.colorBitmap.PixelHeight),
                    this.colorPixels,                          // int[] pixel data
                    this.colorBitmap.PixelWidth * sizeof(int), // stride in bytes
                    0);
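                // (The stride is PixelWidth * sizeof(int) because each Bgr32 pixel
                // occupies 4 bytes, so one bitmap row is width * 4 bytes.)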

                // The input frame was processed successfully, increase the processed frame count
                ++this.processedFrameCount;
            }
            catch (InvalidOperationException ex)
            {
                this.statusBarText.Text = ex.Message;
            }
            finally
            {
                // This frame is done; processingFrame was set true in
                // SensorDepthFrameReady, so clear it here.
                this.processingFrame = false;
            }
        }

        /// <summary>
        /// Reset the reconstruction to its initial state.
        /// Called (1) from WindowLoaded for the first initialization, and
        /// (2) from ProcessDepthData once trackingErrorCount reaches MaxTrackingErrors.
        /// </summary>
        private void ResetReconstruction()
        {
            // Reset tracking error counter
            this.trackingErrorCount = 0;

            // Set the world-view transform to identity, so the world origin is the initial camera location.
            this.worldToCameraTransform = Matrix4.Identity;

            if (null != this.volume)
            {
                // Translate the reconstruction volume location away from the world origin
                // by an amount equal to the minimum depth threshold. This ensures that
                // some depth signal falls inside the volume. If set false, the default
                // world origin is set to the center of the front face of the volume,
                // which has the effect of locating the volume directly in front of the
                // initial camera position with the +Z axis into the volume along the
                // initial camera direction of view.
                // When translateResetPoseByMinDepthThreshold is true, the volume is moved
                // forward along +Z away from the camera so that even very small volumes
                // can be captured.
                if (this.translateResetPoseByMinDepthThreshold)
                {
                    Matrix4 worldToVolumeTransform = this.defaultWorldToVolumeTransform;

                    // Translate the volume in the Z axis by the minDepthThreshold distance
                    float minDist = (this.minDepthClip < this.maxDepthClip) ? this.minDepthClip : this.maxDepthClip;
                    worldToVolumeTransform.M43 -= minDist * VoxelsPerMeter;
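                    // (The world-to-volume translation is expressed in voxel units, hence
                    // the multiplication by VoxelsPerMeter: with the default minimum depth
                    // of 0.35m and 256 vpm, the shift is 0.35 * 256 = 89.6 voxels.)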

                    this.volume.ResetReconstruction(this.worldToCameraTransform, worldToVolumeTransform);
                }
                else
                {
                    this.volume.ResetReconstruction(this.worldToCameraTransform);
                }
            }

            if (null != this.fpsTimer)
            {
                // Reset the processed frame count and reset the FPS timer
                this.fpsTimer.Stop();
                this.processedFrameCount = 0;
                this.fpsTimer.Start();
            }
        }

        /// <summary>
        /// Handles the user clicking on the reset reconstruction button
        /// </summary>
        /// <param name="sender">object sending the event</param>
        /// <param name="e">event arguments</param>
        private void ButtonResetReconstructionClick(object sender, RoutedEventArgs e)
        {
            if (null == this.sensor)
            {
                this.statusBarText.Text = Properties.Resources.ConnectDeviceFirst;
                return;
            }

            // reset the reconstruction and update the status text
            this.ResetReconstruction();
            this.statusBarText.Text = Properties.Resources.ResetReconstruction;
        }

        /// <summary>
        /// Handles the checking or un-checking of the near mode check box
        /// </summary>
        /// <param name="sender">object sending the event</param>
        /// <param name="e">event arguments</param>
        private void CheckBoxNearModeChanged(object sender, RoutedEventArgs e)
        {
            if (this.sensor != null)
            {
                // will not function on non-Kinect for Windows devices
                try
                {
                    if (this.checkBoxNearMode.IsChecked.GetValueOrDefault())
                    {
                        this.sensor.DepthStream.Range = DepthRange.Near;
                    }
                    else
                    {
                        this.sensor.DepthStream.Range = DepthRange.Default;
                    }
                }
                catch (InvalidOperationException)
                {
                    // silently ignore: near mode is not supported on this device
                }
            }
        }
    }
}