// Assets/KinectScripts/FacetrackingManager.cs
using UnityEngine;
using System;
using System.Collections;
using System.Collections.Generic;
//using System.Runtime.InteropServices;
using System.Text;


/// <summary>
/// Facetracking manager is the component that manages the head and face tracking.
/// </summary>
public class FacetrackingManager : MonoBehaviour
{
	[Tooltip("Index of the player, tracked by this component. 0 means the 1st player, 1 - the 2nd one, 2 - the 3rd one, etc.")]
	public int playerIndex = 0;

	[Tooltip("Whether to poll the HD-face model data or not.")]
	public bool getFaceModelData = false;

	[Tooltip("Whether to display the face rectangle over the color camera feed.")]
	public bool displayFaceRect = false;

	[Tooltip("Time tolerance in seconds, during which the face may not be tracked, without considering it lost.")]
	public float faceTrackingTolerance = 0.5f;

	[Tooltip("Game object that will be used to display the HD-face model mesh in the scene.")]
	public GameObject faceModelMesh = null;

	[Tooltip("Whether the HD-face model mesh should be mirrored or not.")]
	private bool mirroredModelMesh = true;

	//[Tooltip("Whether to skip the continuous updates of the HD-face model mesh, or not.")]
	//public bool dontUpdateModelMesh = false;

	[Tooltip("Whether to pause the updates of the HD-face model mesh.")]
	public bool pauseModelMeshUpdates = false;

	public enum TextureType : int { None, ColorMap, FaceRectangle }
	[Tooltip("How the HD-face model mesh should be textured.")]
	public TextureType texturedModelMesh = TextureType.ColorMap;

	[Tooltip("Whether to move the face model mesh, so it matches the user's head position.")]
	public bool moveModelMesh = false;

	[Tooltip("Camera that may be used to overlay the face mesh over the color background.")]
	public Camera foregroundCamera;

	[Tooltip("Scale factor for the face mesh.")]
	[Range(0.1f, 2.0f)]
	public float modelMeshScale = 1f;

	[Tooltip("Vertical offset of the mesh above the head (in meters).")]
	[Range(-0.5f, 0.5f)]
	public float verticalMeshOffset = 0f;

	[Tooltip("GUI-Text to display the FT-manager debug messages.")]
	public GUIText debugText;

	// // nose and head transforms
	// public Transform noseTransform;
	// public Transform headTransform;
	// public GUIText debugText2;


	// Is currently tracking user's face
	private bool isTrackingFace = false;
	private float lastFaceTrackedTime = 0f;

	// Skeleton ID of the tracked face
	//private long faceTrackingID = 0;

	// Animation units
	private Dictionary<KinectInterop.FaceShapeAnimations, float> dictAU = new Dictionary<KinectInterop.FaceShapeAnimations, float>();
	private bool bGotAU = false;

	// Shape units
	private Dictionary<KinectInterop.FaceShapeDeformations, float> dictSU = new Dictionary<KinectInterop.FaceShapeDeformations, float>();
	private bool bGotSU = false;

	// whether the face model mesh was initialized
	private bool bFaceModelMeshInited = false;
	private Vector3[] vMeshVertices = null;

	// Vertices, UV and triangles of the face model
	private Vector3[] avModelVertices = null;
	private Vector2[] avModelUV = null;
	private bool bGotModelVertices = false;
	//private bool bGotModelVerticesFromDC = false;

	private int[] avModelTriangles = null;
	private bool bGotModelTriangles = false;
	private bool bGotModelTrianglesFromDC = false;

	// Head position and rotation
	private Vector3 headPos = Vector3.zero;
	private bool bGotHeadPos = false;

	private Quaternion headRot = Quaternion.identity;
	private bool bGotHeadRot = false;

	// offset vector from head to face center
	private Vector3 faceHeadOffset = Vector3.zero;

	// Tracked face rectangle
	private Rect faceRect = new Rect();
	//private bool bGotFaceRect;

	// primary user ID, as reported by KinectManager
	private long primaryUserID = 0;
	private long lastUserID = 0;

	// primary sensor data structure
	private KinectInterop.SensorData sensorData = null;

	// Bool to keep track of whether face-tracking system has been initialized
	private bool isFacetrackingInitialized = false;
	private bool wasFacetrackingActive = false;

	// The single instance of FacetrackingManager
	private static FacetrackingManager instance;

	// update times
	private float facePosUpdateTime = 0f;
	private float faceMeshUpdateTime = 0f;

	// used when dontUpdateModelMesh is true
	//private bool faceMeshGotOnce = false;

	// whether UpdateFaceModelMesh() is running
	private bool updateFaceMeshStarted = false;

	private RenderTexture faceMeshTexture = null;
	private Vector3 nosePos = Vector3.zero;

	/// <summary>
	/// Gets the single FacetrackingManager instance.
	/// </summary>
	/// <value>The FacetrackingManager instance.</value>
	public static FacetrackingManager Instance
	{
		get
		{
			return instance;
		}
	}

	/// <summary>
	/// Determines whether the face-tracking system was successfully initialized.
	/// </summary>
	/// <returns><c>true</c> if the facetracking system was successfully initialized; otherwise, <c>false</c>.</returns>
	public bool IsFaceTrackingInitialized()
	{
		return isFacetrackingInitialized;
	}

	/// <summary>
	/// Determines whether the sensor is currently tracking a face.
	/// </summary>
	/// <returns><c>true</c> if the sensor is tracking a face; otherwise, <c>false</c>.</returns>
	public bool IsTrackingFace()
	{
		return isTrackingFace;
	}

	/// <summary>
	/// Gets the current user ID, or 0 if no user is currently tracked.
	/// </summary>
	/// <returns>The face tracking ID, or 0.</returns>
	public long GetFaceTrackingID()
	{
		return isTrackingFace ? primaryUserID : 0;
	}

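	// Usage sketch (illustrative, not part of the original asset): a component that
	// polls the tracking state each frame. The methods below belong to this class;
	// the "FaceTrackingProbe" component itself is hypothetical.
	//
	// public class FaceTrackingProbe : MonoBehaviour
	// {
	//     void Update()
	//     {
	//         FacetrackingManager ft = FacetrackingManager.Instance;
	//         if (ft != null && ft.IsFaceTrackingInitialized() && ft.IsTrackingFace())
	//         {
	//             Debug.Log("Tracking face of user: " + ft.GetFaceTrackingID());
	//         }
	//     }
	// }
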
	/// <summary>
	/// Determines whether the sensor is currently tracking the face of the specified user.
	/// </summary>
	/// <returns><c>true</c> if the sensor is currently tracking the face of the specified user; otherwise, <c>false</c>.</returns>
	/// <param name="userId">User ID</param>
	public bool IsTrackingFace(long userId)
	{
		if(sensorData != null && sensorData.sensorInterface != null)
		{
			return sensorData.sensorInterface.IsFaceTracked(userId);
		}

		return false;
	}

	/// <summary>
	/// Gets the last face position and rotation update time, in seconds since game start.
	/// </summary>
	/// <returns>The last face position and rotation update time.</returns>
	public float GetFacePosUpdateTime()
	{
		return facePosUpdateTime;
	}

	/// <summary>
	/// Gets the last face mesh update time, in seconds since game start.
	/// </summary>
	/// <returns>The last face mesh update time.</returns>
	public float GetFaceMeshUpdateTime()
	{
		return faceMeshUpdateTime;
	}

	/// <summary>
	/// Gets the head position of the currently tracked user.
	/// </summary>
	/// <returns>The head position.</returns>
	/// <param name="bMirroredMovement">If set to <c>true</c>, returns the mirrored head position.</param>
	public Vector3 GetHeadPosition(bool bMirroredMovement)
	{
		Vector3 vHeadPos = headPos; // bGotHeadPos ? headPos : Vector3.zero;

		if(!bMirroredMovement)
		{
			vHeadPos.z = -vHeadPos.z;
		}

		return vHeadPos;
	}

	/// <summary>
	/// Gets the head position of the specified user.
	/// </summary>
	/// <returns>The head position.</returns>
	/// <param name="userId">User ID</param>
	/// <param name="bMirroredMovement">If set to <c>true</c>, returns the mirrored head position.</param>
	public Vector3 GetHeadPosition(long userId, bool bMirroredMovement)
	{
		Vector3 vHeadPos = Vector3.zero;
		bool bGotPosition = sensorData.sensorInterface.GetHeadPosition(userId, ref vHeadPos);

		if(bGotPosition)
		{
			if(!bMirroredMovement)
			{
				vHeadPos.z = -vHeadPos.z;
			}

			return vHeadPos;
		}

		return Vector3.zero;
	}

	/// <summary>
	/// Gets the head rotation of the currently tracked user.
	/// </summary>
	/// <returns>The head rotation.</returns>
	/// <param name="bMirroredMovement">If set to <c>true</c>, returns the mirrored head rotation.</param>
	public Quaternion GetHeadRotation(bool bMirroredMovement)
	{
		Vector3 rotAngles = headRot.eulerAngles; // bGotHeadRot ? headRot.eulerAngles : Vector3.zero;

		if(bMirroredMovement)
		{
			rotAngles.x = -rotAngles.x;
			rotAngles.z = -rotAngles.z;
		}
		else
		{
			rotAngles.x = -rotAngles.x;
			rotAngles.y = -rotAngles.y;
		}

		return Quaternion.Euler(rotAngles);
	}

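	// Usage sketch (illustrative, not part of the original asset): applies the tracked
	// head pose to a scene object every frame. "headObject" is a hypothetical field.
	//
	// public Transform headObject;
	//
	// void Update()
	// {
	//     FacetrackingManager ft = FacetrackingManager.Instance;
	//     if (ft != null && ft.IsTrackingFace())
	//     {
	//         headObject.position = ft.GetHeadPosition(false);
	//         headObject.rotation = ft.GetHeadRotation(false);
	//     }
	// }
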
	/// <summary>
	/// Gets the head rotation of the specified user.
	/// </summary>
	/// <returns>The head rotation.</returns>
	/// <param name="userId">User ID</param>
	/// <param name="bMirroredMovement">If set to <c>true</c>, returns the mirrored head rotation.</param>
	public Quaternion GetHeadRotation(long userId, bool bMirroredMovement)
	{
		Quaternion vHeadRot = Quaternion.identity;
		bool bGotRotation = sensorData.sensorInterface.GetHeadRotation(userId, ref vHeadRot);

		if(bGotRotation)
		{
			Vector3 rotAngles = vHeadRot.eulerAngles;

			if(bMirroredMovement)
			{
				rotAngles.x = -rotAngles.x;
				rotAngles.z = -rotAngles.z;
			}
			else
			{
				rotAngles.x = -rotAngles.x;
				rotAngles.y = -rotAngles.y;
			}

			return Quaternion.Euler(rotAngles);
		}

		return Quaternion.identity;
	}

	/// <summary>
	/// Gets the tracked face rectangle of the specified user in color image coordinates, or a zero-rect if the user's face is not tracked.
	/// </summary>
	/// <returns>The face rectangle, in color image coordinates.</returns>
	/// <param name="userId">User ID</param>
	public Rect GetFaceColorRect(long userId)
	{
		Rect faceColorRect = new Rect();
		sensorData.sensorInterface.GetFaceRect(userId, ref faceColorRect);

		return faceColorRect;
	}

	/// <summary>
	/// Determines whether there are valid anim units.
	/// </summary>
	/// <returns><c>true</c> if there are valid anim units; otherwise, <c>false</c>.</returns>
	public bool IsGotAU()
	{
		return bGotAU;
	}

	/// <summary>
	/// Gets the animation unit value for the given key, or 0 if the key is not present.
	/// </summary>
	/// <returns>The animation unit value.</returns>
	/// <param name="faceAnimKey">Face animation unit.</param>
	public float GetAnimUnit(KinectInterop.FaceShapeAnimations faceAnimKey)
	{
		if(dictAU.ContainsKey(faceAnimKey))
		{
			return dictAU[faceAnimKey];
		}

		return 0.0f;
	}

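	// Usage sketch (illustrative, not part of the original asset): reads an animation
	// unit each frame to drive some visual. Assumes the KinectInterop.FaceShapeAnimations
	// enum contains a JawOpen member (as in the Kinect v2 face API).
	//
	// void Update()
	// {
	//     FacetrackingManager ft = FacetrackingManager.Instance;
	//     if (ft != null && ft.IsGotAU())
	//     {
	//         float jawOpen = ft.GetAnimUnit(KinectInterop.FaceShapeAnimations.JawOpen);
	//         // e.g. drive a blend-shape or rotate a jaw bone with jawOpen (0..1)
	//     }
	// }
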
	/// <summary>
	/// Gets all animation units for the specified user.
	/// </summary>
	/// <returns><c>true</c>, if the user's face is tracked, <c>false</c> otherwise.</returns>
	/// <param name="userId">User ID</param>
	/// <param name="dictAnimUnits">Animation units dictionary, to get the results.</param>
	public bool GetUserAnimUnits(long userId, ref Dictionary<KinectInterop.FaceShapeAnimations, float> dictAnimUnits)
	{
		if(sensorData != null && sensorData.sensorInterface != null)
		{
			bool bGotIt = sensorData.sensorInterface.GetAnimUnits(userId, ref dictAnimUnits);
			return bGotIt;
		}

		return false;
	}

	/// <summary>
	/// Determines whether there are valid shape units.
	/// </summary>
	/// <returns><c>true</c> if there are valid shape units; otherwise, <c>false</c>.</returns>
	public bool IsGotSU()
	{
		return bGotSU;
	}

	/// <summary>
	/// Gets the shape unit value for the given key, or 0 if the key is not present.
	/// </summary>
	/// <returns>The shape unit value.</returns>
	/// <param name="faceShapeKey">Face shape unit.</param>
	public float GetShapeUnit(KinectInterop.FaceShapeDeformations faceShapeKey)
	{
		if(dictSU.ContainsKey(faceShapeKey))
		{
			return dictSU[faceShapeKey];
		}

		return 0.0f;
	}

	/// <summary>
	/// Gets all shape units for the specified user.
	/// </summary>
	/// <returns><c>true</c>, if the user's face is tracked, <c>false</c> otherwise.</returns>
	/// <param name="userId">User ID</param>
	/// <param name="dictShapeUnits">Shape units dictionary, to get the results.</param>
	public bool GetUserShapeUnits(long userId, ref Dictionary<KinectInterop.FaceShapeDeformations, float> dictShapeUnits)
	{
		if(sensorData != null && sensorData.sensorInterface != null)
		{
			bool bGotIt = sensorData.sensorInterface.GetShapeUnits(userId, ref dictShapeUnits);
			return bGotIt;
		}

		return false;
	}

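	// Usage sketch (illustrative, not part of the original asset): fetches all animation
	// and shape units of a given user into caller-owned dictionaries.
	//
	// Dictionary<KinectInterop.FaceShapeAnimations, float> animUnits = new Dictionary<KinectInterop.FaceShapeAnimations, float>();
	// Dictionary<KinectInterop.FaceShapeDeformations, float> shapeUnits = new Dictionary<KinectInterop.FaceShapeDeformations, float>();
	//
	// long userId = FacetrackingManager.Instance.GetFaceTrackingID();
	// bool gotAUs = FacetrackingManager.Instance.GetUserAnimUnits(userId, ref animUnits);
	// bool gotSUs = FacetrackingManager.Instance.GetUserShapeUnits(userId, ref shapeUnits);
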
	/// <summary>
	/// Gets the count of face model vertices.
	/// </summary>
	/// <returns>The count of face model vertices.</returns>
	public int GetFaceModelVertexCount()
	{
		if (avModelVertices != null)
		{
			return avModelVertices.Length;
		}

		return 0;
	}

	/// <summary>
	/// Gets the face model vertex, if a face model is available and the index is in range; Vector3.zero otherwise.
	/// </summary>
	/// <returns>The face model vertex, or Vector3.zero.</returns>
	/// <param name="index">Vertex index</param>
	public Vector3 GetFaceModelVertex(int index)
	{
		if (avModelVertices != null)
		{
			if(index >= 0 && index < avModelVertices.Length)
			{
				return avModelVertices[index];
			}
		}

		return Vector3.zero;
	}

	/// <summary>
	/// Gets all face model vertices, if a face model is available; null otherwise.
	/// </summary>
	/// <returns>The face model vertices, or null.</returns>
	public Vector3[] GetFaceModelVertices()
	{
		return avModelVertices;
	}

	/// <summary>
	/// Gets the count of face model vertices for the specified user.
	/// </summary>
	/// <returns>The count of face model vertices.</returns>
	/// <param name="userId">User ID</param>
	public int GetUserFaceVertexCount(long userId)
	{
		if(sensorData != null && sensorData.sensorInterface != null)
		{
			int iVertCount = sensorData.sensorInterface.GetFaceModelVerticesCount(userId);
			return iVertCount;
		}

		return 0;
	}

	/// <summary>
	/// Gets all face model vertices for the specified user.
	/// </summary>
	/// <returns><c>true</c>, if the user's face is tracked, <c>false</c> otherwise.</returns>
	/// <param name="userId">User ID</param>
	/// <param name="avVertices">Reference to array of vertices, to get the result.</param>
	public bool GetUserFaceVertices(long userId, ref Vector3[] avVertices)
	{
		if(sensorData != null && sensorData.sensorInterface != null)
		{
			bool bGotIt = sensorData.sensorInterface.GetFaceModelVertices(userId, ref avVertices);
			return bGotIt;
		}

		return false;
	}

	/// <summary>
	/// Gets the count of face model triangles.
	/// </summary>
	/// <returns>The count of face model triangles.</returns>
	public int GetFaceModelTriangleCount()
	{
		if (avModelTriangles != null)
		{
			return avModelTriangles.Length;
		}

		return 0;
	}

	/// <summary>
	/// Gets the face model triangle indices, if a face model is available; null otherwise.
	/// </summary>
	/// <returns>The face model triangle indices, or null.</returns>
	/// <param name="bMirroredModel">If set to <c>true</c>, gets the mirrored model indices.</param>
	public int[] GetFaceModelTriangleIndices(bool bMirroredModel)
	{
		if (avModelTriangles != null)
		{
			return avModelTriangles;
		}

		return null;
	}

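	// Usage sketch (illustrative, not part of the original asset): builds a standalone
	// Unity mesh snapshot from the cached face model data. The vertex and index arrays
	// come from the getters above; everything else is standard UnityEngine API.
	//
	// Mesh BuildFaceMeshSnapshot(FacetrackingManager ft)
	// {
	//     Vector3[] vertices = ft.GetFaceModelVertices();
	//     int[] triangles = ft.GetFaceModelTriangleIndices(false);
	//     if (vertices == null || triangles == null)
	//         return null;
	//
	//     Mesh mesh = new Mesh();
	//     mesh.vertices = vertices;
	//     mesh.triangles = triangles;
	//     mesh.RecalculateNormals();
	//     mesh.RecalculateBounds();
	//     return mesh;
	// }
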

	//----------------------------------- end of public functions --------------------------------------//

	void Awake()
	{
		instance = this;
	}

	void Start()
	{
		try
		{
			// get sensor data
			KinectManager kinectManager = KinectManager.Instance;
			if(kinectManager && kinectManager.IsInitialized())
			{
				sensorData = kinectManager.GetSensorData();
			}

			if(sensorData == null || sensorData.sensorInterface == null)
			{
				throw new Exception("Face tracking cannot be started, because KinectManager is missing or not initialized.");
			}

			if(debugText != null)
			{
				debugText.text = "Please, wait...";
			}

			// ensure the needed dlls are in place and face tracking is available for this interface
			bool bNeedRestart = false;
			if(sensorData.sensorInterface.IsFaceTrackingAvailable(ref bNeedRestart))
			{
				if(bNeedRestart)
				{
					KinectInterop.RestartLevel(gameObject, "FM");
					return;
				}
			}
			else
			{
				string sInterfaceName = sensorData.sensorInterface.GetType().Name;
				throw new Exception(sInterfaceName + ": Face tracking is not supported!");
			}

			// Initialize the face tracker
			wasFacetrackingActive = sensorData.sensorInterface.IsFaceTrackingActive();
			if(!wasFacetrackingActive)
			{
				if (!sensorData.sensorInterface.InitFaceTracking(getFaceModelData, displayFaceRect))
				{
					throw new Exception("Face tracking could not be initialized.");
				}
			}

			isFacetrackingInitialized = true;

			//DontDestroyOnLoad(gameObject);

			if(debugText != null)
			{
				debugText.text = "Ready.";
			}
		}
		catch(DllNotFoundException ex)
		{
			Debug.LogError(ex.ToString());
			if(debugText != null)
				debugText.text = "Please check the Kinect and FT-Library installations.";
		}
		catch (Exception ex)
		{
			Debug.LogError(ex.ToString());
			if(debugText != null)
				debugText.text = ex.Message;
		}
	}

	void OnDestroy()
	{
		if(isFacetrackingInitialized && !wasFacetrackingActive && sensorData != null && sensorData.sensorInterface != null)
		{
			// finish face tracking
			sensorData.sensorInterface.FinishFaceTracking();
		}

		if (faceMeshTexture != null)
		{
			faceMeshTexture.Release();
			faceMeshTexture = null;
		}

		// // clean up
		// Resources.UnloadUnusedAssets();
		// GC.Collect();

		isFacetrackingInitialized = false;
		instance = null;
	}

	void Update()
	{
		if(isFacetrackingInitialized)
		{
			KinectManager kinectManager = KinectManager.Instance;
			if(kinectManager && kinectManager.IsInitialized())
			{
				lastUserID = primaryUserID;
				primaryUserID = kinectManager.GetUserIdByIndex(playerIndex);

				if (primaryUserID != lastUserID && primaryUserID != 0)
				{
					//faceMeshGotOnce = false;
				}
			}

			// update the face tracker
			isTrackingFace = false;

			bool bFacetrackingUpdated = !wasFacetrackingActive ? sensorData.sensorInterface.UpdateFaceTracking() : true;
			if(bFacetrackingUpdated)
			{
				// estimate the tracking state
				isTrackingFace = sensorData.sensorInterface.IsFaceTracked(primaryUserID);

				if(!isTrackingFace && (Time.realtimeSinceStartup - lastFaceTrackedTime) <= faceTrackingTolerance)
				{
					// allow tolerance in tracking
					isTrackingFace = true;
				}

				// get the facetracking parameters
				if(isTrackingFace)
				{
					lastFaceTrackedTime = Time.realtimeSinceStartup;
					facePosUpdateTime = Time.time;

					// get face rectangle
					/**bGotFaceRect =*/ sensorData.sensorInterface.GetFaceRect(primaryUserID, ref faceRect);

					// get head position
					bGotHeadPos = sensorData.sensorInterface.GetHeadPosition(primaryUserID, ref headPos);

					// get head rotation
					bGotHeadRot = sensorData.sensorInterface.GetHeadRotation(primaryUserID, ref headRot);

					// get the animation units
					bGotAU = sensorData.sensorInterface.GetAnimUnits(primaryUserID, ref dictAU);

					// get the shape units
					bGotSU = sensorData.sensorInterface.GetShapeUnits(primaryUserID, ref dictSU);

					//if(faceModelMesh != null && faceModelMesh.activeInHierarchy)
					{
						// apply model vertices to the mesh
						if(!bFaceModelMeshInited)
						{
							bFaceModelMeshInited = CreateFaceModelMesh();
						}
					}

					if (getFaceModelData && bFaceModelMeshInited && primaryUserID != 0)
					{
						if (!pauseModelMeshUpdates && !updateFaceMeshStarted)
						{
							StartCoroutine(UpdateFaceModelMesh());
						}
					}
				}
			}

			// // set mesh activity flag
			// bool bFaceMeshActive = isTrackingFace && primaryUserID != 0;
			// if(faceModelMesh != null && bFaceModelMeshInited && faceModelMesh.activeSelf != bFaceMeshActive)
			// {
			// 	faceModelMesh.SetActive(bFaceMeshActive);
			// }
		}
	}

	void OnGUI()
	{
		if(isFacetrackingInitialized)
		{
			if(debugText != null)
			{
				if(isTrackingFace)
				{
					debugText.text = "Tracking - BodyID: " + primaryUserID;
				}
				else
				{
					debugText.text = "Not tracking...";
				}
			}
		}
	}


	protected bool CreateFaceModelMesh()
	{
		// if(faceModelMesh == null)
		// 	return false;

		if (avModelVertices == null /**&& !bGotModelVerticesFromDC*/)
		{
			int iNumVertices = sensorData.sensorInterface.GetFaceModelVerticesCount(0);
			if(iNumVertices <= 0)
				return false;

			avModelVertices = new Vector3[iNumVertices];
			bGotModelVertices = sensorData.sensorInterface.GetFaceModelVertices(0, ref avModelVertices);

			avModelUV = new Vector2[iNumVertices];

			if(!bGotModelVertices)
				return false;
		}

		// estimate face mesh vertices with respect to the head joint
		Vector3[] vMeshVertices = new Vector3[avModelVertices.Length];

		//if (!bGotModelVerticesFromDC)
		{
			Vector3 vFaceCenter = Vector3.zero;
			for (int i = 0; i < avModelVertices.Length; i++)
			{
				vFaceCenter += avModelVertices[i];
			}

			vFaceCenter /= (float)avModelVertices.Length;

			faceHeadOffset = Vector3.zero;
			if (vFaceCenter.sqrMagnitude >= 1f)
			{
				Vector3 vHeadToFace = (vFaceCenter - headPos);

				faceHeadOffset = Quaternion.Inverse(headRot) * vHeadToFace;
				faceHeadOffset.y += verticalMeshOffset;
			}

			vFaceCenter -= headRot * faceHeadOffset;

			for(int i = 0; i < avModelVertices.Length; i++)
			{
				//avModelVertices[i] = kinectToWorld.MultiplyPoint3x4(avModelVertices[i]) - headPosWorld;
				//avModelVertices[i] -= vFaceCenter;

				vMeshVertices[i] = avModelVertices[i] - vFaceCenter;
			}
		}

		if (avModelTriangles == null && !bGotModelTrianglesFromDC)
		{
			int iNumTriangles = sensorData.sensorInterface.GetFaceModelTrianglesCount();
			if(iNumTriangles <= 0)
				return false;

			avModelTriangles = new int[iNumTriangles];
			bGotModelTriangles = sensorData.sensorInterface.GetFaceModelTriangles(mirroredModelMesh, ref avModelTriangles);

			if(!bGotModelTriangles)
				return false;
		}

		if (faceModelMesh)
		{
			Mesh mesh = new Mesh();
			mesh.name = "FaceMesh";
			faceModelMesh.GetComponent<MeshFilter>().mesh = mesh;

			mesh.vertices = vMeshVertices; // avModelVertices;
			//mesh.uv = avModelUV;

			mesh.triangles = avModelTriangles;
			mesh.RecalculateNormals();

			// if (moveModelMesh)
			// {
			// 	faceModelMesh.transform.position = headPos;
			// 	//faceModelMesh.transform.rotation = faceModelRot;
			// }

			SetFaceModelMeshTexture();
		}

		//bFaceModelMeshInited = true;
		return true;
	}

	// sets the proper face mesh texture
	protected void SetFaceModelMeshTexture()
	{
		if (texturedModelMesh == TextureType.ColorMap)
		{
			KinectManager kinectManager = KinectManager.Instance;
			Texture texColorMap = kinectManager ? kinectManager.GetUsersClrTex() : null;

			if (!faceMeshTexture && kinectManager && texColorMap)
			{
				faceMeshTexture = new RenderTexture(texColorMap.width, texColorMap.height, 0);
				faceModelMesh.GetComponent<MeshRenderer>().material.mainTexture = faceMeshTexture; // kinectManager.GetUsersClrTex();
			}

			if (faceMeshTexture && texColorMap)
			{
				// update the color texture
				Graphics.Blit(texColorMap, faceMeshTexture);
			}
		}
		else if (texturedModelMesh == TextureType.FaceRectangle)
		{
			if (faceMeshTexture != null)
			{
				faceMeshTexture = null;
			}
		}
		else if(texturedModelMesh == TextureType.None)
		{
			if (faceModelMesh.GetComponent<MeshRenderer>().material.mainTexture != null)
			{
				faceMeshTexture = null;
				faceModelMesh.GetComponent<MeshRenderer>().material.mainTexture = null;
			}
		}
	}


	protected IEnumerator UpdateFaceModelMesh()
	{
		updateFaceMeshStarted = true;

		//if (!dontUpdateModelMesh || !faceMeshGotOnce /**&& !bGotModelVerticesFromDC*/)
		{
			// init the vertices array if needed
			if(avModelVertices == null)
			{
				int iNumVertices = sensorData.sensorInterface.GetFaceModelVerticesCount(primaryUserID);
				avModelVertices = new Vector3[iNumVertices];
			}

			// get face model vertices
			bGotModelVertices = sensorData.sensorInterface.GetFaceModelVertices(primaryUserID, ref avModelVertices);
		}

		if(bGotModelVertices && faceModelMesh != null)
		{
			//Quaternion faceModelRot = faceModelMesh.transform.rotation;
			//faceModelMesh.transform.rotation = Quaternion.identity;

			bool bFaceMeshUpdated = false;
			//if (!dontUpdateModelMesh || !faceMeshGotOnce)
			{
				AsyncTask<bool> task = new AsyncTask<bool>(() => {
					// estimate face mesh vertices with respect to the head joint
					vMeshVertices = null;

					KinectManager kinectManager = KinectManager.Instance;
					Matrix4x4 kinectToWorld = kinectManager ? kinectManager.GetKinectToWorldMatrix() : Matrix4x4.identity;
					Vector3 headPosWorld = kinectToWorld.MultiplyPoint3x4(headPos);

					Vector3 lastNosePos = nosePos;
					//if (!bGotModelVerticesFromDC)
					{
						// Vector3 vFaceCenter = Vector3.zero;
						// for (int i = 0; i < avModelVertices.Length; i++)
						// {
						// 	vFaceCenter += avModelVertices[i];
						// }
						//
						// vFaceCenter /= (float)avModelVertices.Length;
						//
						// Vector3 vHeadToFace = (vFaceCenter - headPos);
						// if (vHeadToFace.sqrMagnitude < 0.015f) // max 0.12 x 0.12
						// {
						// 	faceHeadOffset = Quaternion.Inverse(headRot) * vHeadToFace;
						// 	faceHeadOffset.y += verticalMeshOffset;
						// }

						nosePos = GetFaceModelNosePos();
						Vector3 vHeadToNose = Quaternion.Inverse(headRot) * (nosePos - headPos);
						float headToNoseLen = vHeadToNose.magnitude;

						// string sHeadToNose = string.Format("({0:F2}, {1:F2}, {2:F2})", vHeadToNose.x, vHeadToNose.y, vHeadToNose.z);
						// Debug.Log("U-Face nosePos: " + nosePos + ", headPos: " + headPos + "\noffset: " + sHeadToNose + ", len: " + headToNoseLen);

						if(headToNoseLen >= 0.08f && headToNoseLen <= 0.18f)
						{
							//vFaceCenter -= headRot * faceHeadOffset;

							vMeshVertices = new Vector3[avModelVertices.Length];
							for(int i = 0; i < avModelVertices.Length; i++)
							{
								//avModelVertices[i] = kinectToWorld.MultiplyPoint3x4(avModelVertices[i]) - headPosWorld;
								//avModelVertices[i] -= vFaceCenter;

								//vMeshVertices[i] = avModelVertices[i] - vFaceCenter;
								vMeshVertices[i] = kinectToWorld.MultiplyPoint3x4(avModelVertices[i]) - headPosWorld; // avModelVertices[i] - headPos;
							}
						}
					}

					if(vMeshVertices == null || lastNosePos == nosePos)
					{
						return false;
					}

					//if (!bGotModelVerticesFromDC)
					{
						if(texturedModelMesh != TextureType.None)
						{
							float colorWidth = (float)kinectManager.GetColorImageWidth();
							float colorHeight = (float)kinectManager.GetColorImageHeight();

							//bool bGotFaceRect = sensorData.sensorInterface.GetFaceRect(userId, ref faceRect);
							bool faceRectValid = /**bGotFaceRect &&*/ faceRect.width > 0 && faceRect.height > 0;

							for(int i = 0; i < avModelVertices.Length; i++)
							{
								Vector2 posDepth = kinectManager.MapSpacePointToDepthCoords(avModelVertices[i]);

								bool bUvSet = false;
								if(posDepth != Vector2.zero)
								{
									ushort depth = kinectManager.GetDepthForPixel((int)posDepth.x, (int)posDepth.y);
									Vector2 posColor = kinectManager.MapDepthPointToColorCoords(posDepth, depth);

									if(posColor != Vector2.zero && !float.IsInfinity(posColor.x) && !float.IsInfinity(posColor.y))
									{
										if(texturedModelMesh == TextureType.ColorMap)
										{
											avModelUV[i] = new Vector2(posColor.x / colorWidth, posColor.y / colorHeight);
											bUvSet = true;
										}
										else if(texturedModelMesh == TextureType.FaceRectangle && faceRectValid)
										{
											avModelUV[i] = new Vector2(Mathf.Clamp01((posColor.x - faceRect.x) / faceRect.width),
												-Mathf.Clamp01((posColor.y - faceRect.y) / faceRect.height));
											bUvSet = true;
										}
									}
								}

								if(!bUvSet)
								{
									avModelUV[i] = Vector2.zero;
								}
							}
						}
					}

					return true;
				});

				task.Start();

				while (task.State == AsyncTaskState.Running)
				{
					yield return null;
				}

				// // show nose & head positions
				// Matrix4x4 kinectToWorld2 = KinectManager.Instance.GetKinectToWorldMatrix();
				// if (noseTransform)
				// 	noseTransform.position = kinectToWorld2.MultiplyPoint3x4(nosePos);
				// if(headTransform)
				// 	headTransform.position = kinectToWorld2.MultiplyPoint3x4(headPos);
				//
				// Vector3 vHeadToNose2 = Quaternion.Inverse(headRot) * (nosePos - headPos);
				// string sHeadToNose2 = string.Format("({0:F2}, {1:F2}, {2:F2})", vHeadToNose2.x, vHeadToNose2.y, vHeadToNose2.z);
				// if(debugText2)
				// 	debugText2.text = "h2n: " + sHeadToNose2 + ", len: " + vHeadToNose2.magnitude;

				bFaceMeshUpdated = task.Result;
				if(bFaceMeshUpdated)
				{
					Mesh mesh = faceModelMesh.GetComponent<MeshFilter>().mesh;
					mesh.vertices = vMeshVertices; // avModelVertices;
					vMeshVertices = null;

					if(texturedModelMesh != TextureType.None && avModelUV != null)
					{
						mesh.uv = avModelUV;
					}

					faceMeshUpdateTime = Time.time;
					//faceMeshGotOnce = true;

					mesh.RecalculateNormals();
					mesh.RecalculateBounds();

					// set the face mesh texture
					SetFaceModelMeshTexture();
				}
			}

			if (moveModelMesh)
			{
				KinectManager kinectManager = KinectManager.Instance;
				Matrix4x4 kinectToWorld = kinectManager ? kinectManager.GetKinectToWorldMatrix() : Matrix4x4.identity;
				Vector3 newHeadPos = kinectToWorld.MultiplyPoint3x4(headPos);

				// check for head pos overlay
				if(foregroundCamera)
				{
					// get the background rectangle (use the portrait background, if available)
					Rect backgroundRect = foregroundCamera.pixelRect;
					PortraitBackground portraitBack = PortraitBackground.Instance;

					if(portraitBack && portraitBack.enabled)
					{
						backgroundRect = portraitBack.GetBackgroundRect();
					}

					if(kinectManager)
					{
						Vector3 posColorOverlay = kinectManager.GetJointPosColorOverlay(primaryUserID, (int)KinectInterop.JointType.Head, foregroundCamera, backgroundRect);

						if(posColorOverlay != Vector3.zero)
						{
							newHeadPos = posColorOverlay;
						}
					}
				}

				faceModelMesh.transform.position = newHeadPos; // Vector3.Lerp(faceModelMesh.transform.position, newHeadPos, 20f * Time.deltaTime);
				//faceModelMesh.transform.rotation = faceModelRot;
			}

			// don't rotate the transform - the mesh follows the head rotation
			if (faceModelMesh.transform.rotation != Quaternion.identity)
			{
				faceModelMesh.transform.rotation = Quaternion.identity;
			}

			// apply scale factor
			if(faceModelMesh.transform.localScale.x != modelMeshScale)
			{
				faceModelMesh.transform.localScale = new Vector3(modelMeshScale, modelMeshScale, modelMeshScale);
			}

			if(!faceModelMesh.activeSelf)
			{
				faceModelMesh.SetActive(true);
			}
		}
		else
		{
			if(faceModelMesh && faceModelMesh.activeSelf)
			{
				faceModelMesh.SetActive(false);
			}
		}

		updateFaceMeshStarted = false;
	}

	// returns the nose tip position, or Vector3.zero if not found
	private Vector3 GetFaceModelNosePos()
	{
		if (avModelVertices != null)
		{
			int iNoseIndex = -1;
			if (sensorData.sensorIntPlatform == KinectInterop.DepthSensorPlatform.KinectSDKv2 ||
				sensorData.sensorIntPlatform == KinectInterop.DepthSensorPlatform.KinectUWPv2 ||
				sensorData.sensorIntPlatform == KinectInterop.DepthSensorPlatform.DummyK2)
			{
				iNoseIndex = 18; // Microsoft.Kinect.Face.HighDetailFacePoints.NoseTip
			}
			else if (sensorData.sensorIntPlatform == KinectInterop.DepthSensorPlatform.KinectSDKv1 ||
				sensorData.sensorIntPlatform == KinectInterop.DepthSensorPlatform.DummyK1)
			{
				iNoseIndex = 89; // nose tip index in the Kinect v1 face model
			}

			if (iNoseIndex >= 0 && iNoseIndex < avModelVertices.Length)
			{
				return avModelVertices[iNoseIndex];
			}
		}

		return Vector3.zero;
	}

	// gets the basic face parameters as a csv line
	public string GetFaceParamsAsCsv()
	{
		// create the output string
		StringBuilder sbBuf = new StringBuilder();
		const char delimiter = ',';

		if (bGotHeadPos || bGotHeadRot)
		{
			sbBuf.Append("fp").Append(delimiter);

			// head pos
			sbBuf.Append(bGotHeadPos ? "1" : "0").Append(delimiter);

			if (bGotHeadPos)
			{
				sbBuf.AppendFormat("{0:F3}", headPos.x).Append(delimiter);
				sbBuf.AppendFormat("{0:F3}", headPos.y).Append(delimiter);
				sbBuf.AppendFormat("{0:F3}", headPos.z).Append(delimiter);
			}

			// head rot
			sbBuf.Append(bGotHeadRot ? "1" : "0").Append(delimiter);
			Vector3 vheadRot = headRot.eulerAngles;

			if (bGotHeadRot)
			{
				sbBuf.AppendFormat("{0:F3}", vheadRot.x).Append(delimiter);
				sbBuf.AppendFormat("{0:F3}", vheadRot.y).Append(delimiter);
				sbBuf.AppendFormat("{0:F3}", vheadRot.z).Append(delimiter);
			}

			// face rect
			sbBuf.Append("1").Append(delimiter);
			sbBuf.AppendFormat("{0:F0}", faceRect.x).Append(delimiter);
			sbBuf.AppendFormat("{0:F0}", faceRect.y).Append(delimiter);
			sbBuf.AppendFormat("{0:F0}", faceRect.width).Append(delimiter);
			sbBuf.AppendFormat("{0:F0}", faceRect.height).Append(delimiter);

			// animation units
			sbBuf.Append(bGotAU ? "1" : "0").Append(delimiter);

			if (bGotAU)
			{
				int enumCount = Enum.GetNames(typeof(KinectInterop.FaceShapeAnimations)).Length;
				sbBuf.Append(enumCount).Append(delimiter);

				for (int i = 0; i < enumCount; i++)
				{
					float dictValue = dictAU[(KinectInterop.FaceShapeAnimations)i];
					sbBuf.AppendFormat("{0:F3}", dictValue).Append(delimiter);
				}
			}

			// shape units
			sbBuf.Append(bGotSU ? "1" : "0").Append(delimiter);

			if (bGotSU)
			{
				int enumCount = Enum.GetNames(typeof(KinectInterop.FaceShapeDeformations)).Length;
				sbBuf.Append(enumCount).Append(delimiter);

				for (int i = 0; i < enumCount; i++)
				{
					float dictValue = dictSU[(KinectInterop.FaceShapeDeformations)i];
					sbBuf.AppendFormat("{0:F3}", dictValue).Append(delimiter);
				}
			}

			// any other parameters...
		}

		// remove the last delimiter
		if(sbBuf.Length > 0 && sbBuf[sbBuf.Length - 1] == delimiter)
		{
			sbBuf.Remove(sbBuf.Length - 1, 1);
		}

		return sbBuf.ToString();
	}

	// sets the basic face parameters from a csv line
	public bool SetFaceParamsFromCsv(string sCsvLine)
	{
		if(sCsvLine.Length == 0)
			return false;

		// split the csv line in parts
		char[] delimiters = { ',' };
		string[] alCsvParts = sCsvLine.Split(delimiters);

		if(alCsvParts.Length < 1 || alCsvParts[0] != "fp")
			return false;

		int iIndex = 1;
		int iLength = alCsvParts.Length;

		if (iLength < (iIndex + 1))
			return false;

		// head pos
		bGotHeadPos = (alCsvParts[iIndex] == "1");
		iIndex++;

		if (bGotHeadPos && iLength >= (iIndex + 3))
		{
			float x = 0f, y = 0f, z = 0f;

			float.TryParse(alCsvParts[iIndex], out x);
			float.TryParse(alCsvParts[iIndex + 1], out y);
			float.TryParse(alCsvParts[iIndex + 2], out z);
			iIndex += 3;

			headPos = new Vector3(x, y, z);
		}

		// head rot
		bGotHeadRot = (alCsvParts[iIndex] == "1");
		iIndex++;

		if (bGotHeadRot && iLength >= (iIndex + 3))
		{
			float x = 0f, y = 0f, z = 0f;

			float.TryParse(alCsvParts[iIndex], out x);
			float.TryParse(alCsvParts[iIndex + 1], out y);
			float.TryParse(alCsvParts[iIndex + 2], out z);
			iIndex += 3;

			headRot = Quaternion.Euler(x, y, z);
		}

		// face rect
		bool bGotFaceRect = (alCsvParts[iIndex] == "1");
		iIndex++;

		if (bGotFaceRect && iLength >= (iIndex + 4))
		{
			float x = 0f, y = 0f, w = 0f, h = 0f;

			float.TryParse(alCsvParts[iIndex], out x);
			float.TryParse(alCsvParts[iIndex + 1], out y);
			float.TryParse(alCsvParts[iIndex + 2], out w);
			float.TryParse(alCsvParts[iIndex + 3], out h);
			iIndex += 4;

			faceRect.x = x; faceRect.y = y;
			faceRect.width = w; faceRect.height = h;
		}

		// animation units
		bGotAU = (alCsvParts[iIndex] == "1");
		iIndex++;

		if (bGotAU && iLength >= (iIndex + 1))
		{
			int count = 0;
			int.TryParse(alCsvParts[iIndex], out count);
			iIndex++;

			for (int i = 0; i < count && iLength >= (iIndex + 1); i++)
			{
				float v = 0;
				float.TryParse(alCsvParts[iIndex], out v);
				iIndex++;

				dictAU[(KinectInterop.FaceShapeAnimations)i] = v;
			}
		}

		// shape units
		bGotSU = (alCsvParts[iIndex] == "1");
		iIndex++;

		if (bGotSU && iLength >= (iIndex + 1))
		{
			int count = 0;
			int.TryParse(alCsvParts[iIndex], out count);
			iIndex++;

			for (int i = 0; i < count && iLength >= (iIndex + 1); i++)
			{
				float v = 0;
				float.TryParse(alCsvParts[iIndex], out v);
				iIndex++;

				dictSU[(KinectInterop.FaceShapeDeformations)i] = v;
			}
		}

		// any other parameters here...

		// emulate face tracking
		lastFaceTrackedTime = Time.realtimeSinceStartup;
		facePosUpdateTime = Time.time;

		return true;
	}

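	// Usage sketch (illustrative, not part of the original asset): records the face
	// parameters on one machine and replays them on another, e.g. over a network
	// channel. "Send"/"Receive" stand in for whatever transport is actually used.
	//
	// // sender side
	// string csvLine = FacetrackingManager.Instance.GetFaceParamsAsCsv();
	// Send(csvLine);
	//
	// // receiver side
	// string received = Receive();
	// bool ok = FacetrackingManager.Instance.SetFaceParamsFromCsv(received);
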
	// gets face model vertices as csv line
	public string GetFaceVerticesAsCsv()
	{
		// create the output string
		StringBuilder sbBuf = new StringBuilder();
		const char delimiter = ',';

		if (bGotModelVertices && avModelVertices != null)
		{
			sbBuf.Append("fv").Append(delimiter);

			// model vertices
			int vertCount = avModelVertices.Length;
			sbBuf.Append(vertCount).Append(delimiter);

			for (int i = 0; i < vertCount; i++)
			{
				sbBuf.AppendFormat("{0:F3}", avModelVertices[i].x).Append(delimiter);
				sbBuf.AppendFormat("{0:F3}", avModelVertices[i].y).Append(delimiter);
				sbBuf.AppendFormat("{0:F3}", avModelVertices[i].z).Append(delimiter);
			}
		}

		// remove the last delimiter
		if(sbBuf.Length > 0 && sbBuf[sbBuf.Length - 1] == delimiter)
		{
			sbBuf.Remove(sbBuf.Length - 1, 1);
		}

		return sbBuf.ToString();
	}

	// sets face model vertices from a csv line
	public bool SetFaceVerticesFromCsv(string sCsvLine)
	{
		if(sCsvLine.Length == 0)
			return false;

		// split the csv line in parts
		char[] delimiters = { ',' };
		string[] alCsvParts = sCsvLine.Split(delimiters);

		if(alCsvParts.Length < 1 || alCsvParts[0] != "fv")
			return false;

		int iIndex = 1;
		int iLength = alCsvParts.Length;

		if (iLength < (iIndex + 1))
			return false;

		// model vertices
		int vertCount = 0;
		int.TryParse(alCsvParts[iIndex], out vertCount);
		iIndex++;

		if (vertCount > 0)
		{
			if (avModelVertices == null || avModelVertices.Length != vertCount)
			{
				avModelVertices = new Vector3[vertCount];
			}

			for (int i = 0; i < vertCount && iLength >= (iIndex + 3); i++)
			{
				float x = 0f, y = 0f, z = 0f;

				float.TryParse(alCsvParts[iIndex], out x);
				float.TryParse(alCsvParts[iIndex + 1], out y);
				float.TryParse(alCsvParts[iIndex + 2], out z);
				iIndex += 3;

				avModelVertices[i] = new Vector3(x, y, z);
			}

			bGotModelVertices = true;
			//bGotModelVerticesFromDC = true;
		}

		faceMeshUpdateTime = Time.time;

		return true;
	}

	// gets face model UVs as csv line
	public string GetFaceUvsAsCsv()
	{
		// create the output string
		StringBuilder sbBuf = new StringBuilder();
		const char delimiter = ',';

		if (bGotModelVertices && avModelUV != null)
		{
			sbBuf.Append("fu").Append(delimiter);

			// face rect width & height
			sbBuf.AppendFormat("{0:F0}", faceRect.width).Append(delimiter);
			sbBuf.AppendFormat("{0:F0}", faceRect.height).Append(delimiter);

			// model UVs
			int uvCount = avModelUV.Length;
			sbBuf.Append(uvCount).Append(delimiter);

			for (int i = 0; i < uvCount; i++)
			{
				sbBuf.AppendFormat("{0:F3}", avModelUV[i].x).Append(delimiter);
				sbBuf.AppendFormat("{0:F3}", avModelUV[i].y).Append(delimiter);
			}
		}

		// remove the last delimiter
		if(sbBuf.Length > 0 && sbBuf[sbBuf.Length - 1] == delimiter)
		{
			sbBuf.Remove(sbBuf.Length - 1, 1);
		}

		return sbBuf.ToString();
	}

	// sets face model UVs from a csv line
	public bool SetFaceUvsFromCsv(string sCsvLine)
	{
		if(sCsvLine.Length == 0)
			return false;

		// split the csv line in parts
		char[] delimiters = { ',' };
		string[] alCsvParts = sCsvLine.Split(delimiters);

		if(alCsvParts.Length < 1 || alCsvParts[0] != "fu")
			return false;

		int iIndex = 1;
		int iLength = alCsvParts.Length;

		if (iLength < (iIndex + 2))
			return false;

		// face width & height
		float w = 0f, h = 0f;

		float.TryParse(alCsvParts[iIndex], out w);
		float.TryParse(alCsvParts[iIndex + 1], out h);
		iIndex += 2;

		faceRect.width = w; faceRect.height = h;

		// model UVs
		int uvCount = 0;
		if (iLength >= (iIndex + 1))
		{
			int.TryParse(alCsvParts[iIndex], out uvCount);
			iIndex++;
		}

		if (uvCount > 0)
		{
			if (avModelUV == null || avModelUV.Length != uvCount)
			{
				avModelUV = new Vector2[uvCount];
			}

			for (int i = 0; i < uvCount && iLength >= (iIndex + 2); i++)
			{
				float x = 0f, y = 0f;

				float.TryParse(alCsvParts[iIndex], out x);
				float.TryParse(alCsvParts[iIndex + 1], out y);
				iIndex += 2;

				avModelUV[i] = new Vector2(x, y);
			}
		}

		return true;
	}

	// gets face model triangles as csv line
	public string GetFaceTrianglesAsCsv()
	{
		// create the output string
		StringBuilder sbBuf = new StringBuilder();
		const char delimiter = ',';

		if (avModelTriangles != null)
		{
			sbBuf.Append("ft").Append(delimiter);

			// model triangles
			int triCount = avModelTriangles.Length;
			sbBuf.Append(triCount).Append(delimiter);

			for (int i = 0; i < triCount; i++)
			{
				sbBuf.Append(avModelTriangles[i]).Append(delimiter);
			}
		}

		// remove the last delimiter
		if(sbBuf.Length > 0 && sbBuf[sbBuf.Length - 1] == delimiter)
		{
			sbBuf.Remove(sbBuf.Length - 1, 1);
		}

		return sbBuf.ToString();
	}

	// sets the face model triangles from a csv line
	public bool SetFaceTrianglesFromCsv(string sCsvLine)
	{
		if(sCsvLine.Length == 0)
			return false;

		// split the csv line in parts
		char[] delimiters = { ',' };
		string[] alCsvParts = sCsvLine.Split(delimiters);

		if(alCsvParts.Length < 1 || alCsvParts[0] != "ft")
			return false;

		int iIndex = 1;
		int iLength = alCsvParts.Length;

		if (iLength < (iIndex + 1))
			return false;

		// model triangles
		int triCount = 0;
		int.TryParse(alCsvParts[iIndex], out triCount);
		iIndex++;

		if (triCount > 0)
		{
			if (avModelTriangles == null || avModelTriangles.Length != triCount)
			{
				avModelTriangles = new int[triCount];
			}

			for (int i = 0; i < triCount && iLength >= (iIndex + 1); i++)
			{
				int v = 0;

				int.TryParse(alCsvParts[iIndex], out v);
				iIndex++;

				avModelTriangles[i] = v;
			}

			bGotModelTriangles = true;
			bGotModelTrianglesFromDC = true;
		}

		return true;
	}
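
	// Usage sketch (illustrative, not part of the original asset): replays a recorded
	// face mesh from csv lines. The triangles and UVs only need to be applied once;
	// the vertices and face parameters are updated per recorded frame.
	//
	// FacetrackingManager ft = FacetrackingManager.Instance;
	// ft.SetFaceTrianglesFromCsv(savedTrianglesCsv); // once
	// ft.SetFaceUvsFromCsv(savedUvsCsv);             // once
	// ft.SetFaceVerticesFromCsv(nextFrameVertsCsv);  // per frame
	// ft.SetFaceParamsFromCsv(nextFrameParamsCsv);   // per frame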

}