使用 Node.js 和 JavaScript 在浏览器中显示 Kinect 2 的输出

我试图在Web浏览器上显示Kinect 2的输出,按照这里给出的教程, http://www.webondevices.com/xbox-kinect-2-javascript-gesture-tracking/

我已经能够在浏览器控制台中使用server.js中的代码将设备输出作为JSON对象

// server.js — read body frames from a Kinect 2 and broadcast them to every
// connected browser client over socket.io.
var Kinect2 = require('kinect2');
var express = require('express');

var app = express();
var server = require('http').createServer(app);
var io = require('socket.io').listen(server);
var kinect = new Kinect2();

// Serve the front-end page and its scripts as static assets.
app.use(express.static(__dirname + '/View'));
app.use(express.static(__dirname + '/Script'));

if (kinect.open()) {
    console.log('kinect opened');

    server.listen(8000);
    console.log('Server listening on port 8000');

    // Forward every body frame from the sensor to all connected sockets.
    kinect.on('bodyFrame', function (bodyFrame) {
        io.sockets.emit('bodyFrame', bodyFrame);
    });
    kinect.openBodyReader();

    // Root route serves the canvas page.
    app.get('/', function (req, res) {
        res.sendFile(__dirname + '/View/output.html');
    });

    // Shut the sensor down after 100 seconds.
    setTimeout(function () {
        kinect.close();
        console.log("Kinect Closed");
    }, 100000);
}

output.html,我想要在canvas上显示输出的页面看起来像这样

 <html>
 <head>
     <title> Kinect Output On Canvas </title>
     <script src="https://cdn.socket.io/socket.io-1.3.5.js"></script>
     <link rel="stylesheet" href="/style.css" />
 </head>
 <body>
     <h1>Kinect &amp; HTML5 WebSockets</h1>
     <canvas id="canvas" width="640" height="480"></canvas>
     <script>
         var socket = io.connect('http://localhost:8000/');
         socket.on('bodyFrame', interpretData);

         function interpretData(bodyFrame) {
             // Web Socket message:
             // outputs each bodyFrame as a JSON object, 30+ frames/objects per second.
             console.log(bodyFrame);
         }
     </script>
 </body>
 <!-- closing tag was missing in the original document -->
 </html>

每个JSON对象的结构显示每个骨架跟踪的位置是这样的

  { bodyIndex: 5, tracked: true, trackingId: '72057594038115298', leftHandState: 1, rightHandState: 1, joints: [ { depthX: 0.24323934316635132, depthY: 0.5925129055976868, colorX: 0.33547070622444153, colorY: 0.6129662394523621, cameraX: -0.34261977672576904, cameraY: -0.10602515190839767, cameraZ: 0.9753329753875732, orientationX: -0.04046249017119408, orientationY: 0.9915661215782166, orientationZ: -0.05280650407075882, orientationW: 0.11122455447912216 }, { depthX: 0.21760234236717224, depthY: 0.3140539526939392, colorX: 0.31521913409233093, colorY: 0.2960273027420044, cameraX: -0.36364009976387024, cameraY: 0.19814369082450867, cameraZ: 0.9404330253601074, orientationX: -0.04830155894160271, orientationY: 0.9615150094032288, orientationZ: -0.04574603587388992, orientationW: 0.26657652854919434 }, 

……每个被跟踪骨架的 joints 数组中共有 24 个结构相似的对象,分别对应该骨架的 24 个关节。

为了在浏览器中显示一帧画面,我尝试了:

  // Grab the canvas 2D drawing context, paint a small red square, and cache
  // its pixel data so it can be stamped elsewhere with putImageData.
  var canvasElement = document.getElementById('canvas');
  var ctx = canvasElement.getContext('2d');
  ctx.fillStyle = "red";
  ctx.fillRect(10, 10, 20, 20);
  var imgData = ctx.getImageData(10, 10, 15, 15);

在接收到的每个JSON对象的for循环中

  // Stamp the cached red-square pixels (imgData captured above) at the given
  // canvas position, once per received frame.
  ctx.putImageData(imgData, x, y); // x and y are the depth x positions of left and right hands

这个输出在 canvas 上是一个方形点,而在 canvas 的左上角还有另一个点——我明白为什么,因为这正是传给它的坐标。我想知道应如何解读 joints 数组中的关节数据,并在浏览器中绘制这些跟踪点,从而显示出一帧完整的骨架。我正在用 Kinect Studio v2.0 桌面应用程序对照实际的跟踪结果和视频输出进行检查。

任何建议将不胜感激。

下面是能在浏览器屏幕上成功绘制出被跟踪骨架的输出。在 output.html 中,socket.io 接收 JSON 对象,并用 JavaScript 根据其中的 depthX/depthY 参数在屏幕上为每个关节绘制对应的点。output.html 中的代码如下:

  // Render one Kinect body frame: draw a red filled circle for every joint of
  // every tracked body on the canvas.
  //
  // bodyFrame.bodies      - array of body objects from the 'bodyFrame' socket event
  // body.tracked          - true only while the sensor is actively tracking that body
  // joint.depthX / depthY - normalized (0..1) depth-camera coordinates, scaled up
  //                         to canvas pixels before drawing
  function interpretData(bodyFrame) {
      // Clear the previous frame. Use the canvas owned by the context instead
      // of the undefined global `c` the original referenced (never declared in
      // the visible code). Per-frame console.log calls were also removed: at
      // 30+ frames/second they flood the console.
      var canvas = ctx.canvas;
      ctx.clearRect(0, 0, canvas.width, canvas.height);

      for (var i = 0; i < bodyFrame.bodies.length; i++) {
          var body = bodyFrame.bodies[i];
          if (body.tracked !== true) {
              continue; // skip bodies the sensor is not currently tracking
          }
          for (var j = 0; j < body.joints.length; j++) {
              var joint = body.joints[j];
              ctx.fillStyle = "#FF0000";
              ctx.beginPath();
              // depthX/depthY are in [0, 1]; multiplied by a static 400 to
              // spread the skeleton across the canvas — without it the whole
              // projection clusters in the top-left corner.
              ctx.arc(joint.depthX * 400, joint.depthY * 400, 10, 0, Math.PI * 2, true);
              ctx.closePath();
              ctx.fill(); // one filled circle per joint
          }
      }
  }