Node.js and OpenCV

I can't get face tracking and detection working with the npm opencv package. Right now I'm just trying to get it to draw a circle.

I've listed the error and the files below. I'm not sure whether it's just a binding problem and, if so, what steps I should take to fix it.

OpenCV (2.4.8.2) was installed via brew, FYI – the version might be part of the problem, I guess.

Error:

    /Users/gwilliams2/Sites/facetracking/node_modules/opencv/lib/opencv.js:29
      var face_cascade = new cv.CascadeClassifier(classifier);
                         ^
    TypeError: Error loading file
        at Matrix.matrix.detectObject (/Users/gwilliams2/Sites/facetracking/node_modules/opencv/lib/opencv.js:29:23)
        at /Users/gwilliams2/Sites/facetracking/server.js:170:8
        at Socket.<anonymous> (/Users/gwilliams2/Sites/facetracking/server.js:161:11)
        at Socket.EventEmitter.emit [as $emit] (events.js:95:17)
        at SocketNamespace.handlePacket (/Users/gwilliams2/Sites/facetracking/node_modules/socket.io/lib/namespace.js:335:22)
        at Manager.onClientMessage (/Users/gwilliams2/Sites/facetracking/node_modules/socket.io/lib/manager.js:488:38)
        at WebSocket.Transport.onMessage (/Users/gwilliams2/Sites/facetracking/node_modules/socket.io/lib/transport.js:387:20)
        at Parser.<anonymous> (/Users/gwilliams2/Sites/facetracking/node_modules/socket.io/lib/transports/websocket/hybi-16.js:39:10)
        at Parser.EventEmitter.emit (events.js:95:17)
        at finish (/Users/gwilliams2/Sites/facetracking/node_modules/socket.io/lib/transports/websocket/hybi-16.js:288:16)
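
Since the stack trace shows new cv.CascadeClassifier(classifier) failing with "Error loading file", one quick way to tell a bad cascade path apart from a broken binding is to check that the XML actually exists at the path being handed to detectObject. A minimal sketch, assuming the bundled cascade location mentioned in the server.js comment further down:

    // Hypothetical sanity check: does the cascade XML exist where we think it does?
    var fs = require('fs');
    var path = require('path');

    // Assumed location – the cascade shipped inside the npm opencv module.
    var cascade = path.join(__dirname,
        'node_modules/opencv/data/haarcascade_frontalface_default.xml');

    if (fs.existsSync(cascade)) {
        console.log('Cascade found: ' + cascade);
    } else {
        console.log('Cascade missing – detectObject will fail with "Error loading file"');
    }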

client.js

    var APP = {
        // setup the web socket
        socket: io.connect('http://gaz.local:3000'),
        video: null,
        canvas: null,
        imgData: null,

        init: function (){
            // get elements
            APP.video = $('#myVideo')[0];
            APP.canvas = $('#myCanvas')[0];

            // start app
            APP.main();
        },

        main: function(){
            // if we can get a webcam
            if(APP.hasGetUserMedia()){
                navigator.webkitGetUserMedia(
                    {video:true, audio:false},
                    APP.videoSuccess,
                    APP.videoError
                );
            } else {
                alert('Sorry, you do not have webcam access');
            }
        },

        hasGetUserMedia: function (){
            return !!(navigator.getUserMedia || navigator.webkitGetUserMedia ||
                      navigator.mozGetUserMedia || navigator.msGetUserMedia);
        },

        videoSuccess: function (stream){
            // set the video to stream
            APP.video.src = window.URL.createObjectURL(stream);

            // set canvas size
            APP.canvas.width = APP.video.width;
            APP.canvas.height = APP.video.height;

            // draw video
            var i = 0;
            setTimeout(function(){
                // draw the image to the canvas
                APP.canvas.getContext('2d').drawImage(APP.video, 0, 0);
                APP.imgData = APP.canvas.toDataURL('image/jpeg');
                APP.imgData = APP.imgData.replace('data:image/jpeg;base64,', '');

                // send the file in json format to the server
                var jsonData = JSON.stringify({imgData: APP.imgData, count: i});
                APP.socket.emit('update', jsonData);

                // APP.socket.on('put_down', function (data) {
                //     console.log(data);
                // });

                // update the counter
                i++;
            }, 50);
        },

        videoError: function (err){
            alert('Error: ' + err);
        },
    };

    $(function (){
        APP.init();
    });

server.js

    /*
        Setup a server, RTC and face tracking

        Install opencv
        brew install opencv

        NPM
        https://www.npmjs.org/package/fs
        https://www.npmjs.org/package/http
        https://www.npmjs.org/package/express
        https://www.npmjs.org/package/socket.io
        https://www.npmjs.org/package/opencv
    */
    var APP = {
        // include some scripts
        server: null,
        port: 3000,
        express: require('express'),
        app: null,
        mdb: require('mongodb'),
        dbUrl: 'mongodb://127.0.0.1:27017/test',
        db: null,
        io: null,
        cv: require('opencv'),
        fs: require("fs"),

        init: function (){
            // connect stuff up
            APP.app = APP.express();
            APP.server = require('http').createServer(APP.app).listen(APP.port, '0.0.0.0');
            APP.io = require('socket.io').listen(APP.server);

            // http routing
            APP.routing();

            // connect the websocket
            APP.io.on('connection', function (socket){
                console.log('Server started on port ' + APP.port);
                APP.main(socket);
            });
        },

        // open the db
        openDB: function (){
            APP.mdb.connect(APP.dbURL, function(err, db){
                if(err) throw err;
                APP.db = db;
            });
        },

        // close the db
        closeDB: function (){
            APP.db.close();
        },

        // insert a file to the db
        dbInsert: function (col, data){
            // open the db
            APP.openDB();

            var collection = APP.db.collection(col);
            collection.insert(data, function(err, docs){
                if(err){
                    console.warn(err.message);
                } else {
                    console.log('Successfully inserted record');
                }
            });

            // close the db
            APP.closeDB();
        },

        // insert a file to the db
        dbUpdate: function (col, crit, data){
            // open the db
            APP.openDB();

            var collection = APP.db.collection(col);
            collection.update(crit, {$set: {hi: 'there'}}, {w:1}, function (){
                if(err){
                    console.warn(err.message);
                } else {
                    console.log('Successfully updated record');
                }
            });

            // close the db
            APP.closeDB();
        },

        // find a file in the db
        dbFind: function (col){
            // open the db
            APP.openDB();

            var collection = APP.db.collection(col);
            collection.find().toArray(function(err, results) {
                console.dir(results);
            });

            // close the db
            APP.closeDB();
        },

        // routing files
        routing: function (){
            // set directory to use for files prefixed with /library
            APP.app.use('/library', APP.express.static(__dirname + '/library'));

            // index page
            APP.app.get('/*', function (req, res){
                // get query
                // console.log(req.query);

                // get file
                res.sendfile(__dirname + '/' + req.route.params);
            });
        },

        main: function (socket){
            // APP.io.sockets.emit('put_down', {'fingers': 's'});

            socket.on('update', function (data) {
                // console.log(data);

                // get the data
                data = JSON.parse(data);

                // create the file
                var filename = 'file' + data.count + '.jpg';
                APP.fs.writeFile(filename, data.imgData, 'base64', function(err) {
                    if(err) console.log(err);
                });

                // open file with open cv
                APP.cv.readImage(filename, function(err, im){
                    // handle errors
                    if(err) return err;
                    console.log(im);

                    // /Users/gwilliams2/Sites/facetracking/node_modules/opencv/data/haarcascade_frontalface_default.xml
                    im.detectObject('./haarcascade_frontalface_default.xml', {}, function(err, faces){
                        for (var i = 0; i < faces.length; i++){
                            var x = faces[i];
                            im.ellipse(x.x + x.width/2, x.y + x.height/2, x.width/2, x.height/2);
                        }
                        im.save('./out.png');
                    });
                });
            });
        }
    };

    // run the script
    APP.init();

Solved. First, I was linking to the XML file incorrectly – the path was wrong, as suggested in the comments above. Then I noticed that the buffer for the file being opened had already been closed: since everything is asynchronous, the code in the server that analyses the file needs to be nested inside the file-save callback.
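
For reference, here is a minimal sketch of what that change looks like in the main handler of server.js. It is only an illustration under two assumptions: the cascade XML lives at the node_modules/opencv/data path mentioned in the code comment above, and the readImage/detectObject calls are moved inside the fs.writeFile callback so OpenCV only reads the frame once it has been fully written to disk.

    main: function (socket){
        socket.on('update', function (data) {
            // get the data
            data = JSON.parse(data);

            var filename = 'file' + data.count + '.jpg';
            // assumed location of the bundled cascade (absolute path, not './...')
            var cascade = __dirname + '/node_modules/opencv/data/haarcascade_frontalface_default.xml';

            // write the frame to disk first...
            APP.fs.writeFile(filename, data.imgData, 'base64', function (err) {
                if (err) return console.log(err);

                // ...then open it with OpenCV only once the write has finished
                APP.cv.readImage(filename, function (err, im) {
                    if (err) return console.log(err);

                    im.detectObject(cascade, {}, function (err, faces) {
                        if (err) return console.log(err);

                        // draw an ellipse around each detected face and save the result
                        for (var i = 0; i < faces.length; i++) {
                            var f = faces[i];
                            im.ellipse(f.x + f.width / 2, f.y + f.height / 2, f.width / 2, f.height / 2);
                        }
                        im.save('./out.png');
                    });
                });
            });
        });
    }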