zengyicheng committed 4 years ago
Commit 9cb1e50795
1 file changed, 121 insertions(+), 52 deletions(-)

src/components/function.vue  +121 -52

@@ -1,7 +1,7 @@
 <template>
   <div style="width: 100%; height: calc(100% - 67.5px); background: #fff">
     <div class="img">
-      <div class="left">
+      <div class="left" style="width: 380px">
         <div class="pFace">
           <img src="../assets/img/face.png" alt />
         </div>
@@ -20,7 +20,13 @@
             autoplay
             v-show="isCamera"
           ></video>
-          <canvas ref="canvasDOM" width="195" height="156" class="kuang" v-show="isCamera"></canvas>
+          <canvas
+            ref="canvasDOM"
+            width="195"
+            height="156"
+            class="kuang"
+            v-show="isCamera"
+          ></canvas>
         </div>
         <div id="tou" ref="dv1">
           <img :src="img[2]" alt />
@@ -62,14 +68,31 @@
           <div class="save" @click="photograph" v-if="isCamera">
             <img src="../assets/img/save.png" alt />
           </div>
-          <div class="spot" @click="fnRun" v-if="isCamera">
-            开始识别
-          </div>
+          <div class="spot" @click="fnRun" v-if="isCamera">开始识别</div>
         </div>
         <!-- Confirm -->
+        <div v-show="false" class="isPhoto">
+          <!-- canvas capture of the stream -->
+          <canvas ref="canvas" width="300" height="240" v-show="false"></canvas>
+        </div>
         <div v-if="isCamera" class="isPhoto">
           <!-- canvas capture of the stream -->
-          <canvas ref="canvas" width="300" height="240" v-if="isCamera"></canvas>
+          <span>截取的图片</span>
+          <div style="width:500px;overflow:auto;display:flex">
+            <img
+              v-for="(res, index) in sampleArr"
+              :key="index"
+              :src="res.img[0]"
+              alt=""
+              style="margin-right:10px"
+            />
+          </div>
+        </div>
+        <div class="sbh" v-if="resultImg.name">
+          <div class="spotPhoto">
+            <img :src="resultImg.img[0]" alt="" />
+          </div>
+          <div class="spotNumber">{{ resultImg.name }}</div>
         </div>
       </div>
     </div>
@@ -88,7 +111,7 @@ export default {
         require("../assets/img/light/screan.png"),
         require("../assets/img/ai.png"),
         require("../assets/img/tou1.png"),
-        require("../assets/img/policeNew.png")
+        require("../assets/img/policeNew.png"),
       ],
       isCamera: false,
       count: 0,
@@ -96,21 +119,26 @@ export default {
       isdetected: "请您保持脸部在画面中央",
       videoEl: {},
       canvasEL: {},
+      resultImg: {
+        img: [],
+        name: "",
+      },
       // Preset sample images; local paths, URLs, and base64 are supported
       sampleArr: [
-        {
-          name: "编号1",
-          img: []
-        }
+        // {
+        // name: "编号1",
+        // img: []
+        // }
       ],
       // Images to match; local paths, URLs, and base64 are supported
       detArr: [
         //"" image 1
       ],
+      numberOne: 0,
       // Match results
       resultArr: [],
       // Encoded result of the face-matching descriptor array objects
-      faceMatcher: null
+      faceMatcher: null,
     };
   },
   methods: {
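
For orientation, a sketch of the shapes the new data fields are expected to hold, inferred from how the methods in the hunks below use them; every concrete value here is a hypothetical placeholder.

// sampleArr: one entry per captured reference face, labelled for the matcher.
// detArr: the single current frame that fnRun matches against the samples.
// resultImg: what the template renders once a best match is found.
const exampleState = {
  sampleArr: [{ name: "编号:1", img: ["data:image/jpeg;base64,..."] }],
  detArr: ["data:image/jpeg;base64,..."],
  resultImg: { name: "编号:1", img: ["data:image/jpeg;base64,..."] },
};
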
@@ -120,9 +148,9 @@ export default {
       // HTML5 API for accessing the computer's camera
       navigator.mediaDevices
         .getUserMedia({
-          video: true
+          video: true,
         })
-        .then(success => {
+        .then((success) => {
           _this.isCamera = true;
           // Camera opened successfully
           _this.$refs["video"].srcObject = success;
@@ -143,10 +171,10 @@ export default {
             tracker.setStepSize(2);
             tracker.setEdgesDensity(0.1);
             window.tracking.track("#video_cam", tracker, { camera: true });
-            tracker.on("track", function(event) {
+            tracker.on("track", function (event) {
               const { autoCaptureTrackTraking } = _this;
               canvasContext.clearRect(0, 0, canvas.width, canvas.height);
-              event.data.forEach(function({ x, y, width, height }) {
+              event.data.forEach(function ({ x, y, width, height }) {
                 canvasContext.strokeStyle = "#FFFF";
                 canvasContext.strokeRect(x + 10, y - 30, width - 20, height);
                 canvasContext.font = "11px Helvetica";
@@ -168,7 +196,7 @@ export default {
             });
           }
         })
-        .catch(error => {
+        .catch((error) => {
           // console.error("摄像头开启失败,请检查摄像头是否可用!");
           _this.$message.error("摄像头开启失败,请检查摄像头是否可用!");
         });
@@ -187,12 +215,18 @@ export default {
       let size = (fileLength / 1024).toFixed(2);
       console.log(size); // captured-photo info to upload; call an API to upload the image .........
 
-      this.detArr.push(imgBase64);
-      var json = {name:"",img:[]};
-      json.name = "编号:" + this.number++;
+      // this.detArr.push(imgBase64);
+      var json = { name: "", img: [] };
+      this.number = this.number + 1;
+      json.name = "编号:" + this.number;
       json.img.push(imgBase64);
       this.sampleArr.push(json);
-      this.fnInit()
+      if (this.sampleArr.length > 0) {
+        var a = document.getElementsByClassName("spot");
+        a[0].style.display = "block";
+        this.fnsample();
+      }
+      //this.fnInit()
       // this.resultArr.push(imgBase64);
 
       // Save locally
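
A condensed sketch of the snapshot step in photograph() above: draw the current frame onto a canvas, export it as a compressed JPEG data URL, and estimate its size; the function name and parameters are placeholders.

// Capture one frame from a playing <video> into a base64 JPEG string.
function captureFrame(videoEl, canvasEl, quality = 0.7) {
  const ctx = canvasEl.getContext("2d");
  ctx.drawImage(videoEl, 0, 0, canvasEl.width, canvasEl.height);
  const dataUrl = canvasEl.toDataURL("image/jpeg", quality);
  // The data-URL length tracks the encoded size; divide by 1024 for a rough KB figure.
  console.log((dataUrl.length / 1024).toFixed(2) + " KB");
  return dataUrl;
}
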
@@ -214,7 +248,7 @@ export default {
       }
       let stream = this.$refs["video"].srcObject;
       let tracks = stream.getTracks();
-      tracks.forEach(track => {
+      tracks.forEach((track) => {
         track.stop();
       });
       this.$refs["video"].srcObject = null;
@@ -226,36 +260,53 @@ export default {
       // Load the model
       await faceapi.loadFaceRecognitionModel("/static/models");
       // Build the face-matching descriptor array objects; sample images are encoded synchronously
-      // const labeledFaceDescriptors = await Promise.all(
-      //   this.sampleArr.map(async item => {
-      //     // Temporary descriptor data: convert each image object into a data-matrix object
-      //     let descriptors = [];
-      //     for (let image of item.img) {
-      //       const imageEl = await faceapi.fetchImage(image);
-      //       descriptors.push(await faceapi.computeFaceDescriptor(imageEl));
-      //     }
-      //     // Return the image owner (label) and the array of encoded descriptors
-      //     return new faceapi.LabeledFaceDescriptors(item.name, descriptors);
-      //   })
-      // );
-      // // Encoded result of the face-matching descriptor array objects
-      // this.faceMatcher = new faceapi.FaceMatcher(labeledFaceDescriptors);
+    },
+    async fnsample() {
+      const labeledFaceDescriptors = await Promise.all(
+        this.sampleArr.map(async (item) => {
+          // Temporary descriptor data: convert each image object into a data-matrix object
+          let descriptors = [];
+          for (let image of item.img) {
+            const imageEl = await faceapi.fetchImage(image);
+            descriptors.push(await faceapi.computeFaceDescriptor(imageEl));
+          }
+          // Return the image owner (label) and the array of encoded descriptors
+          return new faceapi.LabeledFaceDescriptors(item.name, descriptors);
+        })
+      );
+      // Encoded result of the face-matching descriptor array objects
+      this.faceMatcher = new faceapi.FaceMatcher(labeledFaceDescriptors);
     },
     // Iterate over the images to match and run recognition; smaller distance values mean a more precise match
     fnRun() {
-      this.detArr.forEach(async img => {
+      let ctx = this.$refs["canvas"].getContext("2d");
+      // Render the current video frame onto the canvas
+      ctx.drawImage(this.$refs["video"], 0, 0, 300, 240);
+      // Convert to base64, change the image format, and compress the image quality
+      let imgBase64 = this.$refs["canvas"].toDataURL("image/jpeg", 0.7); // converted from bytes to KB to check the size
+      this.detArr = [];
+      this.detArr.push(imgBase64);
+      this.detArr.forEach(async (img) => {
         let ts = Date.now();
         // Convert the image object into a data-matrix object and run the match
         const inputEl = await faceapi.fetchImage(img);
         const inputDescriptor = await faceapi.computeFaceDescriptor(inputEl);
         const bestMatch = await this.faceMatcher.findBestMatch(inputDescriptor);
         // Result
+        this.resultArr = [];
         this.resultArr.push({
           target: img,
           result: bestMatch.toString(),
           time: Date.now() - ts + "ms",
-          fps: Math.round(1000 / (Date.now() - ts))
+          fps: Math.round(1000 / (Date.now() - ts)),
         });
+        console.log(this.resultArr);
+        for (var i = 0; i < this.sampleArr.length; i++) {
+          if (this.sampleArr[i].name == bestMatch.label) {
+            this.resultImg.name = this.sampleArr[i].name;
+            this.resultImg.img[0] = this.sampleArr[i].img[0];
+          }
+        }
       });
     },
     // Swap the images to match
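
The new fnsample/fnRun pair above follows face-api.js's enrol-then-match flow: encode each labelled sample into descriptors, build a FaceMatcher, then compare a probe frame against it. A minimal sketch of that flow outside the component; the model path mirrors the one used above, and the sample/probe arguments are placeholders.

// Enrol labelled samples, then match one probe image against them.
async function matchFace(samples, probeDataUrl) {
  await faceapi.loadFaceRecognitionModel("/static/models");
  const labeled = await Promise.all(
    samples.map(async ({ name, img }) => {
      const descriptors = [];
      for (const src of img) {
        const el = await faceapi.fetchImage(src);
        descriptors.push(await faceapi.computeFaceDescriptor(el));
      }
      return new faceapi.LabeledFaceDescriptors(name, descriptors);
    })
  );
  const matcher = new faceapi.FaceMatcher(labeled);
  const probeEl = await faceapi.fetchImage(probeDataUrl);
  const probeDescriptor = await faceapi.computeFaceDescriptor(probeEl);
  return matcher.findBestMatch(probeDescriptor); // exposes .label and .distance
}
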
@@ -264,7 +315,7 @@ export default {
       this.detArr = [];
       this.resultArr = [];
       // Display the files as images and recognize them
-      e.target.files.forEach(async file => {
+      Array.from(e.target.files).forEach(async (file) => {
         let ts = Date.now();
         let img = await faceapi.bufferToImage(file);
         const inputDescriptor = await faceapi.computeFaceDescriptor(img);
@@ -275,10 +326,10 @@ export default {
           target: file.name,
           result: bestMatch.toString(),
           time: Date.now() - ts + "ms",
-          fps: Math.round(1000 / (Date.now() - ts))
+          fps: Math.round(1000 / (Date.now() - ts)),
         });
       });
-    }
+    },
   },
   mounted() {
     // console.log(this.$store.state.function);
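
The fnChange handler in the hunks above matches uploaded files instead of camera frames; a compact sketch of that path, reusing an already-built matcher (the handler name is hypothetical).

// Match a user-selected file against an existing faceapi.FaceMatcher.
async function onFileChosen(event, matcher) {
  const file = event.target.files[0];
  const imgEl = await faceapi.bufferToImage(file); // File/Blob -> HTMLImageElement
  const descriptor = await faceapi.computeFaceDescriptor(imgEl);
  return matcher.findBestMatch(descriptor);
}
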
@@ -296,6 +347,7 @@ export default {
     // document.head.appendChild(_s1);
     // console.log(this.$store.state.function);
     this.$nextTick(() => {
+      this.fnInit();
       // this.fnInit().then(() => this.fnRun());
     });
 
@@ -305,7 +357,7 @@ export default {
     }
     this.videoEl = this.$refs.video;
     this.canvasEL = this.$refs.canvasDOM;
-  }
+  },
 };
 </script>
 
@@ -449,7 +501,8 @@ body {
 .close > img,
 .save > img,
 .pFace > img,
-.open > img {
+.open > img,
+.spotPhoto > img {
   width: 100%;
   height: 100%;
 }
@@ -473,15 +526,31 @@ body {
   color: #ccc;
 }
 
-.spot{
+.spot {
   background: #64ff64;
-    color: #fff;
-    width: 140px;
-    height: 40px;
-    text-align: center;
-    line-height: 40px;
-    border-radius: 20px;
-    margin-top: 25px;
-    cursor: pointer;
+  color: #fff;
+  width: 140px;
+  height: 40px;
+  text-align: center;
+  line-height: 40px;
+  border-radius: 20px;
+  margin-top: 25px;
+  cursor: pointer;
+  display: none;
+}
+
+.sbh {
+  text-align: center;
+  margin: 0 auto;
+}
+
+.spotPhoto {
+  width: 300px;
+  height: 245px;
+}
+
+.spotNumber {
+  margin-top: 20px;
+  font-size: 20px;
 }
 </style>