<!-- index.html -->
  1. <!DOCTYPE html>
  2. <html lang="en">
  3. <meta charset="utf-8" />
  4. <head>
  5. <script src="./model/jszip.min.js"></script>
  6. <script src="./model/tf.min.js"></script>
  7. <script>
  8. async function main() {
  9. const video = document.querySelector('video');
  10. const canvas = document.querySelector('canvas');
  11. const select = document.querySelector('select');
  12. const startRecordingBtn = document.getElementById("startRecording")
  13. const load = document.getElementById("load")
  14. let show = false
  15. const setTime = setInterval(()=>{
  16. if(show){
  17. startRecordingBtn.classList.remove("disabled")
  18. startRecordingBtn.disabled = false
  19. load.classList.remove('loading')
  20. clearInterval(setTime)
  21. }
  22. },500)
  23. video.width = 700;
  24. video.height = 700;
  25. const webcam = await tf.data.webcam(video);
  26. const model = await tf.loadGraphModel('/model/model.json');
  27. // Set initial recurrent state
  28. let [r1i, r2i, r3i, r4i] = [tf.tensor(0.), tf.tensor(0.), tf.tensor(0.), tf.tensor(0.)];
  29. // Set downsample ratio
  30. const downsample_ratio = tf.tensor(0.5);
  31. // Inference loop
  32. while (true) {
  33. await tf.nextFrame();
  34. const img = await webcam.capture();
  35. const src = tf.tidy(() => img.expandDims(0).div(255)); // normalize input
  36. const [fgr, pha, r1o, r2o, r3o, r4o] = await model.executeAsync(
  37. {src, r1i, r2i, r3i, r4i, downsample_ratio}, // provide inputs
  38. ['fgr', 'pha', 'r1o', 'r2o', 'r3o', 'r4o'] // select outputs
  39. );
  40. show = true
  41. // Draw the result based on selected view
  42. const viewOption = "white";
  43. if (viewOption === 'recurrent1') {
  44. drawHidden(r1o, canvas);
  45. } else if (viewOption === 'recurrent2') {
  46. drawHidden(r2o, canvas);
  47. } else if (viewOption === 'recurrent3') {
  48. drawHidden(r3o, canvas);
  49. } else if (viewOption === 'recurrent4') {
  50. drawHidden(r4o, canvas);
  51. } else if (viewOption === 'white') {
  52. drawMatte(fgr.clone(), pha.clone(), canvas);
  53. canvas.style.background = 'rgb(255, 255, 255)';
  54. } else if (viewOption === 'green') {
  55. drawMatte(fgr.clone(), pha.clone(), canvas);
  56. canvas.style.background = 'rgb(120, 255, 155)';
  57. } else if (viewOption === 'alpha') {
  58. drawMatte(null, pha.clone(), canvas);
  59. canvas.style.background = 'rgb(0, 0, 0)';
  60. } else if (viewOption === 'foreground') {
  61. drawMatte(fgr.clone(), null, canvas);
  62. }
  63. // Dispose old tensors.
  64. tf.dispose([img, src, fgr, pha, r1i, r2i, r3i, r4i]);
  65. // Update recurrent states.
  66. [r1i, r2i, r3i, r4i] = [r1o, r2o, r3o, r4o];
  67. }
  68. }
  69. async function drawMatte(fgr, pha, canvas){
  70. const rgba = tf.tidy(() => {
  71. const rgb = (fgr !== null) ?
  72. fgr.squeeze(0).mul(255).cast('int32') :
  73. tf.fill([pha.shape[1], pha.shape[2], 3], 255, 'int32');
  74. const a = (pha !== null) ?
  75. pha.squeeze(0).mul(255).cast('int32') :
  76. tf.fill([fgr.shape[1], fgr.shape[2], 1], 255, 'int32');
  77. return tf.concat([rgb, a], -1);
  78. });
  79. fgr && fgr.dispose();
  80. pha && pha.dispose();
  81. const [height, width] = rgba.shape.slice(0, 2);
  82. const pixelData = new Uint8ClampedArray(await rgba.data());
  83. const imageData = new ImageData(pixelData, width, height);
  84. canvas.width = width;
  85. canvas.height = height;
  86. canvas.getContext('2d').putImageData(imageData, 0, 0);
  87. rgba.dispose();
  88. }
  89. async function drawHidden(r, canvas) {
  90. const rgba = tf.tidy(() => {
  91. r = r.unstack(-1)
  92. r = tf.concat(r, 1)
  93. r = r.split(4, 1)
  94. r = tf.concat(r, 2)
  95. r = r.squeeze(0)
  96. r = r.expandDims(-1)
  97. r = r.add(1).mul(127.5).cast('int32')
  98. r = r.tile([1, 1, 3])
  99. r = tf.concat([r, tf.fill([r.shape[0], r.shape[1], 1], 255, 'int32')], -1)
  100. return r;
  101. });
  102. const [height, width] = rgba.shape.slice(0, 2);
  103. const pixelData = new Uint8ClampedArray(await rgba.data());
  104. const imageData = new ImageData(pixelData, width, height);
  105. canvas.width = width;
  106. canvas.height = height;
  107. canvas.getContext('2d').putImageData(imageData, 0, 0);
  108. rgba.dispose();
  109. }
  110. window.addEventListener('load', main);
  111. </script>
  112. <title>脊柱识别</title>
  113. <style>
  114. body{
  115. height: 100vh;
  116. margin: 0;
  117. padding: 0;
  118. position: relative;
  119. }
  120. #myCanvas{
  121. width: 700px;
  122. height: 700px;
  123. border: 2px slategray solid;
  124. border-radius: 8px;
  125. }
  126. button{
  127. padding: 10px 15px;
  128. border: none;
  129. border-radius: 8px;
  130. cursor: pointer;
  131. }
  132. .leftCanvasLine{
  133. position: absolute;
  134. height: 99%;
  135. top: 50%;
  136. left: 58%;
  137. transform: translate(0,-50%);
  138. border: 2px dashed #fc5531;
  139. }
  140. .rightCanvasLine{
  141. position: absolute;
  142. height: 99%;
  143. top: 50%;
  144. left: 42%;
  145. transform: translate(0,-50%);
  146. border: 2px dashed #fc5531;
  147. }
  148. .loading{
  149. display: block !important;
  150. position: fixed;
  151. width: 100vw;
  152. height: 100vh;
  153. background-color: rgba(255, 255, 255, 0.9);
  154. top: 0;
  155. }
  156. .loading div{
  157. text-align: center;
  158. position: absolute;
  159. top: 50%;
  160. left: 50%;
  161. transform: translate(-50%,-50%);
  162. }
  163. .loading div img{
  164. width: 100px;
  165. }
  166. </style>
  167. </head>
  168. <body>
  169. <div style="text-align: center;height: 100%;">
  170. <h3 style="padding: 10px 0;margin: 0;opacity: 0.7;">这是一个基于tensorflow处理的检测脊柱是否弯曲的一个平台,在一段时间内,一个人从直立到弯腰的过程,通过AI算法来判断这个人的脊柱是否有问题</h3>
  171. <h1 style="opacity: 0.7;">请进入灰色框里面,并站在虚线中间的位置</h1>
  172. <button id="startRecording" disabled>开始识别</button>
  173. <div style="position: relative;margin-top: 15px;">
  174. <video id="preview" style="display: none;"></video>
  175. <canvas id="myCanvas"></canvas>
  176. <div class="leftCanvasLine"></div>
  177. <div class="rightCanvasLine"></div>
  178. </div>
  179. </div>
  180. <div class="loading" id="load" style="display: none;">
  181. <div>
  182. <img src="./loading.gif" alt="">
  183. <h3>加载中...</h3>
  184. </div>
  185. </div>
  186. </body>
  187. </html>
  188. <script src="./index.js"></script>