15_faceDetection.py

#!/usr/bin/env python
#version : 2023.12.31
#language : ch
# Face-detection demo: runs a YOLO2 face model on camera frames and draws the results on the LCD.
from maix import camera
from maix import display
from maix import image
from maix import nn
from maix.nn import decoder
import os

ScreenOrientation = False

# Detect which camera geometry is installed from the presence of the config file.
try:
    if os.path.exists("/etc/cameraSize.cfg"):
        cameraSize = True
    else:
        cameraSize = False
except:
    cameraSize = False

def getLcdRotation(cameraCapture):
    # Dispatch to the rotation helper that matches the detected camera size.
    global cameraSize
    if cameraSize:
        return lcdRotationNew(cameraCapture)
    else:
        return lcdRotation(cameraCapture)

def lcdRotationNew(inputImg):
    # Rotation helper for the 320x240 camera: rotate only when the screen is portrait.
    global cameraSize, ScreenOrientation
    imageRotationBuffer = inputImg.crop(0, 0, 320, 240)
    if ScreenOrientation:
        imgRotationAim = image.new(size=(240, 320))
        rotationAngle = 90
        GETROTATION = imageRotationBuffer.rotate(+rotationAngle, adjust=1)
    else:
        imgRotationAim = image.new(size=(320, 240))
        GETROTATION = imageRotationBuffer
    GETROTATION = imgRotationAim.draw_image(GETROTATION, 0, 0, alpha=1)
    return GETROTATION

def lcdRotation(inputImg):
    # Rotation helper for the 240x320 camera: always rotate to fit the target orientation.
    global cameraSize, ScreenOrientation
    imageRotationBuffer = inputImg.crop(0, 0, 240, 320)
    if ScreenOrientation:
        imgRotationAim = image.new(size=(240, 320))
        rotationAngle = 180
    else:
        imgRotationAim = image.new(size=(320, 240))
        rotationAngle = 90
    GETROTATION = imageRotationBuffer.rotate(+rotationAngle, adjust=1)
    GETROTATION = imgRotationAim.draw_image(GETROTATION, 0, 0, alpha=1)
    return GETROTATION

# Configure the camera resolution to match the detected geometry.
if cameraSize == True:
    camera.camera.config(size=(320, 240))
else:
    camera.camera.config(size=(240, 320))

# Load the font used for on-screen drawing.
image.load_freetype("/root/preset/fonts/SourceHanSansCN-Regular.otf")

# YOLO2 face-detection model files and network options.
model = {
    "param": "/root/preset/model/yolo2_face_awnn.param",
    "bin": "/root/preset/model/yolo2_face_awnn.bin"
}
labels = ["person"]
options = {
    "model_type": "awnn",
    "inputs": {
        "input0": (224, 224, 3)
    },
    "outputs": {
        "output0": (7, 7, (1 + 4 + len(labels)) * 5)
    },
    "mean": [127.5, 127.5, 127.5],
    "norm": [0.0078125, 0.0078125, 0.0078125],
}
# YOLO2 anchor boxes as (width, height) pairs in output-grid units.
anchors = [1.19, 1.98, 2.79, 4.59, 4.53, 8.92, 8.06, 5.29, 10.32, 10.65]

# Load the model and create the decoder for its 7x7 output grid.
m = nn.load(model, opt=options)
yolo2_decoder = decoder.Yolo2(len(labels), anchors, net_in_size=(options["inputs"]["input0"][0], options["inputs"]["input0"][1]), net_out_size=(7, 7))

canvasImg = image.new(size=(240, 320))

while True:
    canvasImg.clear()
    # Capture a frame, rotate it for the display, and crop to the 224x224 network input.
    img_facedetection = getLcdRotation(camera.capture())
    img_facedetection = img_facedetection.crop(0, 0, 224, 224)
    # Run inference and decode the bounding boxes.
    out = m.forward(img_facedetection.tobytes(), quantize=True, layout="hwc")
    boxes, probs = yolo2_decoder.run(out, nms=0.3, threshold=0.3, img_size=(options["inputs"]["input0"][0], options["inputs"]["input0"][1]))
    if len(boxes):
        # Draw a red rectangle around each detected face.
        for i in boxes:
            img_facedetection.draw_rectangle(i[0], i[1], int(i[0] + i[2]), int(i[1] + i[3]), color=(255, 0, 0), thickness=1)
    # Compose the frame and the exit button onto the canvas and show it.
    canvasImg.draw_image(img_facedetection, 48, 8)
    canvasImg.draw_image(image.open("/root/preset/img/exit_ff0000_24x24.png"), 288, 216, alpha=1)
    display.show(canvasImg)