import json
from maix import mjpg
from maix import utils
import base64
import time
import os
import sys
sys.path.append("/root/")
import http.client
import smbus2
from CocoPi import singleRgb
singleRgb = singleRgb()
import requests
from CocoPi import multiFuncGpio
import _coco_mfcc
from maix import camera
from maix import display
from maix import image
from maix import nn
from maix.nn.app import face
from maix.nn.app.face import FaceRecognize
from maix.nn import decoder


class Face_Recognizer:
    def __init__(self, threshold=0.5, nms=0.3, max_face_num=1):
        model = "/root/preset/model/retinaface.mud"
        model_fe = "/root/preset/model/fe_resnet.mud"
        self.input_size = (224, 224, 3)
        input_size_fe = (128, 128, 3)
        self.feature_len = 256
        self.features = []
        print("-- load model:", model)
        m = nn.load(model)
        print("-- load ok")
        print("-- load model:", model_fe)
        m_fe = nn.load(model_fe)
        print("-- load ok")
        self.recognizer = FaceRecognize(m, m_fe, self.feature_len, self.input_size, threshold, nms, max_face_num)
        print("-- init end")

    def get_faces(self, img, std_img=False):
        faces = self.recognizer.get_faces(img, std_img)
        return faces

    def __len__(self):
        return len(self.features)

    def add_user(self, name, feature):
        self.features.append([name, feature])
        return True

    def remove_user(self, name_del):
        rm = None
        for name, feature in self.features:
            if name_del == name:
                rm = [name, feature]
        if rm:
            self.features.remove(rm)
            return True
        return False

    def recognize(self, feature):
        # Compare the given feature against all recorded users; return (name, score) of the best match.
        max_score = 0
        uid = -1
        for i, user in enumerate(self.features):
            score = self.recognizer.compare(user[1], feature)
            if score > max_score:
                max_score = score
                uid = i
        if uid >= 0:
            return self.features[uid][0], max_score
        return None, 0

    def get_input_size(self):
        return self.input_size

    def get_feature_len(self):
        return self.feature_len

    def darw_info(self, img, box, points, disp_str, bg_color=(255, 0, 0, 255), font_color=(255, 255, 255, 255), font_size=32):
        font_wh = image.get_string_size(disp_str)
        for p in points:
            img.draw_rectangle(p[0] - 1, p[1] - 1, p[0] + 1, p[1] + 1, color=bg_color)
        img.draw_rectangle(box[0], box[1], box[0] + box[2], box[1] + box[3], color=bg_color, thickness=2)
        if disp_str:
            img.draw_rectangle(box[0], box[1] - font_wh[1], box[0] + font_wh[0], box[1], color=bg_color, thickness=-1)
            img.draw_string(box[0], box[1] - font_wh[1], disp_str, color=font_color)

    def map_face(self, box, points, IMAGEFACESHOW):
        # Draw the detected facial landmark points on the preview image.
        # print(box, points)
        for p in points:
            IMAGEFACESHOW.draw_rectangle(p[0] - 1, p[1] - 1, p[0] + 1, p[1] + 1, color=(255, 200, 255))
        # return box, points

    def darw_title(self, img, dis_size, key_l=None, key_r=None):
        # Note: key_C / key_D are expected to exist as globals; this helper is not called in this program.
        if key_C:
            key_l = "| " + key_l
            img.draw_string(1, 2, key_l, scale=1, color=(255, 255, 255), thickness=2)
        if key_D:
            key_r = key_r + " |"
            w = int(dis_size[0] - 4 - image.get_string_size(key_r)[0] * 1)
            img.draw_string(w, 2, key_r, scale=1, color=(255, 255, 255), thickness=2)


FACERECGNIZER = Face_Recognizer(0.5, 0.3, max_face_num=4)
try:
    # Load previously recorded face features (JSON) if they exist.
    with open("/root/user/model/recorded_face_features.py", "r") as file:
        FACERECGNIZER.features = json.loads(file.read())
except:
    pass


def v831_display_show_img_recognized(displayShow):
    global _img_recognized_y, _img_recognized_x, ScreenOrientation, cameraSize
    CANVASSHOWIMGAGE = ""
    if ScreenOrientation:
        displayShowCanvas = image.new(size=(240, 320))
        displayShowCanvas.draw_rectangle(0, 0, 240, 320, color=(0, 0, 0), thickness=-1)
        displayShowCanvas.draw_image(displayShow, _img_recognized_x, _img_recognized_y, alpha=1)
        displayShowVER = displayShowCanvas.crop(0, 0, 240, 320)
        displayShowVER = displayShowVER.rotate(-90, adjust=1)
        display.show(displayShowVER)
    else:
        displayShowCanvas = image.new(size=(320, 240))
        displayShowCanvas.draw_rectangle(0, 0, 320, 240, color=(0, 0, 0), thickness=-1)
        displayShowCanvas.draw_image(displayShow, _img_recognized_x, _img_recognized_y, alpha=1)
        display.show(displayShowCanvas)


def _E4_BA_BA_E8_87_89_E8_AD_98_E5_88_A5():  # "人臉識別": face recognition state
    global img_recognized, i, state, canvas, people_name_list, _img_recognized_x, _img_recognized_y, _canvas_x, _canvas_y, _COCOCLOUD_SEND_REQUEST, RecordState, recordFrequency, recordResultData, SETVFLIP, SETHMIRROT, ScreenOrientation, score_threshold, CLASSNAMEFACELIST
    FACESRECOGNITONRESULT = FACERECGNIZER.get_faces(img_recognized)
    if len(FACESRECOGNITONRESULT):
        for i in FACESRECOGNITONRESULT:
            FACERECGNIZER.map_face(i[1], i[2], img_recognized)
            if FACERECGNIZER.recognize(i[3])[1] > 70:
                # Known face: show the name and switch to the welcome state.
                img_recognized.draw_string(0, 0, FACERECGNIZER.recognize(i[3])[0], scale=1, color=(51, 204, 0), thickness=1)
                img_recognized.draw_rectangle(i[1][0], i[1][1], i[1][0] + i[1][2], i[1][1] + i[1][3], color=(51, 204, 0), thickness=1)
                state = "歡迎語"  # welcome message
            else:
                img_recognized.draw_rectangle(i[1][0], i[1][1], i[1][0] + i[1][2], i[1][1] + i[1][3], color=(255, 255, 255), thickness=1)
    v831_display_show_img_recognized(img_recognized)


def getNetworkDate_noexit():
    # Fetch the date from an HTTP response header and set the system clock (UTC+8).
    global getDateNum
    try:
        coon = http.client.HTTPConnection("www.baidu.com")
        coon.request("GET", "/")
        r = coon.getresponse()
        ts = r.getheader("date")
        GMT_time = time.strptime(ts[5:25], "%d %b %Y %H:%M:%S")
        BeiJing_time = time.localtime(time.mktime(GMT_time) + 8 * 60 * 60)
        format_time = time.strftime("%Y-%m-%d %H:%M:%S", BeiJing_time)
        command = "date -s " + "\"{}\"".format(format_time)
        os.system(command)
        getDateNum = 1
        # sys.exit()
    except:
        pass


def wifi_is_content():
    # Return True if the network is reachable (ping succeeds).
    global getDateNum
    cmd = "ping -c 4 www.baidu.com"
    res = os.popen(cmd).read()
    data = False
    if res:
        data = True
    return data


def Wi_Fi_E9_80_A3_E6_8E_A5():  # "Wi-Fi 連接": connect to Wi-Fi
    global img_recognized, i, state, canvas, people_name_list, _img_recognized_x, _img_recognized_y, _canvas_x, _canvas_y, _COCOCLOUD_SEND_REQUEST, RecordState, recordFrequency, recordResultData, SETVFLIP, SETHMIRROT, ScreenOrientation, score_threshold, CLASSNAMEFACELIST
    WiFiSSID = "CocoRobo_SZ"
    WiFiPSD = "cocorobo2019"
    os.system("wifi_disconnect_ap_test")
    os.system("wifi_connect_chinese_ap_test " + WiFiSSID + " " + WiFiPSD + "")
    # CLIENT = ntplib.NTPClient()
    # RESPONSE = CLIENT.request('127.0.0.1')
    getNetworkDate_noexit()
    while not (wifi_is_content()):
        # Blink the RGB LED red/green while waiting for the connection.
        singleRgb.setBrightness(5)
        singleRgb.setColor(255, 0, 0)
        time.sleep(0.05)
        singleRgb.show()
        singleRgb.setBrightness(5)
        singleRgb.setColor(0, 255, 0)
        time.sleep(0.05)
        singleRgb.show()


def v831_display_show_canvas(displayShow):
    global _canvas_y, _canvas_x, ScreenOrientation, cameraSize
    CANVASSHOWIMGAGE = ""
    if ScreenOrientation:
        displayShowCanvas = image.new(size=(240, 320))
        displayShowCanvas.draw_rectangle(0, 0, 240, 320, color=(0, 0, 0), thickness=-1)
        displayShowCanvas.draw_image(displayShow, _canvas_x, _canvas_y, alpha=1)
        displayShowVER = displayShowCanvas.crop(0, 0, 240, 320)
        displayShowVER = displayShowVER.rotate(-90, adjust=1)
        display.show(displayShowVER)
    else:
        displayShowCanvas = image.new(size=(320, 240))
        displayShowCanvas.draw_rectangle(0, 0, 320, 240, color=(0, 0, 0), thickness=-1)
        displayShowCanvas.draw_image(displayShow, _canvas_x, _canvas_y, alpha=1)
        display.show(displayShowCanvas)


def _E8_AA_9E_E9_9F_B3_E8_AD_98_E5_88_A5():  # "語音識別": speech recognition state
    global img_recognized, i, state, canvas, people_name_list, _img_recognized_x, _img_recognized_y, _canvas_x, _canvas_y, _COCOCLOUD_SEND_REQUEST, RecordState, recordFrequency, recordResultData, SETVFLIP, SETHMIRROT, ScreenOrientation, score_threshold, CLASSNAMEFACELIST
    if RecordState == 0:
        mfcc.recognize()
        recordResultData = None
        time.sleep(1)
        RecordState = 1
    if RecordState == 1:
        recordData = mfcc.state()
        if recordData == mfcc._mfcc_result:
            recordResultData = mfcc.result()
            time.sleep(1)
            RecordState = 0
            canvas.clear()
            if recordResultData == 0:
                # Command 0: "I want to borrow a ball."
                canvas.draw_string(0, 30, "I want to borrow a ball.", scale=1, color=(51, 204, 0), thickness=1)
                v831_display_show_canvas(canvas)
                state = "借球提示語"  # borrow-ball prompt
            elif recordResultData == 1:
                # Command 1: "Here's the ball back!"
                canvas.draw_string(0, 30, "Here's the ball back!", scale=1, color=(51, 204, 0), thickness=1)
                v831_display_show_canvas(canvas)
                time.sleep(1)
                _E9_82_84_E7_90_83_E6_8F_90_E7_A4_BA_E8_AA_9E()
                time.sleep(2)
                state = "體育用品識別"  # sports equipment recognition
            elif recordResultData == 2:
                # Command 2: borrow a basketball -> open the basketball door, report to CocoCloud.
                canvas.draw_string(0, 30, "Basketball", scale=1, color=(51, 204, 0), thickness=1)
                v831_display_show_canvas(canvas)
                _E7_B1_83_E7_90_83_E6_AB_83_E9_96_80_E6_8E_A7_E5_88_B6()
                _COCOCLOUD_SEND_ENDPOINT = "http://api.cocorobo.cn/iot/data/eventAPIKeyJson/3314aaaae706e22e79f625b6be4bac17"
                _COCOCLOUD_SEND_DATA = {"Basketball": 0}
                try:
                    _COCOCLOUD_SEND_REQUEST = requests.post(_COCOCLOUD_SEND_ENDPOINT, json=_COCOCLOUD_SEND_DATA, headers={"Content-type": "application/json"}, timeout=60)
                    print(str(_COCOCLOUD_SEND_REQUEST.status_code) + ", " + str(_COCOCLOUD_SEND_REQUEST.content))
                except BaseException as e:
                    print(e)
                    pass
                state = "人臉識別"  # back to face recognition
            elif recordResultData == 3:
                # Command 3: borrow a volleyball -> open the volleyball door, report to CocoCloud.
                canvas.draw_string(0, 30, "Volleyball", scale=1, color=(51, 204, 0), thickness=1)
                v831_display_show_canvas(canvas)
                _E6_8E_92_E7_90_83_E6_AB_83_E9_96_80_E6_8E_A7_E5_88_B6()
                _COCOCLOUD_SEND_ENDPOINT = "http://api.cocorobo.cn/iot/data/eventAPIKeyJson/3314aaaae706e22e79f625b6be4bac17"
                _COCOCLOUD_SEND_DATA = {"Volleyball": 0}
                try:
                    _COCOCLOUD_SEND_REQUEST = requests.post(_COCOCLOUD_SEND_ENDPOINT, json=_COCOCLOUD_SEND_DATA, headers={"Content-type": "application/json"}, timeout=60)
                    print(str(_COCOCLOUD_SEND_REQUEST.status_code) + ", " + str(_COCOCLOUD_SEND_REQUEST.content))
                except BaseException as e:
                    print(e)
                    pass
                state = "人臉識別"  # back to face recognition


def _E9_82_84_E7_90_83_E6_8F_90_E7_A4_BA_E8_AA_9E():  # "還球提示語": return-ball prompt screen
    global img_recognized, i, state, canvas, people_name_list, _img_recognized_x, _img_recognized_y, _canvas_x, _canvas_y, _COCOCLOUD_SEND_REQUEST, RecordState, recordFrequency, recordResultData, SETVFLIP, SETHMIRROT, ScreenOrientation, score_threshold, CLASSNAMEFACELIST
    canvas.clear()
    if ScreenOrientation:
        canvas = image.new(size=(240, 320), color=(255, 255, 255), mode="RGB")
    else:
        canvas = image.new(size=(320, 240), color=(255, 255, 255), mode="RGB")
    # "Please put the sports equipment in front of the camera for recognition."
    canvas.draw_string(2, 60, "請將體育用品", scale=2.8, color=(102, 0, 204), thickness=1)
    canvas.draw_string(2, 140, "放在鏡頭前面", scale=2.8, color=(102, 51, 255), thickness=1)
    canvas.draw_string(2, 220, "進行識別", scale=2.8, color=(102, 51, 255), thickness=1)
    v831_display_show_canvas(canvas)


def _E6_AD_A1_E8_BF_8E_E8_AA_9E():  # "歡迎語": welcome screen
    global img_recognized, i, state, canvas, people_name_list, _img_recognized_x, _img_recognized_y, _canvas_x, _canvas_y, _COCOCLOUD_SEND_REQUEST, RecordState, recordFrequency, recordResultData, SETVFLIP, SETHMIRROT, ScreenOrientation, score_threshold, CLASSNAMEFACELIST
    # Greet the recognized user and show the two voice commands they can say.
    canvas.draw_string(0, 0, str("Hi,") + str(FACERECGNIZER.recognize(i[3])[0]), scale=3, color=(255, 255, 255), thickness=1)
    canvas.draw_string(5, 50, "歡迎使用CocoPi", scale=2, color=(255, 255, 255), thickness=1)
    canvas.draw_string(5, 90, "體育用品共享櫃", scale=2, color=(255, 255, 255), thickness=1)
    canvas.draw_rectangle(0, 140, 0 + 240, 140 + 180, color=(255, 255, 255), thickness=-1)
    canvas.draw_string(0, 150, "請說出", scale=2, color=(51, 102, 255), thickness=1)
    canvas.draw_string(0, 200, "I want to borrow a ball.", scale=1.4, color=(51, 102, 255), thickness=1)
    canvas.draw_string(120, 240, "或", scale=1.5, color=(51, 102, 255), thickness=1)
    canvas.draw_string(0, 280, "Here's the ball back!", scale=1.4, color=(51, 102, 255), thickness=1)
    v831_display_show_canvas(canvas)


S1 = multiFuncGpio(0, 1)  # servo for the basketball cabinet door
S2 = multiFuncGpio(1, 1)  # servo for the volleyball cabinet door
mfcc = _coco_mfcc.MFCC(is_reply=False)
cameraSize = True


def CAMERATYPE():
    global cameraSize
    if os.path.exists("/etc/cameraSize.cfg"):
        cameraSize = True
    else:
        cameraSize = False


CAMERATYPE()
image.load_freetype("/root/preset/fonts/simhei.ttf")
max_face_num = 4
detect_threshold = 0.5
detect_nms = 0.3
# score_threshold = 70
FEATURES = []


def lcdRotation(inputImg):
    global SETVFLIP, SETHMIRROT, cameraSize, ScreenOrientation
    imageRotationBuffer = inputImg.crop(0, 0, 240, 320)
    if ScreenOrientation:
        imgRotationAim = image.new(size=(240, 320))
        rotationAngle = 180
    else:
        imgRotationAim = image.new(size=(320, 240))
        rotationAngle = 90
    GETROTATION = imageRotationBuffer.rotate(+rotationAngle, adjust=1)
    GETROTATION = imgRotationAim.draw_image(GETROTATION, 0, 0, alpha=1)
    if SETVFLIP and not SETHMIRROT:
        GETROTATIONs = GETROTATION.flip(0)
    if SETHMIRROT and not SETVFLIP:
        GETROTATIONs = GETROTATION.flip(1)
    if SETVFLIP and SETHMIRROT:
        GETROTATION1 = GETROTATION.flip(0)
        GETROTATION = GETROTATION1.flip(1)
    return GETROTATION


def lcdRotationNew(inputImg):
    global SETVFLIP, SETHMIRROT, cameraSize, ScreenOrientation
    imageRotationBuffer = inputImg.crop(0, 0, 320, 240)
    if ScreenOrientation:
        imgRotationAim = image.new(size=(240, 320))
        rotationAngle = 90
        GETROTATION = imageRotationBuffer.rotate(+rotationAngle, adjust=1)
    else:
        imgRotationAim = image.new(size=(320, 240))
        GETROTATION = imageRotationBuffer
    GETROTATION = imgRotationAim.draw_image(GETROTATION, 0, 0, alpha=1)
    if SETVFLIP and not SETHMIRROT:
        GETROTATIONs = GETROTATION.flip(0)
    elif SETHMIRROT and not SETVFLIP:
        GETROTATIONs = GETROTATION.flip(1)
    elif SETVFLIP and SETHMIRROT:
        GETROTATION1 = GETROTATION.flip(0)
        GETROTATION = GETROTATION1.flip(1)
    return GETROTATION


def getLcdRotation(cameraCapture):
    global cameraSize
    if cameraSize:
        return lcdRotationNew(cameraCapture)
    else:
        return lcdRotation(cameraCapture)


def _E9_AB_94_E8_82_B2_E7_94_A8_E5_93_81_E8_AD_98_E5_88_A5():  # "體育用品識別": sports equipment recognition state
    global img_recognized, i, state, canvas, people_name_list, _img_recognized_x, _img_recognized_y, _canvas_x, _canvas_y, _COCOCLOUD_SEND_REQUEST, RecordState, recordFrequency, recordResultData, SETVFLIP, SETHMIRROT, ScreenOrientation, score_threshold, CLASSNAMEFACELIST
    out = Yolo.model.forward(img_recognized, quantize=True, layout="hwc")
    boxes, probs = Yolo.decoder.run(out, nms=0.3, threshold=0.5, img_size=(224, 224))
    if len(boxes):
        for boxesi, box in enumerate(boxes):
            boxes[boxesi].append(probs[boxesi])
    if len(boxes):
        for i in (boxes):
            img_recognized.draw_string((i[0]), (i[1]), (Yolo.labels[i[4][0]]), scale=1, color=(255, 0, 0), thickness=1)
            img_recognized.draw_rectangle((i[0]), (i[1]), (i[0] + i[2]), (i[1] + i[3]), color=(255, 0, 0), thickness=1)
            if (Yolo.labels[i[4][0]]) == "lanqiu":
                # Basketball returned: show the return screen, open the door, report to CocoCloud.
                _E7_B1_83_E7_90_83_E6_AD_B8_E9_82_84()
                _E7_B1_83_E7_90_83_E6_AB_83_E9_96_80_E6_8E_A7_E5_88_B6()
                _COCOCLOUD_SEND_ENDPOINT = "http://api.cocorobo.cn/iot/data/eventAPIKeyJson/3314aaaae706e22e79f625b6be4bac17"
                _COCOCLOUD_SEND_DATA = {"Basketball": 1}
                try:
                    _COCOCLOUD_SEND_REQUEST = requests.post(_COCOCLOUD_SEND_ENDPOINT, json=_COCOCLOUD_SEND_DATA, headers={"Content-type": "application/json"}, timeout=60)
                    print(str(_COCOCLOUD_SEND_REQUEST.status_code) + ", " + str(_COCOCLOUD_SEND_REQUEST.content))
                except BaseException as e:
                    print(e)
                    pass
                canvas.clear()
                state = "人臉識別"  # back to face recognition
            elif (Yolo.labels[i[4][0]]) == "paiqiu":
                # Volleyball returned: show the return screen, open the door, report to CocoCloud.
                _E6_8E_92_E7_90_83_E6_AD_B8_E9_82_84()
                _E6_8E_92_E7_90_83_E6_AB_83_E9_96_80_E6_8E_A7_E5_88_B6()
                _COCOCLOUD_SEND_ENDPOINT = "http://api.cocorobo.cn/iot/data/eventAPIKeyJson/3314aaaae706e22e79f625b6be4bac17"
                _COCOCLOUD_SEND_DATA = {"Volleyball": 1}
                try:
                    _COCOCLOUD_SEND_REQUEST = requests.post(_COCOCLOUD_SEND_ENDPOINT, json=_COCOCLOUD_SEND_DATA, headers={"Content-type": "application/json"}, timeout=60)
                    print(str(_COCOCLOUD_SEND_REQUEST.status_code) + ", " + str(_COCOCLOUD_SEND_REQUEST.content))
                except BaseException as e:
                    print(e)
                    pass
                canvas.clear()
                state = "人臉識別"  # back to face recognition
    v831_display_show_img_recognized(img_recognized)


def _E7_B1_83_E7_90_83_E6_AB_83_E9_96_80_E6_8E_A7_E5_88_B6():  # "籃球櫃門控制": basketball cabinet door control
    global img_recognized, i, state, canvas, people_name_list, _img_recognized_x, _img_recognized_y, _canvas_x, _canvas_y, _COCOCLOUD_SEND_REQUEST, RecordState, recordFrequency, recordResultData, SETVFLIP, SETHMIRROT, ScreenOrientation, score_threshold, CLASSNAMEFACELIST
    S1.servoCtrl(180)
    time.sleep(2)
    S1.servoCtrl(75)


def _E6_8E_92_E7_90_83_E6_AB_83_E9_96_80_E6_8E_A7_E5_88_B6():  # "排球櫃門控制": volleyball cabinet door control
    global img_recognized, i, state, canvas, people_name_list, _img_recognized_x, _img_recognized_y, _canvas_x, _canvas_y, _COCOCLOUD_SEND_REQUEST, RecordState, recordFrequency, recordResultData, SETVFLIP, SETHMIRROT, ScreenOrientation, score_threshold, CLASSNAMEFACELIST
    S2.servoCtrl(180)
    time.sleep(2)
    S2.servoCtrl(70)


def _E5_80_9F_E7_90_83_E6_8F_90_E7_A4_BA_E8_AA_9E():  # "借球提示語": borrow-ball prompt screen
    global img_recognized, i, state, canvas, people_name_list, _img_recognized_x, _img_recognized_y, _canvas_x, _canvas_y, _COCOCLOUD_SEND_REQUEST, RecordState, recordFrequency, recordResultData, SETVFLIP, SETHMIRROT, ScreenOrientation, score_threshold, CLASSNAMEFACELIST
    if ScreenOrientation:
        canvas = image.new(size=(240, 320), color=(255, 255, 255), mode="RGB")
    else:
        canvas = image.new(size=(320, 240), color=(255, 255, 255), mode="RGB")
    # "Please say the English name of the equipment you want to borrow."
    canvas.draw_string(10, 50, "請說出", scale=3, color=(102, 0, 204), thickness=1)
    canvas.draw_string(10, 120, "你要借的體", scale=3, color=(102, 51, 255), thickness=1)
    canvas.draw_string(10, 190, "育用品英文", scale=3, color=(102, 51, 255), thickness=1)
    v831_display_show_canvas(canvas)


def _E6_8E_92_E7_90_83_E6_AD_B8_E9_82_84():  # "排球歸還": volleyball-returned screen
    global img_recognized, i, state, canvas, people_name_list, _img_recognized_x, _img_recognized_y, _canvas_x, _canvas_y, _COCOCLOUD_SEND_REQUEST, RecordState, recordFrequency, recordResultData, SETVFLIP, SETHMIRROT, ScreenOrientation, score_threshold, CLASSNAMEFACELIST
    if ScreenOrientation:
        canvas = image.new(size=(240, 320), color=(0, 100, 0), mode="RGB")
    else:
        canvas = image.new(size=(320, 240), color=(0, 100, 0), mode="RGB")
    canvas.draw_image((image.open("/root/user/img/paiqiu.png")), 20, 0, alpha=1)
    canvas.draw_string(10, 200, "排球已歸還", scale=3, color=(255, 255, 255), thickness=1)
    canvas.draw_string(5, 260, "歡迎下次使用", scale=2.8, color=(255, 255, 255), thickness=1)
    v831_display_show_canvas(canvas)


def _E7_B1_83_E7_90_83_E6_AD_B8_E9_82_84():  # "籃球歸還": basketball-returned screen
    global img_recognized, i, state, canvas, people_name_list, _img_recognized_x, _img_recognized_y, _canvas_x, _canvas_y, _COCOCLOUD_SEND_REQUEST, RecordState, recordFrequency, recordResultData, SETVFLIP, SETHMIRROT, ScreenOrientation, score_threshold, CLASSNAMEFACELIST
    canvas.clear()
    if ScreenOrientation:
        canvas = image.new(size=(240, 320), color=(0, 100, 0), mode="RGB")
    else:
        canvas = image.new(size=(320, 240), color=(0, 100, 0), mode="RGB")
    canvas.draw_image((image.open("/root/user/img/lanqiu.png")), 20, 0, alpha=1)
    canvas.draw_string(10, 200, "籃球已歸還", scale=3, color=(255, 255, 255), thickness=1)
    canvas.draw_string(5, 260, "歡迎下次使用", scale=2.8, color=(255, 255, 255), thickness=1)
    v831_display_show_canvas(canvas)


# --- Global state and hardware initialisation ---
_img_recognized_x = 0
_img_recognized_y = 0
_canvas_x = 0
_canvas_y = 0
_COCOCLOUD_SEND_REQUEST = None
RecordState = 0
recordFrequency = 0
recordResultData = None
SETVFLIP = False
SETHMIRROT = False
ScreenOrientation = True
score_threshold = 70
Wi_Fi_E9_80_A3_E6_8E_A5()
S1.servoCtrl(75)
S2.servoCtrl(70)
SETVFLIP = True
if cameraSize == True:
    camera.camera.config(size=(320, 240))
else:
    camera.camera.config(size=(240, 320))
canvas = image.new(size=(240, 320))
img_recognized = image.new(size=(240, 320))
_img_recognized_x, _img_recognized_y = 8, 48
people_name_list = ["張三", "李四", "王五"]
CLASSNAMEFACELIST = people_name_list


class Yolo:
    # Pinyin labels: ganlanqiu=rugby, lanqiu=basketball, paiqiu=volleyball,
    # wangqiu=tennis, yumaoqiu=badminton, zuqiu=football.
    labels = ["ganlanqiu", "lanqiu", "paiqiu", "wangqiu", "yumaoqiu", "zuqiu"]
    anchors = [1.19, 1.98, 2.79, 4.59, 4.53, 8.92, 8.06, 5.29, 10.32, 10.65]
    m = {
        "param": "/root/user/model/ball.param",
        "bin": "/root/user/model/ball.bin"
    }
    options = {
        "model_type": "awnn",
        "inputs": {
            "input0": (224, 224, 3)
        },
        "outputs": {
            "output0": (7, 7, (1 + 4 + len(labels)) * 5)
        },
        "mean": [127.5, 127.5, 127.5],
        "norm": [0.0078125, 0.0078125, 0.0078125],
    }

    def __init__(self):
        from maix import nn
        from maix.nn import decoder
        self.model = nn.load(self.m, opt=self.options)
        self.decoder = decoder.Yolo2(len(self.labels), self.anchors, net_in_size=(224, 224), net_out_size=(7, 7))

    def __del__(self):
        del self.model
        del self.decoder


Yolo = Yolo()
state = "人臉識別"  # start in the face recognition state

# Main loop: capture a frame, then dispatch on the current state.
while True:
    img_recognized = getLcdRotation(camera.capture())
    img_recognized = img_recognized.crop(0, 0, 224, 224)
    if state == "人臉識別":  # face recognition
        _E4_BA_BA_E8_87_89_E8_AD_98_E5_88_A5()
    elif state == "歡迎語":  # welcome message + voice command
        _E6_AD_A1_E8_BF_8E_E8_AA_9E()
        _E8_AA_9E_E9_9F_B3_E8_AD_98_E5_88_A5()
    elif state == "借球提示語":  # borrow prompt + voice command
        _E5_80_9F_E7_90_83_E6_8F_90_E7_A4_BA_E8_AA_9E()
        _E8_AA_9E_E9_9F_B3_E8_AD_98_E5_88_A5()
    elif state == "體育用品識別":  # sports equipment recognition
        _E9_AB_94_E8_82_B2_E7_94_A8_E5_93_81_E8_AD_98_E5_88_A5()
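The program repeats the same CocoCloud upload block in four places (borrow/return of basketball and volleyball). A minimal sketch of a helper that factors this out is shown below; the endpoint URL, headers, timeout, and payload keys are taken from the program above, while the helper name send_to_cococloud is illustrative and not part of the original code.

import requests

# CocoCloud event API endpoint used by the program above.
_COCOCLOUD_SEND_ENDPOINT = "http://api.cocorobo.cn/iot/data/eventAPIKeyJson/3314aaaae706e22e79f625b6be4bac17"

def send_to_cococloud(payload, timeout=60):
    # Post one event record to the CocoCloud event API and print the result.
    # Returns the Response on success, or None if the request raised an exception,
    # so the device's main loop keeps running even when the network drops.
    try:
        response = requests.post(
            _COCOCLOUD_SEND_ENDPOINT,
            json=payload,
            headers={"Content-type": "application/json"},
            timeout=timeout,
        )
        print(str(response.status_code) + ", " + str(response.content))
        return response
    except BaseException as e:
        print(e)
        return None

# Example usage, mirroring the payloads in the program above
# (0 = borrowed, 1 = returned):
# send_to_cococloud({"Basketball": 0})
# send_to_cococloud({"Volleyball": 1})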