from AI import CustomModel
CustomModel(labels,path)
| Parameter | Type   | Description |
| --------- | ------ | ----------- |
| labels    | list   | List of labels; required |
| path      | string | Model path; required |
loadModel(img)
| Parameter | Type  | Description |
| --------- | ----- | ----------- |
| img       | image | Image object to recognize; required |
from cocopiPython import cameraPython,Screen
from AI import CustomModel

Screen = Screen()
camera = cameraPython()
labels = ["load", "unload", "left", "right", "forward", "stop"]
path = "/root/preset/model/autologistics"
CustomModel = CustomModel(labels,path)

while True:
    canvas = camera.capture()                      # capture a frame from the camera
    canvas = canvas.crop(48,8,224,224)             # crop to the 224x224 region the model expects
    data = CustomModel.load(canvas)                # run the custom model on the frame
    for i in data:
        # draw each detection box with its label and confidence
        canvas.draw_rectangle(i["X"],i["Y"],i["X"]+i["W"],i["Y"]+i["H"],color=(255,0,0),thickness=1)
        canvas.draw_string(i["X"],i["Y"]-20,(str(i["name"])+":"+str(i["score"])),color=(255,0,0))
    Screen.show(canvas,48,8)
Each entry i in the returned result contains:
- i["X"]: X coordinate of the top-left corner of the detection box
- i["Y"]: Y coordinate of the top-left corner of the detection box
- i["W"]: width of the detection box
- i["H"]: height of the detection box
- i["name"]: name of the recognition result
- i["score"]: confidence of the recognition result
- i["middleX"]: X coordinate of the center of the detection box
- i["middleY"]: Y coordinate of the center of the detection box
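When the center coordinates are needed, for example to act only on the detection closest to the middle of the frame, they can be read straight from each entry. A minimal sketch over the data list returned in the example above (pure Python; the 224x224 size matches the crop used in the example):

```python
# Pick the detection whose box center is closest to the center of the 224x224 crop.
# "data" is the list returned by the model call in the example above.
def closest_to_center(data, frame_w=224, frame_h=224):
    cx, cy = frame_w / 2, frame_h / 2
    best, best_dist = None, None
    for i in data:
        dist = (i["middleX"] - cx) ** 2 + (i["middleY"] - cy) ** 2
        if best is None or dist < best_dist:
            best, best_dist = i, dist
    return best  # None when nothing was detected

target = closest_to_center(data)
if target is not None:
    print(target["name"], target["score"])
```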
from AI import EdgeDetection
EdgeDetection()
loadModel(img)
| Parameter | Type  | Description |
| --------- | ----- | ----------- |
| img       | image | Image object to recognize; required |
from cocopiPython import cameraPython,Screen
from AI import EdgeDetection

Screen = Screen()
camera = cameraPython()
EdgeDetection = EdgeDetection()

while True:
    img = camera.capture()
    img = img.crop(48,8,224,224)
    canvas = EdgeDetection.load(img)   # returns the edge-detected image
    Screen.show(canvas,48,8)
from AI import Facedetection
Facedetection()
loadModel(img)
| Parameter | Type  | Description |
| --------- | ----- | ----------- |
| img       | image | Image object to recognize; required |
from cocopiPython import cameraPython,Screen
from AI import Facedetection

Screen = Screen()
camera = cameraPython()
face = Facedetection()

while True:
    canvas = camera.capture()
    canvas = canvas.crop(48,8,224,224)
    data = face.load(canvas)           # list of detected faces
    for i in data:
        canvas.draw_rectangle(i["X"],i["Y"],i["X"]+i["W"],i["Y"]+i["H"],color=(255,0,0), thickness=1)
    # show the number of detected faces in the top-left corner
    canvas.draw_string(5,5,str(len(data)), scale = 1, color = (255,255,255) , thickness = 1)
    Screen.show(canvas,48,8)
from AI import FaceRecognition
FaceRecognition()
Load the face detection model and obtain face information.
loadmodel(img)
| Parameter | Type  | Description |
| --------- | ----- | ----------- |
| img       | image | Image object to recognize; required |
Record face information and save it locally.
add(name,feature,dataPath)
| Parameter | Type   | Description |
| --------- | ------ | ----------- |
| name      | string | Name of the recognized face; required |
| feature   | list   | Face feature data; required |
| dataPath  | string | Save path; optional; defaults to "/root/user/model/recorded_face_features.py" |
Delete the specified face data from local storage.
remove(name,dataPath)
| Parameter | Type   | Description |
| --------- | ------ | ----------- |
| name      | string | Name of the face to delete; required |
| dataPath  | string | Save path; optional; defaults to "/root/user/model/recorded_face_features.py" |
Load the face information data saved locally during face capture.
loadface(dataPath)
| Parameter | Type   | Description |
| --------- | ------ | ----------- |
| dataPath  | string | Save path; optional; defaults to "/root/user/model/recorded_face_features.py" |
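Taken together, the usual flow is: run loadmodel on a frame to detect faces, pass a detected face's feature to add to enroll it under a name, call loadface once so previously saved data is available, and then read name and recognizeScore from later detections. A minimal sketch under those assumptions (the field names feature, name and recognizeScore are taken from the full examples below; the default save path is used):

```python
from cocopiPython import cameraPython
from AI import FaceRecognition

camera = cameraPython()
recognizer = FaceRecognition()

# Enroll one face: detect it, then save its feature data under a name.
frame = camera.capture().crop(48, 8, 224, 224)
faces = recognizer.loadmodel(frame)
if faces:
    recognizer.add("小明", faces[0]["feature"])   # uses the default dataPath

# Later: load the saved features and recognize a new frame.
recognizer.loadface()
frame = camera.capture().crop(48, 8, 224, 224)
for face in recognizer.loadmodel(frame):
    print(face["name"], round(face["recognizeScore"], 2))
```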
import time
from cocopiPython import cameraPython,Screen
from AI import FaceRecognition
from CocoPi import BUTTON

key_C = BUTTON(13)
key_D = BUTTON(7)
Screen = Screen()
camera = cameraPython()
FaceRecognition = FaceRecognition()
People_list = ["小明","小紅","小剛","小花"]
FaceNum = 0
faceScore = 0
color = (255,0,0)

while True:
    canvas = camera.capture()
    canvas = canvas.crop(48,8,224,224)
    data = FaceRecognition.loadmodel(canvas)
    for i in data:
        canvas.draw_rectangle(i["X"],i["Y"],i["X"]+i["W"],i["Y"]+i["H"],color=color,thickness=1)
        if key_C.is_pressed() and FaceNum < len(People_list):
            while key_C.is_pressed():          # wait for the C button to be released
                time.sleep(0.01)
            FaceRecognition.add(People_list[FaceNum],i["feature"])   # save this face under the next name
            canvas.draw_rectangle(0,0,320,20,color=(0,0,0),thickness=-1)
            canvas.draw_string(0,0,"已採集"+str(People_list[FaceNum])+"人臉數據",scale=1,color=(255,255,255),thickness=1)
            Screen.show(canvas,48,8)
            FaceNum = FaceNum + 1
            time.sleep(1)
    if key_D.is_pressed() and FaceNum > 0:
        while key_D.is_pressed():              # wait for the D button to be released
            time.sleep(0.01)
        FaceRecognition.remove(People_list[FaceNum-1])               # delete the most recently saved face
        canvas.draw_rectangle(0,0,320,20,color=(0,0,0),thickness=-1)
        canvas.draw_string(0,0,"已刪除"+str(People_list[FaceNum-1])+"人臉數據",scale=1,color=(255,255,255),thickness=1)
        Screen.show(canvas,48,8)
        FaceNum = FaceNum - 1
        time.sleep(1)
    Screen.show(canvas,48,8)
from cocopiPython import cameraPython,Screen
from AI import FaceRecognition

Screen = Screen()
camera = cameraPython()
FaceRecognition = FaceRecognition()
faceScore = 0
color = (255,0,0)

while True:
    canvas = camera.capture()
    canvas = canvas.crop(48,8,224,224)
    data = FaceRecognition.loadmodel(canvas)
    FaceRecognition.loadface()                 # load the face data saved during capture
    for i in data:
        faceScore = round(i["recognizeScore"],2)
        canvas.draw_rectangle(i["X"],i["Y"],i["X"]+i["W"],i["Y"]+i["H"],color=color,thickness=1)
        canvas.draw_string(i["X"],i["Y"],str(i["name"])+":"+str(faceScore),scale=1,color=color,thickness=1)
        if faceScore > 80:
            color = (0,255,0)                  # green box when the match score is high enough
        else:
            color = (255,0,0)
    Screen.show(canvas,48,8)
import time
from cocopiPython import cameraPython,Screen
from AI import FaceRecognition
from CocoPi import BUTTON

key_C = BUTTON(13)
key_D = BUTTON(7)
Screen = Screen()
camera = cameraPython()
FaceRecognition = FaceRecognition()
People_list = ["小明","小紅","小剛","小花"]
FaceNum = 0
faceScore = 0
color = (255,0,0)

while True:
    canvas = camera.capture()
    canvas = canvas.crop(48,8,224,224)
    data = FaceRecognition.loadmodel(canvas)
    for i in data:
        canvas.draw_rectangle(i["X"],i["Y"],i["X"]+i["W"],i["Y"]+i["H"],color=color,thickness=1)
        if key_C.is_pressed() and FaceNum < len(People_list):
            while key_C.is_pressed():          # wait for the C button to be released
                time.sleep(0.01)
            FaceRecognition.add(People_list[FaceNum],i["feature"])   # save this face under the next name
            canvas.draw_rectangle(0,0,320,20,color=(0,0,0),thickness=-1)
            canvas.draw_string(0,0,"已採集"+str(People_list[FaceNum])+"人臉數據",scale=1,color=(255,255,255),thickness=1)
            Screen.show(canvas,48,8)
            FaceNum = FaceNum + 1
            time.sleep(1)
        faceScore = round(i["recognizeScore"],2)
        if faceScore > 70:
            color = (0,255,0)
            canvas.draw_string(i["X"],i["Y"],str(i["name"])+":"+str(faceScore),scale=1,color=color,thickness=1)
        else:
            color = (255,0,0)
    if key_D.is_pressed() and FaceNum > 0:
        while key_D.is_pressed():              # wait for the D button to be released
            time.sleep(0.01)
        FaceRecognition.remove(People_list[FaceNum-1])               # delete the most recently saved face
        canvas.draw_rectangle(0,0,320,20,color=(0,0,0),thickness=-1)
        canvas.draw_string(0,0,"已刪除"+str(People_list[FaceNum-1])+"人臉數據",scale=1,color=(255,255,255),thickness=1)
        Screen.show(canvas,48,8)
        FaceNum = FaceNum - 1
        time.sleep(1)
    Screen.show(canvas,48,8)
from AI import Digitalecognition
Digitalecognition()
loadModel(img)
| Parameter | Type  | Description |
| --------- | ----- | ----------- |
| img       | image | Image object to recognize; required |
from cocopiPython import cameraPython,Screen
from AI import Digitalecognition

Screen = Screen()
camera = cameraPython()
Digit = Digitalecognition()

while True:
    canvas = camera.capture()
    canvas = canvas.crop(48,8,224,224)
    data = Digit.load(canvas)          # list of detected digits
    for i in data:
        canvas.draw_rectangle(i["X"],i["Y"],i["X"]+i["W"],i["Y"]+i["H"],color=(255,0,0),thickness=1)
        canvas.draw_string(i["X"],i["Y"]-20,(str(i["name"])+":"+str(i["score"])),color=(255,0,0))
    Screen.show(canvas,48,8)
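When several digits appear in the same frame, the per-digit results can be combined into one reading by ordering the detections from left to right. A small sketch over the data list from the example above (pure Python; it assumes each entry's name is the recognized digit):

```python
# Sort the detections left-to-right by the X coordinate of their boxes,
# then join the recognized digit names into a single string.
digits = sorted(data, key=lambda i: i["X"])
reading = "".join(str(i["name"]) for i in digits)
if reading:
    print("recognized number:", reading)
```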
from AI import ObjectDetection
ObjectDetection()
loadModel(img)
| Parameter | Type  | Description |
| --------- | ----- | ----------- |
| img       | image | Image object to recognize; required |
from cocopiPython import cameraPython,Screen
from AI import ObjectDetection

Screen = Screen()
camera = cameraPython()
detector = ObjectDetection()

while True:
    canvas = camera.capture()
    canvas = canvas.crop(48,8,224,224)
    data = detector.load(canvas)       # list of detected objects
    for i in data:
        canvas.draw_rectangle(i["X"],i["Y"],i["X"]+i["W"],i["Y"]+i["H"],color=(255,0,0),thickness=1)
        canvas.draw_string(i["X"],i["Y"]-20,(str(i["name"])+":"+str(i["score"])),color=(255,0,0))
    Screen.show(canvas,48,8)
from AI import RockPaperScissors
RockPaperScissors()
loadModel(img)
| Parameter | Type  | Description |
| --------- | ----- | ----------- |
| img       | image | Image object to recognize; required |
from cocopiPython import cameraPython,Screen
from AI import RockPaperScissors

Screen = Screen()
camera = cameraPython()
RockPaperScissors = RockPaperScissors()

while True:
    canvas = camera.capture()
    canvas = canvas.crop(48,8,224,224)
    data = RockPaperScissors.loadModel(canvas)    # list of detected hand gestures
    for i in data:
        canvas.draw_rectangle(i["X"],i["Y"],i["X"]+i["W"],i["Y"]+i["H"],color=(255,0,0),thickness=1)
        canvas.draw_string(i["X"],i["Y"]-10,(str(i["name"])+":"+str(i["score"])),color=(255,0,0))
    Screen.show(canvas,48,8)
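To turn this into a simple game, the recognized gesture name can be mapped to the move that beats it. A sketch over the data list from the example above; the label strings "rock", "paper" and "scissors" are assumptions and should be replaced with whatever names the model actually reports in i["name"]:

```python
# Hypothetical label names; replace the keys with the names your model reports.
COUNTER_MOVE = {"rock": "paper", "paper": "scissors", "scissors": "rock"}

for i in data:
    move = COUNTER_MOVE.get(str(i["name"]))
    if move is not None:
        print("you played", i["name"], "- I play", move)
```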
from AI import SelfLearning
SelfLearning(lables)
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| lables    | list | List of names of the objects to recognize; required |
addClass(img)
| Parameter | Type  | Description |
| --------- | ----- | ----------- |
| img       | image | Image object to recognize; required |
trainSave(path)
| Parameter | Type   | Description |
| --------- | ------ | ----------- |
| path      | string | Save path; optional; defaults to "/root/user/model/module.bin" |
loadModel(img,path)
| Parameter | Type   | Description |
| --------- | ------ | ----------- |
| img       | image  | Image object to recognize; required |
| path      | string | Save path; optional; defaults to "/root/user/model/module.bin" |
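Putting the three calls together, the intended flow is: collect one sample image per label with addClass, train and save the model with trainSave, then recognize new frames with loadModel. A minimal sketch using the method names and default path documented above; note that the complete examples below use the shorter calls train(), load() and predict(), so adjust to whichever names your firmware exposes, and the return value of loadModel is assumed to carry a name and score like predict() does:

```python
from cocopiPython import cameraPython
from AI import SelfLearning

camera = cameraPython()
SL = SelfLearning(['滑鼠', '鍵盤', '水杯'])

# 1. Collect one sample frame for each label, in order.
for _ in range(3):
    sample = camera.capture().crop(48, 8, 224, 224)
    SL.addClass(sample)

# 2. Train on the collected samples and save to the default path.
SL.trainSave()   # defaults to /root/user/model/module.bin

# 3. Recognize a new frame with the saved model.
frame = camera.capture().crop(48, 8, 224, 224)
result = SL.loadModel(frame)   # assumed to return a name/score result like predict()
print(result)
```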
import time
from cocopiPython import cameraPython,Screen
from AI import SelfLearning
from CocoPi import BUTTON

key_C = BUTTON(13)
key_D = BUTTON(7)
Screen = Screen()
camera = cameraPython()
lables = ['滑鼠','鍵盤','水杯']
SL = SelfLearning(lables)
is_train = 0
count = 0
str_display = "按C鍵添加類別,按D鍵進行學習"   # "press C to add a class, press D to train"

while True:
    canvas = camera.capture()
    canvas = canvas.crop(48,8,224,224)
    if key_C.is_pressed():
        while key_C.is_pressed():          # wait for the C button to be released
            time.sleep(0.01)
        if count < len(lables):
            time.sleep(1)
            SL.addClass(canvas)            # add the current frame as a sample of the next class
            count += 1
    if key_D.is_pressed():
        while key_D.is_pressed():          # wait for the D button to be released
            time.sleep(0.01)
        if count >= len(lables) and is_train == 0:
            time.sleep(1)
            SL.train()                     # train once every class has a sample
            is_train = 1
            str_display = "學習已完成"      # "training finished"
    canvas.draw_string(0,0,str_display,scale=1,color=(255,0,0),thickness=1)
    canvas.draw_string(0,16,"採集次數:"+str(count),scale=1,color=(255,0,0),thickness=1)
    Screen.show(canvas,48,8)
from cocopiPython import cameraPython,Screen
from AI import SelfLearning

Screen = Screen()
camera = cameraPython()
SL = SelfLearning(['滑鼠','鍵盤','水杯'])
SL.load()                                  # load the model saved by the training example

while True:
    img = camera.capture()
    canvas = img.crop(48,8,224,224)
    data = SL.predict(canvas)              # returns the best-matching label and its score
    canvas.draw_string(0,5,(str(data["name"])+":"+str(data["score"])),scale=1,color=(255,0,0),thickness=1)
    Screen.show(canvas,48,8)
from AI import PinyinRecognition
PinyinRecognition(keywords)
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| keywords  | list | Pinyin keywords: the pinyin (with tones) of the text to recognize; required |
getresult()
from cocopiPython import Screen
from AI import PinyinRecognition

Screen = Screen()
canvas = Screen.create()
keywords = [["ni3 hao3",0.1],["hao3 de1",0.1],["wan3 shang4 hao3",0.1]]
PinyinRecognition = PinyinRecognition(keywords)
data = ""

while True:
    canvas.clear()
    data = str(PinyinRecognition.getResult())      # latest recognition result, "None" when nothing matched
    print("jieguo:"+str(data))
    canvas.draw_string(0,0, "請對準麥克風說出指令", scale = 2, color = (255,0,0) , thickness = 1)
    if data != "None":
        canvas.draw_string(0, 30, "拼音識別結果:", scale=2, color=(255,0,0), thickness = 1)
        canvas.draw_string(30, 60, (data), scale=2, color=(255,255,255), thickness = 1)
    Screen.show(canvas)
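If the recognized phrase should trigger behaviour rather than just be displayed, the returned value can be compared against the configured keywords. A small sketch building on the example above; it assumes getResult() returns the matched pinyin string (for example "ni3 hao3") or None when nothing was recognized:

```python
# Hypothetical mapping from the configured keywords to actions.
ACTIONS = {
    "ni3 hao3": "greet",
    "hao3 de1": "confirm",
    "wan3 shang4 hao3": "evening greeting",
}

result = PinyinRecognition.getResult()
if result is not None and str(result) in ACTIONS:
    print("command:", ACTIONS[str(result)])
```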
from AI import PlateRecognition
PlateRecognition()
loadModel(img)
| Parameter | Type  | Description |
| --------- | ----- | ----------- |
| img       | image | Image object to recognize; required |
from cocopiPython import cameraPython,Screen
from AI import PlateRecognition

Screen = Screen()
camera = cameraPython()
PlateRecognition = PlateRecognition()

while True:
    canvas = camera.capture()
    canvas = canvas.crop(48,8,224,224)
    data = PlateRecognition.load(canvas)           # list of detected licence plates
    for i in data:
        canvas.draw_rectangle(i["X"],i["Y"],i["X"]+i["W"],i["Y"]+i["H"],color=(255,0,0),thickness=1)
        canvas.draw_string(i["X"],i["Y"]-10,(str(i["name"])),color=(255,0,0))
    Screen.show(canvas,48,8)