from future import *
from futureKOI import KOI

# Tags the feature classifier will be trained on; button B cycles through them.
# (Original built this with three append() calls and assigned i = 0 twice.)
items = ['rock', 'paper', 'scissors']
i = 0  # index of the tag currently being trained

# KOI camera module on UART pins P2/P12.
koi = KOI(tx='P2', rx='P12', id=1)
koi.init_cls()                 # init classifier
koi.screen_mode(2, cmd='K6')   # show camera feed on the KOI screen
screen.sync = 0                # manual refresh mode on the board screen

while True:
    screen.fill((0, 0, 0))
    if sensor.btnValue("a") and sensor.btnValue("b"):
        # A+B together: persist the trained classifier model.
        koi.cls_save_model(model="model.json", cmd='K43')  # saves the classifier model
        buzzer.melody(1)
    elif sensor.btnValue("a"):
        sleep(0.2)  # debounce / give time to press B for the A+B chord
        if not sensor.btnValue("b"):
            # A alone: add a training sample for the current tag.
            # int((i % 3 + 1) - 1) in the original is just i % 3.
            koi.cls_add_tag(id=items[i % 3], cmd='K41')  # classifier add tag
            buzzer.melody(4)
    elif sensor.btnValue("b"):
        sleep(0.2)
        buzzer.tone(440, 0.2)
        if not sensor.btnValue("a"):
            i += 1  # B alone: advance to the next tag
    screen.text("Now training:", 0, 10, 1, (255, 255, 255))
    screen.text(items[i % 3], 0, 30, 2, (255, 255, 255))
    screen.text("Press A to add tag", 0, 60, 1, (255, 255, 255))
    screen.text("Press B for next tag", 0, 80, 1, (255, 255, 255))
    screen.text("Press A+B to save", 0, 100, 1, (255, 255, 255))
    screen.refresh()
KOI特徵分類器範例程式(模型運行)
from future import *
from futureKOI import KOI

# Run a previously trained feature-classifier model on the KOI camera.
koi = KOI(tx='P2', rx='P12', id=1)
koi.screen_mode(2, cmd='K6')
koi.init_cls()
koi.cls_load_model(model="model.json", cmd='K44')  # loads the classifier model

while True:
    # On button A: classify the current camera frame and show the tag.
    if sensor.btnValue("a"):
        screen.clear()
        tag = koi.cls_run(cmd='K42')  # returns the classified tag
        screen.text(tag, 5, 10, 2, (255, 255, 255))
        screen.refresh()
from future import *
from futureKOI import KOI

# Connect the KOI module to Wi-Fi: A speaks via Baidu TTS, B shows the IP.
koi = KOI(tx='P2', rx='P12', id=1)
koi.screen_mode(2, cmd='K6')
koi.connect_wifi(router="apname", pwd="password", cmd='K50')

while True:
    if sensor.btnValue("a"):
        # Text-to-speech through the Baidu AI service.
        koi.baiduAI_tts(txt='"hello"', cmd='K78')
        sleep(0.2)
    if sensor.btnValue("b"):
        screen.clear()
        ip = koi.get_ip(cmd='K54')  # current IP address of the module
        screen.text(ip, 5, 10, 1, (255, 255, 255))
        screen.refresh()
        sleep(0.2)
語音辨識
錄製wav音頻檔
koi.audio_record(name)
錄製wav音頻檔。
name: 檔案名稱(.wav)
播放wav音頻檔
koi.audio_play(name)
播放wav音頻檔。
name: 檔案名稱(.wav)
校正環境噪音
koi.audio_noisetap()
校正環境噪音,語音辨識前必須運行。
語音辨識增加命令詞
koi.speech_add_tag(tag)
增加語音辨識命令詞。
tag: 命令詞
運行語音辨識
koi.speech_run(cmd="K65")
運行語音辨識,返回命令詞。
儲存語音模型
koi.speech_save_model(file)
儲存語音模型。
file: 檔案名稱(.json)
載入語音模型
koi.speech_load_model(file)
載入語音模型。
file: 檔案名稱(.json)
語音辨識模型訓練範例程式
from future import *
from futureKOI import KOI

# Command words the speech recognizer will be trained on; button B cycles them.
# (Original built this with three append() calls and assigned i = 0 twice.)
items = ['rock', 'paper', 'scissors']
i = 0  # index of the command word currently being trained

koi = KOI(tx='P2', rx='P12', id=1)
koi.audio_noisetap()           # calibrate ambient noise — required before speech work
koi.screen_mode(2, cmd='K6')
screen.sync = 0                # manual refresh mode

while True:
    screen.fill((0, 0, 0))
    if sensor.btnValue("a") and sensor.btnValue("b"):
        # A+B together: persist the trained speech model.
        buzzer.melody(1)
        koi.speech_save_model("speech.json")
    elif sensor.btnValue("a"):
        sleep(0.2)  # debounce / give time to press B for the A+B chord
        if not sensor.btnValue("b"):
            # A alone: record the current command word as a tag.
            # int((i % 3 + 1) - 1) in the original is just i % 3.
            koi.speech_add_tag(items[i % 3])
    elif sensor.btnValue("b"):
        sleep(0.2)
        buzzer.tone(440, 0.2)
        if not sensor.btnValue("a"):
            i += 1  # B alone: advance to the next command word
    screen.text("Now training:", 0, 10, 1, (255, 255, 255))
    screen.text(items[i % 3], 0, 30, 2, (255, 255, 255))
    screen.text("Press A to add tag", 0, 60, 1, (255, 255, 255))
    screen.text("Press B for next tag", 0, 80, 1, (255, 255, 255))
    screen.text("Press A+B to save", 0, 100, 1, (255, 255, 255))
    screen.refresh()
語音辨識模型運行範例程式
from future import *
from futureKOI import KOI

# Run a previously trained speech-recognition model.
koi = KOI(tx='P2', rx='P12', id=1)
koi.audio_noisetap()                  # calibrate ambient noise first
koi.speech_load_model("speech.json")

while True:
    if sensor.btnValue("a"):
        # On button A: recognize a spoken command word and display it.
        screen.clear()
        word = koi.speech_run(cmd='K65')
        screen.text(word, 5, 10, 2, (255, 255, 255))
        screen.refresh()