Examples of the Python Microphone() class from project source code
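Each entry below is an excerpt from an open-source project that uses the Microphone class from the speech_recognition package (imported as sr or speech_recognition). For orientation, here is a minimal sketch of the common pattern these excerpts build on; it is a standalone illustration, not code from any of the projects listed below.

import speech_recognition as sr

r = sr.Recognizer()
with sr.Microphone() as source:           # open the default input device
    r.adjust_for_ambient_noise(source)    # calibrate the energy threshold against background noise
    audio = r.listen(source)              # record one phrase
try:
    print("You said: " + r.recognize_google(audio))
except sr.UnknownValueError:
    print("Could not understand audio")
except sr.RequestError as e:
    print("Recognition request failed; {0}".format(e))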

main.py source code (project: Onyx, author: OnyxProject)
def detected_callback(self):
        self.detector.terminate()
        play_wav(onyx.__path__[0] + "/client/speech/resources/ding.wav")

        r = sr.Recognizer()

        with sr.Microphone() as source:
            print("Say something!")
            audio = r.listen(source, timeout=1, phrase_time_limit=5)

        try:
            result = stt.execute(audio, language=self.lang)
            print("You said: " + result)

            def create_ws():
                def onConnected(event=None):
                    print ("Sending message...")
                    payload = {
                            'utterances': [result]
                    }
                    ws.emit(Message('recognizer_loop:utterance', payload))
                    t.close()
                    #self.detector.start(self.detected_callback)


                ws = WebsocketClient()
                ws.on('connected', onConnected)
                # This will block until the client gets closed
                ws.run_forever()

            t = threading.Thread(target=create_ws)
            t.start()
            time.sleep(2)
            self.detector.start(self.detected_callback)


        except sr.UnknownValueError:
            print("Speech Recognition could not understand audio")
        except sr.RequestError as e:
            print("Could not request results from Speech Recognition service; {0}".format(e))
main.py source code (project: furbymt, author: starfys)
def get_command():
    with sr.Microphone() as source:
        print("Say something!")
        audio = r.listen(source)
    # recognize speech using Google Speech Recognition
    try:
        # for testing purposes, we're just using the default API key
        # to use another API key, use `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
        # instead of `r.recognize_google(audio)`
        tts_result = r.recognize_google(audio)
        print('Got ' + tts_result)
    except sr.UnknownValueError:
        print("Google Speech Recognition could not understand audio")
        return None
    except sr.RequestError as e:
        print("Could not request results from Google Speech Recognition service; {0}".format(e))
        return None
    #Read keys from json
    with open('keys.json','r') as keys_file:
        keys = json.load(keys_file)
    apiai_client_token = keys['apiai_client_token']
    #Create a session
    session = requests.Session()
    session.headers.update({"Authorization":"Bearer {}".format(apiai_client_token),
                        "Content-Type":"application/json; charset=utf-8"})
    #API.ai
    API_BASE_URL="https://api.api.ai/v1/"
    #Make a request
    return session.get(API_BASE_URL+"query", params={"query": tts_result,"v":"20170204","sessionId":"furby","lang":"en"}).json()["result"]
ears.py source code (project: alan, author: camtaylor)
def ears():
  # obtain audio from the microphone
  r = sr.Recognizer()
  with sr.Microphone() as source:
    audio = r.listen(source)
  # recognize speech using Google Speech Recognition
  try:
    return r.recognize_google(audio)
  except sr.UnknownValueError:
    return ears() 
  except sr.RequestError as e:
    return "I do not understand; {0}".format(e)
audioInput.py source code (project: Nancy-VA--MacOS, author: itz-azhar)
def listen():
        speak('Listening!')
        with speech_recognition.Microphone() as source:
            recognizer.adjust_for_ambient_noise(source)
            audio = recognizer.listen(source)
        try:
            return recognizer.recognize_google(audio)
        except speech_recognition.UnknownValueError:
            notify(message="Could not understand audio")
        except speech_recognition.RequestError as e:
            notify(message="Connection Problem")
        return ""
voicecontrol.py source code (project: The-Machine, author: Jo-Dan)
def get_speech():
    with sr.Microphone() as source:
        r.adjust_for_ambient_noise(source)
        print '\n>>>',
        audio = r.listen(source)
    print ">",
    try:
        speech = r.recognize_google(audio)
    except:
        speech = "No input detected."
    print speech
    return speech
realtimerecognize.py source code (project: audiolearning, author: jingwang3235)
def listen_translate():
    while(True):
        # obtain audio from the microphone
        r = sr.Recognizer()
        with sr.Microphone(sample_rate=8000) as source:
            print("Say something!")
    #         print(5),
    #         time.sleep(1)
    #         print(4),
    #         time.sleep(1)
    #         print(3),
    #         time.sleep(1)
    #         print(2),
    #         time.sleep(1)  
    #         print(1),     
    #         time.sleep(1)  
            audio = r.listen(source)#,timeout=5,phrase_time_limit=0.05

    #     r = sr.Recognizer()
    #     with sr.AudioFile('./english.wav') as source:
    #         audio = r.record(source)  # read the entire audio file

        # write audio to a WAV file
        with open("microphone-results.wav", "wb") as f:
            f.write(audio.get_wav_data())

        # recognize speech using Sphinx
        try:
            print("Sphinx thinks you said: " + r.recognize_sphinx(audio))
        except sr.UnknownValueError:
            print("Sphinx could not understand audio")
        except sr.RequestError as e:
            print("Sphinx error; {0}".format(e))

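recognize_sphinx, used in the excerpt above, runs offline through CMU PocketSphinx, so it requires the pocketsphinx package to be installed but no network connection; an sr.RequestError from it indicates a missing or broken Sphinx installation rather than a failed web request.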

realtimerecognize.py source code (project: audiolearning, author: jingwang3235)
def listen_and_recognize():           
    r = sr.Recognizer()
    m = sr.Microphone(sample_rate=8000)
    r.listen_in_background(m, callback, phrase_time_limit=1)

    lastlen = 0  # number of audio chunks already merged in the previous pass
    while(True):
        if len(audiolist) == 0:
            time.sleep(10)
            continue
        if lastlen == len(audiolist):
            time.sleep(10)
            continue
        output = wave.open('microphone-results.wav', 'wb')
        output.setnchannels(1)
        setparam = False
        para = None
        for audio in audiolist:
            with open("temps.wav", "wb") as f:
                f.write(audio.get_wav_data())
            temps = wave.open('temps.wav', 'rb')
            #print temps.getparams()
            if not setparam:
                para = temps.getparams()
                output.setparams(para)
                setparam = True
            output.writeframes(temps.readframes(temps.getnframes()))

        output.close() 
#         output = wavefile.open('microphone-results.wav', 'rb')
#         outputaudio=sr.AudioData(output.readframes(output.getnframes()),para[2],para[1])
#         translate(r,outputaudio)
        #baidu('microphone-results.wav')
        lastlen = len(audiolist)
        time.sleep(10)
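
The excerpt above refers to a callback function and an audiolist that are defined elsewhere in the file. listen_in_background runs in a worker thread and calls the callback with the recognizer and each captured piece of audio, so a plausible pair of definitions (an assumption for illustration, not taken from the project) would be:

audiolist = []

def callback(recognizer, audio):
    # called from the background listener thread with each captured phrase;
    # collect the chunks so the loop above can merge them into microphone-results.wav
    audiolist.append(audio)
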
properties.py source code (project: Lab-Assistant, author: SPARC-Auburn)
def processcommand(self, usermsg, source):
        """
        Processes a command from the user and handles wake-word detection.
        Returns True if the wake word was detected and a command was heard.
        :param usermsg: The message that the user entered
        :param source: Microphone audio source
        """
        print ("< " + usermsg)
        wake_words = ["hey", "ok", "okay", "yo", "you", "hi"]
        awake = False
        for word in wake_words:
            if self.userCommand == word + " " + self.name:
                awake = True
        if awake:
            self.playsound("start.mp3")
            self.speak("Yes?")
            print ("\tWaiting for command..")
            response_heard = False
            while not response_heard:
                print ("\tThreshold: " + str(self.r.energy_threshold))
                print ("\tWaiting for command...")
                try:
                    audio = self.r.listen(source, timeout=5)
                    print("...")
                    self.playsound("end.mp3")
                    try:
                        self.userCommand = self.r.recognize_google(audio)
                        self.userCommand = self.userCommand.lower()
                        response_heard = True
                        print ("< " + self.userCommand)
                    except sr.UnknownValueError:
                        print("\tUnknown Value from Google, or nothing heard")
                    except sr.RequestError as e:
                        print("\tCould not request results from Google Speech Recognition service; {0}".format(e))
                    except Exception as e:
                        print (str(e))
                except Exception:
                    print ("\tHeard nothing")
                    pass
            return True
        else:
            for word in wake_words:
                if self.userCommand.__contains__(word + " " + self.name):
                    print ("\tGetting command..")
                    self.userCommand = self.userCommand.split(word + " " + self.name + " ")[1]
                    return True
            return False
boot.py source code (project: stephanie-va, author: SlapBot)
def initiate(self):
        print("Stephanie is on and loading, wait for the beep sound to give your command.")
        if self.c.config.getboolean("APPLICATION", "update_check"):
            self.updater.check_for_update()
        self.status = True
        if self.c.config.getboolean("SYSTEM", "wake_up_engine"):
            self.status = False
            self.active = False
        if self.c.config.getboolean("SYSTEM", "always_on_engine"):
            self.status = False
            self.active = False
        r = sr.Recognizer()
        act = Activity(sr, r, self.events)
        assistant = VirtualAssistant(sr, r, self.events)
        if self.c.config.getboolean("SYSTEM", "wake_up_engine"):
            while not self.active:
                with sr.Microphone() as source:
                    self.active = act.check(source)
                    self.status = self.active
                    self.events.sleep_status = not self.status
                    if self.active:
                        self.speaker.speak("How may I help you?")
                        while self.status:
                            with sr.Microphone() as source:
                                assistant.main(source)
                                if self.events.active_status:
                                    self.status = False
                                    self.active = True
                                elif self.events.sleep_status:
                                    self.status = False
                                    self.active = False
        elif self.c.config.getboolean("SYSTEM", "always_on_engine"):
            while not self.active:
                with sr.Microphone() as source:
                    self.active = act.check_always_on(source)
                    self.status = self.active
                    if self.active:
                        while self.status:
                            with sr.Microphone() as source:
                                assistant.main(source)
                                self.status = False
                                self.active = False
                                if self.events.active_status:
                                    self.status = False
                                    self.active = True
        else:
            self.speaker.speak("How may I help you?")
            while self.status:
                with sr.Microphone() as source:
                    assistant.main(source)
                    if self.events.active_status:
                        self.status = False
asus.py source code (project: Face-Recognition-for-Mobile-Robot, author: gagolucasm)
def callback(self, data):
        i = 0
        rg = spr.Recognizer()
        try:
            frame = self.bridge.imgmsg_to_cv2(data, "bgr8")
            frame = libs.resize(frame, width=600)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            (rects, i, facess) = et.track(gray, i)
            for rect in rects:
                cv2.rectangle(frame, (rect[0], rect[1]), (rect[2], rect[3]), (0, 255, 0), 2)
            if facess != []:
                for face in facess:
                    pred, conf = recognizer.predict(face)
                    if conf < 120:
                        print "Reconozco a Lucas con una confianza de {}".format(conf)
                        self.num = self.num + 1
                        if self.num == 10:
                            self.engine.say('Hi ')
                            self.engine.say(list(dictid.keys())[list(dictid.values()).index(pred)])
                            self.engine.runAndWait()
                            with spr.Microphone() as source:
                                rg.adjust_for_ambient_noise(source)
                                print 'Escuchando'
                                audio = rg.listen(source)
                                try:
                                    respuesta = rg.recognize_sphinx(audio)
                                    print respuesta
                                    if respuesta != 'no':
                                        self.engine.say('OKEY ')
                                        self.engine.say('Getting')
                                        self.engine.say('new')
                                        self.engine.say('data')
                                        self.engine.runAndWait()
                                except spr.UnknownValueError:
                                    print 'error'
                    else:
                        print "Desconocido"
            cv2.imshow("Tracking", frame)
            cv2.waitKey(1)
        except CvBridgeError as e:
            print(e)
controller.py source code (project: furbymt, author: starfys)
def get_command():
    with sr.Microphone() as source:
        print("Say something!")
        audio = r.listen(source)
    # recognize speech using Google Speech Recognition
    try:
        # for testing purposes, we're just using the default API key
        # to use another API key, use `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
        # instead of `r.recognize_google(audio)`
        tts_result = r.recognize_google(audio)
    except sr.UnknownValueError:
        print("Google Speech Recognition could not understand audio")
        exit()
    except sr.RequestError as e:
        print("Could not request results from Google Speech Recognition service; {0}".format(e))
        exit()
    #Read keys from json
    with open('keys.json','r') as keys_file:
        keys = json.load(keys_file)
    apiai_client_token = keys['apiai_client_token']
    #Create a session
    session = requests.Session()
    session.headers.update({"Authorization":"Bearer {}".format(apiai_client_token),
                        "Content-Type":"application/json; charset=utf-8"})
    #API.ai
    API_BASE_URL="https://api.api.ai/v1/"
    #Make a request
    return session.get(API_BASE_URL+"query", params={"query": tts_result,"v":"20170204","sessionId":"furby","lang":"en"}).json()["result"]

#bee_movie, 
#get_date, 
#get_forecast, 
#get_fortune, 
#prompt_name (for get lucky number), 
#get_stallman, 
#torture, 
#get_time, 
#prompt_question (for wolfram query)
#
# joke
# dad joke
# love
# music
__sampleCheck.py source code (project: PYSHA, author: shafaypro)
def write_by_speak(self):
        r = sr.Recognizer()
        with sr.Microphone() as source:
            r.adjust_for_ambient_noise(source, duration=1)
            # print(r.energy_threshold)
            # print("Chucking rate: ", source.CHUNK)
            # print("format rate :", source.format)  # Debuggin purpose
            # CHUNK = 1024
            # FORMAT = pyaudio.paInt16  # the Format is picked up from the pyaudio
            # CHANNELS = 2  # The Cross Channels
            # # RATE = 44100
            # source.CHUNK = CHUNK
            # source.format = FORMAT  # FORMATING THE SOURCE FILE
            # print(dir(source))
            print("Say something!...")
            # print(r.energy_threshold)
            r.energy_threshold += 280
            # # print(r.adjust_for_ambient_noise(source,duration=1))
            audio = r.listen(source)

            # Speech recognition using Google Speech Recognition
        try:
            print("Parsing ...")  # Debugging To
            # for testing purposes, we're just using the default API key
            # to use another API key, use `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
            # instead of `r.recognize_google(audio)`
            # print(r.energy_threshold )
            # print(help(r.recognize_google))
            # text = r.recognize_google(audio, language='en-US')
            text = r.recognize_google(audio, language='en-GB')  # Recognizing the command through the google
            # r.re
            # r.re
            print("You said: " + text)
            return text
        except sr.UnknownValueError:
            print("Google Speech Recognition could not understand audio")
            return
        except sr.RequestError as e:
            print("Could not request results from Google Speech Recognition service; {0}".format(e))
            return
        except sr.HTTPError as e:
            print("Couldn't connect to the websites perhaps , Hyper text transfer protocol error; {0}".format(e))
            return  # returning for the debugging

