Python paInt16() example source code

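pyaudio.paInt16 is PyAudio's sample-format constant for signed 16-bit integer samples (2 bytes per sample). It is the format argument passed to PyAudio.open() in every snippet below, and get_sample_size(pyaudio.paInt16) gives the matching byte width when recorded frames are written out as a WAV file. The following minimal sketch shows that common record-and-save pattern; the rate, chunk size and output filename are illustrative values only, not taken from any of the projects listed below.

import wave
import pyaudio

CHUNK = 1024       # frames read from the device per call
RATE = 16000       # sample rate in Hz
SECONDS = 3        # length of the recording
OUTPUT = 'example.wav'

pa = pyaudio.PyAudio()
stream = pa.open(format=pyaudio.paInt16, channels=1, rate=RATE,
                 input=True, frames_per_buffer=CHUNK)

frames = []
for _ in range(int(RATE / CHUNK * SECONDS)):
    frames.append(stream.read(CHUNK))

stream.stop_stream()
stream.close()

# paInt16 samples are 2 bytes wide; setsampwidth() expects this byte count
wf = wave.open(OUTPUT, 'wb')
wf.setnchannels(1)
wf.setsampwidth(pa.get_sample_size(pyaudio.paInt16))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()

pa.terminate()
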
record.py (project: jdtext_classify, author: zhongnanxiaoqin)
def recode(self):
        pa=PyAudio()
        stream=pa.open(format=paInt16,channels=1,rate=self.SAMPLES_RATE,input=True,frames_per_buffer=self.NUM_SAMPLES)
        save_count=0
        save_buffer=[]
        time_count=self.TIME_COUNT
        print '\n\n\nStart recording: please speak into the microphone'
        while True:
            time_count-=1
            string_audio_data=stream.read(self.NUM_SAMPLES)
            audio_data=numpy.fromstring(string_audio_data,dtype=numpy.short)
            large_sample_count=numpy.sum(audio_data>self.LEVEL)
            print(numpy.max(audio_data))
            if large_sample_count>self.COUNT_NUM:
                save_count=self.SAVE_LENGTH
            else:
                save_count-=1
            if save_count<0:
                save_count=0
            if save_count>0:
                save_buffer.append(string_audio_data)
            else:
                if len(save_buffer):
                    self.Voice_String=save_buffer
                    save_buffer=[]
                    print 'Recording saved'
                    return True
            if not time_count:
                if len(save_buffer):
                    self.Voice_String=save_buffer
                    save_buffer=[]
                    print 'Recording saved'
                    return True
                else:
                    return False
vad.py (project: Piwho, author: Adirockzz95)
def save_audio(data, params):
    """ Saves mic data to wav file."""

    filename = gettime()
    data = ''.join(data)
    wf = wave.open(filename + '.wav', 'wb')
    wf.setnchannels(1)
    wf.setsampwidth(params.get_sample_size(pyaudio.paInt16))
    wf.setframerate(RATE)
    wf.writeframes(data)
    wf.close()
t1.py (project: Opencv_learning, author: wjb711)
def record_wave():
  #open the input of wave
  pa = PyAudio()
  stream = pa.open(format = paInt16, channels = 1,
          rate = framerate, input = True,
          frames_per_buffer = NUM_SAMPLES)
  save_buffer = []
  count = 0
  while count < TIME*4:
    #read NUM_SAMPLES sampling data
    string_audio_data = stream.read(NUM_SAMPLES)
    save_buffer.append(string_audio_data)
    count += 1
    print '.'
mic.py (project: jessy, author: jessy-project)
def fetch_threshold(self):

        # TODO: Consolidate variables from the next three functions
        THRESHOLD_MULTIPLIER = 1.8
        RATE = 16000
        CHUNK = 1024

        # number of seconds to allow to establish threshold
        THRESHOLD_TIME = 1

        # prepare recording stream
        stream = self._audio.open(format=pyaudio.paInt16,
                                  channels=1,
                                  rate=RATE,
                                  input=True,
                                  frames_per_buffer=CHUNK)

        # stores the audio data
        frames = []

        # stores the lastN score values
        lastN = [i for i in range(20)]

        # calculate the long run average, and thereby the proper threshold
        for i in range(0, RATE / CHUNK * THRESHOLD_TIME):

            data = stream.read(CHUNK)
            frames.append(data)

            # save this data point as a score
            lastN.pop(0)
            lastN.append(self.get_score(data))
            average = sum(lastN) / len(lastN)

        stream.stop_stream()
        stream.close()

        # this will be the benchmark to cause a disturbance over!
        THRESHOLD = average * THRESHOLD_MULTIPLIER

        return THRESHOLD
test.py (project: python-avs, author: lddias)
def start_recording(self):
        audio = pyaudio.PyAudio()

        # start Recording
        stream = audio.open(format=pyaudio.paInt16, channels=1, rate=16000, input=True, frames_per_buffer=1024)
        logger.info("recording...")

        self._audio = audio
        self._stream = stream
        self._stopped = False
        self._event = threading.Event()
start_speech_bot.py (project: ais-demos, author: jaybutera)
def save_speech(data, p):
    """ Saves mic data to temporary WAV file. Returns filename of saved
        file """

    filename = 'resources/output_'+str(int(time.time()))
    # writes data to WAV file
    data = ''.join(data)
    wf = wave.open(filename + '.wav', 'wb')
    wf.setnchannels(1)
    wf.setsampwidth(p.get_sample_size(pyaudio.paInt16))
    wf.setframerate(16000)  # TODO make this value a function parameter?
    wf.writeframes(data)
    wf.close()
    return filename + '.wav'
piano.py (project: piano, author: respeaker)
def __init__(self, pa):
        self.pa = pa
        self.event = threading.Event()
        self.wav = wave.open(os.path.join(PIANO_PATH, 'c1.wav'), 'rb')
        self.stream = self.pa.open(format=pyaudio.paInt16,
                                   channels=self.wav.getnchannels(),
                                   rate=self.wav.getframerate(),
                                   output=True,
                                   # start=False,
                                   # output_device_index=1,
                                   frames_per_buffer=CHUNK_SIZE,
                                   stream_callback=self._callback)
transcribe_streaming.py (project: hackfair-speech, author: DjangoGirlsSeoul)
def record_audio(rate, chunk):
    """Opens a recording stream in a context manager."""
    audio_interface = pyaudio.PyAudio()
    audio_stream = audio_interface.open(
        format=pyaudio.paInt16,
        # The API currently only supports 1-channel (mono) audio
        # https://goo.gl/z757pE
        channels=1, rate=rate, output=False,
        input=True, frames_per_buffer=chunk,
    #input_device_index = 0,
    )

    # Create a thread-safe buffer of audio data
    buff = queue.Queue()

    # Spin up a separate thread to buffer audio data from the microphone
    # This is necessary so that the input device's buffer doesn't overflow
    # while the calling thread makes network requests, etc.
    fill_buffer_thread = threading.Thread(
        target=_fill_buffer, args=(audio_stream, buff, chunk))
    fill_buffer_thread.start()

    yield _audio_data_generator(buff)

    audio_stream.stop_stream()
    audio_stream.close()
    fill_buffer_thread.join()
    audio_interface.terminate()
# [END audio_stream]
speech_streaming.py (project: hackfair-speech, author: DjangoGirlsSeoul)
def record_audio(channels, rate, chunk):
    """Opens a recording stream in a context manager."""
    audio_interface = pyaudio.PyAudio()
    audio_stream = audio_interface.open(
        format=pyaudio.paInt16, channels=channels, rate=rate,
        input=True, frames_per_buffer=chunk,
    input_device_index=1,
    )

    yield audio_stream

    audio_stream.stop_stream()
    audio_stream.close()
    audio_interface.terminate()
# [END audio_stream]
audio.py (project: streamtotext, author: ibm-dev)
def __init__(self,
                 audio_format=None,
                 channels=1,
                 rate=16000,
                 device_ndx=0):
        super(Microphone, self).__init__()
        audio_format = audio_format or pyaudio.paInt16
        self._format = audio_format
        self._channels = channels
        self._rate = rate
        self._device_ndx = device_ndx
        self._pyaudio = None
        self._stream = None
        self._stream_queue = None
speech.py (project: LastSecondSlides, author: trishume)
def record_audio(channels, rate, chunk):
    """Opens a recording stream in a context manager."""
    audio_interface = pyaudio.PyAudio()
    audio_stream = audio_interface.open(
        format=pyaudio.paInt16, channels=channels, rate=rate,
        input=True, frames_per_buffer=chunk,
    )

    yield audio_stream

    audio_stream.stop_stream()
    audio_stream.close()
    audio_interface.terminate()
# [END audio_stream]
main.py (project: CodeLabs, author: TheIoTLearningInitiative)
def start():

    CHUNK = 1024
    FORMAT = pyaudio.paInt16
    CHANNELS = 1
    RATE = 16000
    RECORD_SECONDS = 3
    WAVE_OUTPUT_FILENAME = path+'recording.wav'

    p = pyaudio.PyAudio()

    stream = p.open(format=FORMAT,
                    channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    frames_per_buffer=CHUNK)

    print("Recording...")

    frames = []

    for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
        data = stream.read(CHUNK)
        frames.append(data)

    print("Done!")

    stream.stop_stream()
    stream.close()
    p.terminate()

    wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
    wf.setnchannels(CHANNELS)
    wf.setsampwidth(p.get_sample_size(FORMAT))
    wf.setframerate(RATE)
    wf.writeframes(b''.join(frames))
    wf.close()


    alexa()
srps2.py (project: CodeLabs, author: TheIoTLearningInitiative)
def __init__(self):
        # Microphone stream config.
        self.CHUNK = 1024  # CHUNKS of bytes to read each time from mic
        self.FORMAT = pyaudio.paInt16
        self.CHANNELS = 1
        self.RATE = 16000

        self.SILENCE_LIMIT = 1  # Silence limit in seconds. The max amount of seconds where
                           # only silence is recorded. When this time passes the
                           # recording finishes and the file is decoded

        self.PREV_AUDIO = 0.5  # Previous audio (in seconds) to prepend. When noise
                          # is detected, how much of previously recorded audio is
                          # prepended. This helps to prevent chopping the beginning
                          # of the phrase.

        self.THRESHOLD = 4500
        self.num_phrases = -1

        # These will need to be modified according to where the pocketsphinx folder is
        MODELDIR = "../../tools/pocketsphinx/model"
        DATADIR = "../../tools/pocketsphinx/test/data"

        # Create a decoder with certain model
        #config = Decoder.default_config()
        #config.set_string('-hmm', os.path.join(MODELDIR, 'en-us/en-us'))
        #config.set_string('-lm', os.path.join(MODELDIR, 'en-us/en-us.lm.bin'))
        #config.set_string('-dict', os.path.join(MODELDIR, 'en-us/cmudict-en-us.dict'))

        LMD   = "4842.lm"
        DICTD = "4842.dic"

        # Creates a decoder object for streaming data.
        #self.decoder = Decoder(config)
        self.decoder = Decoder(lm=LMD, dict=DICTD)
srps2.py (project: CodeLabs, author: TheIoTLearningInitiative)
def save_speech(self, data, p):
        """
        Saves mic data to temporary WAV file. Returns filename of saved
        file
        """
        filename = 'output_'+str(int(time.time()))
        # writes data to WAV file
        data = ''.join(data)
        wf = wave.open(filename + '.wav', 'wb')
        wf.setnchannels(1)
        wf.setsampwidth(p.get_sample_size(pyaudio.paInt16))
        wf.setframerate(16000)  # TODO make this value a function parameter?
        wf.writeframes(data)
        wf.close()
        return filename + '.wav'
realtime_tdoa.py (project: tdoa, author: xiongyihui)
def read_chunks(self, size):
        device_index = None
        # for i in range(self.pyaudio_instance.get_device_count()):
        #     dev = self.pyaudio_instance.get_device_info_by_index(i)
        #     name = dev['name'].encode('utf-8')
        #     print(i, name, dev['maxInputChannels'], dev['maxOutputChannels'])
        #     if dev['maxInputChannels'] >= self.channels:
        #         print('Use {}'.format(name))
        #         device_index = i
        #         break

        # if not device_index:
        #     print('can not find input device with {} channel(s)'.format(self.channels))
        #     return

        stream = self.pyaudio_instance.open(
            input=True,
            format=pyaudio.paInt16,
            channels=self.channels,
            rate=self.sample_rate,
            frames_per_buffer=size,
            stream_callback=self._callback,
            input_device_index = device_index,
        )

        while not self.quit_event.is_set():
            frames = self.queue.get()
            if not frames:
                break
            yield frames

        stream.close()
view_with_band_pass_filter.py (project: tdoa, author: xiongyihui)
def start(self, quit_event=None, show=None):
        stream = self.pyaudio_instance.open(
            rate=RATE,
            frames_per_buffer=FRAMES,
            format=pyaudio.paInt16,
            channels=2,
            input=True,
            # output_device_index=1,
            stream_callback=self._callback)

        self.event.clear()
        if not quit_event:
            quit_event = threading.Event()

        phat = [0] * (2 * direction_n + 1)
        while not (quit_event.is_set() or self.event.is_set()):
            try:
                data = self.queue.get()

                buf = np.fromstring(data, dtype='int16')
                tau, cc = gcc_phat(buf[0::2] * window, buf[1::2] * window, fs=RATE, max_tau=max_tau, interp=1)
                theta = math.asin(tau / max_tau) * 180 / math.pi
                print('\ntheta: {}'.format(int(theta)))

                for i, v in enumerate(cc):
                    phat[i] = int(v * 512)

                if show:
                    show(phat)
                # print [l for l in level]
            except KeyboardInterrupt:
                break

        stream.close()
view.py (project: tdoa, author: xiongyihui)
def start(self, quit_event=None, show=None):
        stream = self.pyaudio_instance.open(
            rate=RATE,
            frames_per_buffer=FRAMES,
            format=pyaudio.paInt16,
            channels=2,
            input=True,
            # output_device_index=1,
            stream_callback=self._callback)

        self.event.clear()
        if not quit_event:
            quit_event = threading.Event()

        phat = [0] * (2 * direction_n + 1)
        while not (quit_event.is_set() or self.event.is_set()):
            try:
                data = self.queue.get()

                buf = np.fromstring(data, dtype='int16')
                tau, cc = gcc_phat(buf[0::2] * window, buf[1::2] * window, fs=RATE, max_tau=max_tau, interp=1)
                theta = math.asin(tau / max_tau) * 180 / math.pi
                print('\ntheta: {}'.format(int(theta)))

                for i, v in enumerate(cc):
                    phat[i] = int(v * 512)

                if show:
                    show(phat)
                # print [l for l in level]
            except KeyboardInterrupt:
                break

        stream.close()
SWHear.py (project: Python-GUI-examples, author: swharden)
def valid_test(self,device,rate=44100):
        """given a device ID and a rate, return TRUE/False if it's valid."""
        try:
            self.info=self.p.get_device_info_by_index(device)
            if not self.info["maxInputChannels"]>0:
                return False
            stream=self.p.open(format=pyaudio.paInt16,channels=1,
               input_device_index=device,frames_per_buffer=self.chunk,
               rate=int(self.info["defaultSampleRate"]),input=True)
            stream.close()
            return True
        except:
            return False
SWHear.py (project: Python-GUI-examples, author: swharden)
def stream_start(self):
        """adds data to self.data until termination signal"""
        self.initiate()
        print(" -- starting stream")
        self.keepRecording=True # set this to False later to terminate stream
        self.data=None # will fill up with threaded recording data
        self.fft=None
        self.dataFiltered=None #same
        self.stream=self.p.open(format=pyaudio.paInt16,channels=1,
                      rate=self.rate,input=True,frames_per_buffer=self.chunk)
        self.stream_thread_new()
transcribe_streaming.py (project: appbackendapi, author: codesdk)
def record_audio(rate, chunk):
    """Opens a recording stream in a context manager."""
    # Create a thread-safe buffer of audio data
    buff = queue.Queue()

    audio_interface = pyaudio.PyAudio()
    audio_stream = audio_interface.open(
        format=pyaudio.paInt16,
        # The API currently only supports 1-channel (mono) audio
        # https://goo.gl/z757pE
        channels=1, rate=rate,
        input=True, frames_per_buffer=chunk,
        # Run the audio stream asynchronously to fill the buffer object.
        # This is necessary so that the input device's buffer doesn't overflow
        # while the calling thread makes network requests, etc.
        stream_callback=functools.partial(_fill_buffer, buff),
    )

    yield _audio_data_generator(buff)

    audio_stream.stop_stream()
    audio_stream.close()
    # Signal the _audio_data_generator to finish
    buff.put(None)
    audio_interface.terminate()
# [END audio_stream]

