
Python session.SessionManager Class Code Examples


This article collects typical usage examples of the Python class mycroft.session.SessionManager. If you are wondering what SessionManager does, how it is used, or want to see it in real code, the curated class examples below should help.



The following 18 code examples demonstrate the SessionManager class, ordered by popularity by default. A minimal sketch of the pattern they share is shown first.
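Before the individual examples, here is a minimal sketch of the pattern that recurs throughout them: SessionManager.touch() refreshes (or creates) the active session, and SessionManager.get().session_id is read to tag outgoing messages. The tag_with_session helper and the emitter argument are illustrative placeholders, not part of mycroft-core.

    from mycroft.session import SessionManager

    def tag_with_session(emitter, event_name, data):
        # Illustrative helper (not from mycroft-core): 'emitter' stands in
        # for the message-bus client used by the listener examples below.
        SessionManager.touch()  # refresh (or create) the active session
        data['session'] = SessionManager.get().session_id  # attach current session id
        emitter.emit(event_name, data)

Most of the examples below follow this touch-then-tag sequence before emitting a recognizer_loop event or posting metrics.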

Example 1: process_wake_word

    def process_wake_word(self, audio, timer):
        hyp = self.mycroft_recognizer.transcribe(audio.frame_data,
                                                 self.metrics)

        if self.mycroft_recognizer.contains(hyp):
            extractor = WordExtractor(audio, self.mycroft_recognizer,
                                      self.metrics)
            timer.lap()
            extractor.calculate_range()
            self.metrics.timer("mycroft.recognizer.extractor.time_s",
                               timer.lap())
            audio_before = extractor.get_audio_data_before()
            self.metrics.timer("mycroft.recognizer.audio_extracted.length_s",
                               self._audio_length(audio_before))
            audio_after = extractor.get_audio_data_after()
            self.metrics.timer("mycroft.recognizer.audio_extracted.length_s",
                               self._audio_length(audio_after))

            SessionManager.touch()
            payload = {
                'utterance': hyp.hypstr,
                'session': SessionManager.get().session_id,
                'pos_begin': extractor.begin,
                'pos_end': extractor.end
            }
            self.emitter.emit("recognizer_loop:wakeword", payload)

            try:
                self.transcribe([audio_before, audio_after])
            except sr.UnknownValueError:
                self.__speak("Go ahead")
                self.state.skip_wakeword = True
                self.metrics.increment("mycroft.wakeword")
Developer: Irrelon | Project: mycroft-core | Lines: 33 | Source: listener.py


Example 2: wake_up

 def wake_up(self, audio):
     if self.wakeup_recognizer.is_recognized(audio.frame_data,
                                             self.metrics):
         SessionManager.touch()
         self.state.sleeping = False
         self.__speak(mycroft.dialog.get("i am awake", self.stt.lang))
         self.metrics.increment("mycroft.wakeup")
Developer: ChristopherRogers1991 | Project: mycroft-core | Lines: 7 | Source: listener.py


Example 3: process_wake_up

 def process_wake_up(self, audio):
     if self.wakeup_recognizer.is_recognized(audio.frame_data,
                                             self.metrics):
         SessionManager.touch()
         self.state.sleeping = False
         self.__speak("I'm awake.")  # TODO: Localization
         self.metrics.increment("mycroft.wakeup")
Developer: Irrelon | Project: mycroft-core | Lines: 7 | Source: listener.py


Example 4: process

    def process(self, audio):
        SessionManager.touch()
        payload = {
            'utterance': self.wakeword_recognizer.key_phrase,
            'session': SessionManager.get().session_id,
        }
        self.emitter.emit("recognizer_loop:wakeword", payload)

        if self._audio_length(audio) < self.MIN_AUDIO_SIZE:
            LOG.warning("Audio too short to be processed")
        else:
            stopwatch = Stopwatch()
            with stopwatch:
                transcription = self.transcribe(audio)
            if transcription:
                ident = str(stopwatch.timestamp) + str(hash(transcription))
                # STT succeeded, send the transcribed speech on for processing
                payload = {
                    'utterances': [transcription],
                    'lang': self.stt.lang,
                    'session': SessionManager.get().session_id,
                    'ident': ident
                }
                self.emitter.emit("recognizer_loop:utterance", payload)
                self.metrics.attr('utterances', [transcription])
            else:
                ident = str(stopwatch.timestamp)
            # Report timing metrics
            report_timing(ident, 'stt', stopwatch,
                          {'transcription': transcription,
                           'stt': self.stt.__class__.__name__})
Developer: Dark5ide | Project: mycroft-core | Lines: 31 | Source: listener.py


Example 5: process_skip_wake_word

 def process_skip_wake_word(self, audio):
     SessionManager.touch()
     try:
         self.transcribe([audio])
     except sr.UnknownValueError:
         logger.warn("Speech Recognition could not understand audio")
         self.__speak("Sorry, I didn't catch that.")
         self.metrics.increment("mycroft.recognizer.error")
     self.state.skip_wakeword = False
Developer: Irrelon | Project: mycroft-core | Lines: 9 | Source: listener.py


Example 6: process_audio

 def process_audio(self, audio):
     SessionManager.touch()
     payload = {
         'utterance': self.mycroft_recognizer.key_phrase,
         'session': SessionManager.get().session_id,
     }
     self.emitter.emit("recognizer_loop:wakeword", payload)
     try:
         self.transcribe([audio])
     except sr.UnknownValueError:  # TODO: Localization
         logger.warn("Speech Recognition could not understand audio")
         self.__speak("Sorry, I didn't catch that.")
Developer: Acidburn0zzz | Project: mycroft-core | Lines: 12 | Source: listener.py


Example 7: process

    def process(self, audio):
        SessionManager.touch()
        payload = {
            'utterance': self.wakeword_recognizer.key_phrase,
            'session': SessionManager.get().session_id,
        }
        self.emitter.emit("recognizer_loop:wakeword", payload)

        if self._audio_length(audio) < self.MIN_AUDIO_SIZE:
            LOG.warning("Audio too short to be processed")
        else:
            self.transcribe(audio)
Developer: aatchison | Project: mycroft-core | Lines: 12 | Source: listener.py


Example 8: process

    def process(self, audio):
        SessionManager.touch()
        payload = {
            'utterance': self.mycroft_recognizer.key_phrase,
            'session': SessionManager.get().session_id,
        }
        self.emitter.emit("recognizer_loop:wakeword", payload)

        if self._audio_length(audio) < self.MIN_AUDIO_SIZE:
            LOG.warn("Audio too short to be processed")
        elif connected():
            self.transcribe(audio)
        else:
            self.__speak("Mycroft seems not to be connected to the Internet")
Developer: forslund | Project: mycroft-core | Lines: 14 | Source: listener.py


Example 9: transcribe

 def transcribe(self, audio):
     text = None
     try:
         # Invoke the STT engine on the audio clip
         text = self.stt.execute(audio).lower().strip()
         LOG.debug("STT: " + text)
     except sr.RequestError as e:
         LOG.error("Could not request Speech Recognition {0}".format(e))
     except ConnectionError as e:
         LOG.error("Connection Error: {0}".format(e))
         self.emitter.emit("recognizer_loop:no_internet")
     except HTTPError as e:
         if e.response.status_code == 401:
             text = "pair my device"  # phrase to start the pairing process
             LOG.warning("Access Denied at mycroft.ai")
     except Exception as e:
         LOG.error(e)
         LOG.error("Speech Recognition could not understand audio")
     if text:
         # STT succeeded, send the transcribed speech on for processing
         payload = {
             'utterances': [text],
             'lang': self.stt.lang,
             'session': SessionManager.get().session_id
         }
         self.emitter.emit("recognizer_loop:utterance", payload)
         self.metrics.attr('utterances', [text])
Developer: aatchison | Project: mycroft-core | Lines: 27 | Source: listener.py


Example 10: transcribe

    def transcribe(self, audio_segments):
        utterances = []
        threads = []
        if connected():
            for audio in audio_segments:
                if self._audio_length(audio) < self.MIN_AUDIO_SIZE:
                    logger.debug("Audio too short to send to STT")
                    continue

                target = self._create_remote_stt_runnable(audio, utterances)
                t = threading.Thread(target=target)
                t.start()
                threads.append(t)

            for thread in threads:
                thread.join()
            if len(utterances) > 0:
                payload = {
                    'utterances': utterances,
                    'session': SessionManager.get().session_id
                }
                self.emitter.emit("recognizer_loop:utterance", payload)
                self.metrics.attr('utterances', utterances)
            else:
                raise sr.UnknownValueError
        else:  # TODO: Localization
            self.__speak("This device is not connected to the Internet")
Developer: bisaysavath | Project: mycroft-core | Lines: 27 | Source: listener.py


Example 11: publish

 def publish(self, events):
     if 'session_id' not in events:
         session_id = SessionManager.get().session_id
         events['session_id'] = session_id
     if self.enabled:
         requests.post(
             self.url,
             headers={'Content-Type': 'application/json'},
             data=json.dumps(events), verify=False)
Developer: seymour-bootay | Project: mycroft-core | Lines: 9 | Source: __init__.py


Example 12: try_consume_audio

    def try_consume_audio(self):
        timer = Stopwatch()
        hyp = None
        audio = self.queue.get()
        self.metrics.timer("mycroft.recognizer.audio.length_s", self._audio_length(audio))
        self.queue.task_done()
        timer.start()
        if self.state.sleeping:
            hyp = self.wakeup_recognizer.transcribe(audio.get_wav_data(), metrics=self.metrics)
            if hyp and hyp.hypstr:
                logger.debug("sleeping recognition: " + hyp.hypstr)
            if hyp and hyp.hypstr.lower().find("wake up") >= 0:
                SessionManager.touch()
                self.state.sleeping = False
                self.__speak("I'm awake.")  # TODO: Localization
                self.metrics.increment("mycroft.wakeup")
        else:
            if not self.state.skip_wakeword:
                hyp = self.ww_recognizer.transcribe(audio.get_wav_data(), metrics=self.metrics)

            if hyp and hyp.hypstr.lower().find("mycroft") >= 0:
                extractor = WakewordExtractor(audio, self.ww_recognizer, self.metrics)
                timer.lap()
                extractor.calculate_range()
                self.metrics.timer("mycroft.recognizer.extractor.time_s", timer.lap())
                audio_before = extractor.get_audio_data_before()
                self.metrics.timer("mycroft.recognizer.audio_extracted.length_s", self._audio_length(audio_before))
                audio_after = extractor.get_audio_data_after()
                self.metrics.timer("mycroft.recognizer.audio_extracted.length_s", self._audio_length(audio_after))

                SessionManager.touch()
                payload = {
                    'utterance': hyp.hypstr,
                    'session': SessionManager.get().session_id,
                    'pos_begin': int(extractor.range.begin),
                    'pos_end': int(extractor.range.end)
                }
                self.emitter.emit("recognizer_loop:wakeword", payload)

                try:
                    self.transcribe([audio_before, audio_after])
                except sr.UnknownValueError:
                    self.__speak("Go ahead")
                    self.state.skip_wakeword = True
                    self.metrics.increment("mycroft.wakeword")

            elif self.state.skip_wakeword:
                SessionManager.touch()
                try:
                    self.transcribe([audio])
                except sr.UnknownValueError:
                    logger.warn("Speech Recognition could not understand audio")
                    self.__speak("Sorry, I didn't catch that.")
                    self.metrics.increment("mycroft.recognizer.error")
                self.state.skip_wakeword = False
            else:
                self.metrics.clear()
        self.metrics.flush()
Developer: BK-University | Project: mycroft-core | Lines: 58 | Source: listener.py


Example 13: transcribe

 def transcribe(self, audio):
     text = None
     try:
         text = self.stt.execute(audio).lower().strip()
         LOG.debug("STT: " + text)
     except sr.RequestError as e:
         LOG.error("Could not request Speech Recognition {0}".format(e))
     except HTTPError as e:
         if e.response.status_code == 401:
             text = "pair my device"
             LOG.warn("Access Denied at mycroft.ai")
     except Exception as e:
         LOG.error(e)
         LOG.error("Speech Recognition could not understand audio")
         self.__speak("Sorry, I didn't catch that")
     if text:
         payload = {
             'utterances': [text],
             'session': SessionManager.get().session_id
         }
         self.emitter.emit("recognizer_loop:utterance", payload)
         self.metrics.attr('utterances', [text])
Developer: forslund | Project: mycroft-core | Lines: 22 | Source: listener.py


Example 14: _upload_wake_word

    def _upload_wake_word(self, audio):
        ww_module = self.wake_word_recognizer.__class__.__name__
        if ww_module == 'PreciseHotword':
            model_path = self.wake_word_recognizer.precise_model
            with open(model_path, 'rb') as f:
                model_hash = md5(f.read()).hexdigest()
        else:
            model_hash = '0'

        metadata = {
            'name': self.wake_word_name.replace(' ', '-'),
            'engine': md5(ww_module.encode('utf-8')).hexdigest(),
            'time': str(int(1000 * get_time())),
            'sessionId': SessionManager.get().session_id,
            'accountId': self.account_id,
            'model': str(model_hash)
        }
        requests.post(
            self.upload_url, files={
                'audio': BytesIO(audio.get_wav_data()),
                'metadata': StringIO(json.dumps(metadata))
            }
        )
Developer: Dark5ide | Project: mycroft-core | Lines: 23 | Source: mic.py


Example 15: wake_up

 def wake_up(self, audio):
     if self.wakeup_recognizer.found_wake_word(audio.frame_data):
         SessionManager.touch()
         self.state.sleeping = False
         self.emitter.emit('recognizer_loop:awoken')
         self.metrics.increment("mycroft.wakeup")
Developer: Dark5ide | Project: mycroft-core | Lines: 6 | Source: listener.py


Example 16: _wait_until_wake_word

    def _wait_until_wake_word(self, source, sec_per_buffer):
        """Listen continuously on source until a wake word is spoken

        Args:
            source (AudioSource):  Source producing the audio chunks
            sec_per_buffer (float):  Fractional number of seconds in each chunk
        """
        num_silent_bytes = int(self.SILENCE_SEC * source.SAMPLE_RATE *
                               source.SAMPLE_WIDTH)

        silence = '\0' * num_silent_bytes

        # bytearray to store audio in
        byte_data = silence

        buffers_per_check = self.SEC_BETWEEN_WW_CHECKS / sec_per_buffer
        buffers_since_check = 0.0

        # Max bytes for byte_data before audio is removed from the front
        max_size = self.sec_to_bytes(self.SAVED_WW_SEC, source)
        test_size = self.sec_to_bytes(self.TEST_WW_SEC, source)

        said_wake_word = False

        # Rolling buffer to track the audio energy (loudness) heard on
        # the source recently.  An average audio energy is maintained
        # based on these levels.
        energies = []
        idx_energy = 0
        avg_energy = 0.0
        energy_avg_samples = int(5 / sec_per_buffer)  # avg over last 5 secs

        counter = 0

        while not said_wake_word and not self._stop_signaled:
            if self._skip_wake_word():
                break
            chunk = self.record_sound_chunk(source)

            energy = self.calc_energy(chunk, source.SAMPLE_WIDTH)
            if energy < self.energy_threshold * self.multiplier:
                self._adjust_threshold(energy, sec_per_buffer)

            if len(energies) < energy_avg_samples:
                # build the average
                energies.append(energy)
                avg_energy += float(energy) / energy_avg_samples
            else:
                # maintain the running average and rolling buffer
                avg_energy -= float(energies[idx_energy]) / energy_avg_samples
                avg_energy += float(energy) / energy_avg_samples
                energies[idx_energy] = energy
                idx_energy = (idx_energy + 1) % energy_avg_samples

                # maintain the threshold using average
                if energy < avg_energy * 1.5:
                    if energy > self.energy_threshold:
                        # bump the threshold to just above this value
                        self.energy_threshold = energy * 1.2

            # Periodically output energy level stats.  This can be used to
            # visualize the microphone input, e.g. a needle on a meter.
            if counter % 3:
                with open(self.mic_level_file, 'w') as f:
                    f.write("Energy:  cur=" + str(energy) + " thresh=" +
                            str(self.energy_threshold))
                f.close()
            counter += 1

            # At first, the buffer is empty and must fill up.  After that
            # just drop the first chunk bytes to keep it the same size.
            needs_to_grow = len(byte_data) < max_size
            if needs_to_grow:
                byte_data += chunk
            else:  # Remove beginning of audio and add new chunk to end
                byte_data = byte_data[len(chunk):] + chunk

            buffers_since_check += 1.0
            if buffers_since_check > buffers_per_check:
                buffers_since_check -= buffers_per_check
                chopped = byte_data[-test_size:] \
                    if test_size < len(byte_data) else byte_data
                audio_data = chopped + silence
                said_wake_word = \
                    self.wake_word_recognizer.found_wake_word(audio_data)
                # If a wake word was successfully detected, record the
                # audio to a temp file.
                if self.save_wake_words and said_wake_word:
                    audio = self._create_audio_data(byte_data, source)
                    stamp = str(int(1000 * get_time()))
                    uid = SessionManager.get().session_id
                    if not isdir(self.save_wake_words_dir):
                        mkdir(self.save_wake_words_dir)

                    dr = self.save_wake_words_dir
                    ww = self.wake_word_name.replace(' ', '-')
                    filename = join(dr, ww + '.' + stamp + '.' + uid + '.wav')
                    with open(filename, 'wb') as f:
                        f.write(audio.get_wav_data())

#......... (part of the code omitted here) .........
Developer: aatchison | Project: mycroft-core | Lines: 101 | Source: mic.py


Example 17: __speak

 def __speak(self, utterance):
     payload = {
         'utterance': utterance,
         'session': SessionManager.get().session_id
     }
     self.emitter.emit("speak", Message("speak", metadata=payload))
Developer: Irrelon | Project: mycroft-core | Lines: 6 | Source: listener.py


Example 18: target

 def target():
     self.emitter.emit(
         "speak",
         Message("speak",
                 metadata={'utterance': utterance,
                           'session': SessionManager.get().session_id}))
Developer: Alphacodeclub | Project: mycroft-core | Lines: 6 | Source: listener.py



Note: The mycroft.session.SessionManager class examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Please consult the corresponding project's License before distributing or using the code, and do not reproduce this article without permission.

