
Python naoqi.ALProxy Class Code Examples


This article collects typical usage examples of the naoqi.ALProxy class in Python. If you are wondering what the ALProxy class does, how to use it, or are simply looking for real-world examples, the selected class code examples below may help.



The following shows 20 code examples of the ALProxy class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
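
All of the examples share the same basic pattern: construct ALProxy("ModuleName", ip, port) to obtain a remote proxy to a NAOqi module, then call that module's methods on the proxy. Below is a minimal sketch of this pattern; the robot address "nao.local" and the default port 9559 are placeholders to adjust for your robot.

from naoqi import ALProxy

ROBOT_IP = "nao.local"   # placeholder: replace with your robot's address
ROBOT_PORT = 9559        # default NAOqi port

# Proxy to the text-to-speech module; say() is executed remotely on the robot.
tts = ALProxy("ALTextToSpeech", ROBOT_IP, ROBOT_PORT)
tts.say("Hello")

# The same constructor works for any NAOqi module, e.g. ALMotion (see Example 3).
motion = ALProxy("ALMotion", ROBOT_IP, ROBOT_PORT)
motion.setStiffnesses("Head", 1.0)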

Example 1: TactileHeadModule

class TactileHeadModule(ALModule):
    AudioModule = None
    def __init__(self, name, audiomodule):
        ALModule.__init__(self, name)
        
        self.AudioModule = audiomodule

        # Create a proxy to ALTextToSpeech for later use
        self.tts = ALProxy("ALTextToSpeech")

        # Subscribe to the MiddleTactilTouched event:
        global memory
        memory = ALProxy("ALMemory")
        memory.subscribeToEvent("MiddleTactilTouched",
            "ReactToTouch",
            "onTouched")

    def onTouched(self, strVarName, value):
        """ This will be called each time a touch
        is detected.

        """
        # Unsubscribe to the event when talking,
        # to avoid repetitions
        memory.unsubscribeToEvent("MiddleTactilTouched",
            "ReactToTouch")
        self.tts.say("D'accord, on arrête de jouer")
        self.AudioModule.cs = 0

        # Subscribe again to the event
        memory.subscribeToEvent("MiddleTactilTouched",
            "ReactToTouch",
            "onTouched")
Author: FallingTree, Project: PrehensionNao, Lines: 33, Source: TactileHeadModule.py
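
Note that Example 1 subscribes with the module name "ReactToTouch", which is not defined in the excerpt. For the onTouched callback to fire, an instance of the module must be registered under that exact name, with a Python broker connected to the robot's NAOqi broker. The following is a minimal sketch of that setup, assuming a placeholder robot address and an AudioModule instance provided elsewhere in the project; the ALBroker arguments follow the same pattern as in Example 10 below.

from naoqi import ALBroker

# Connect a local Python broker to NAOqi so that module callbacks can be invoked.
myBroker = ALBroker("myBroker",
    "0.0.0.0",    # listen to anyone
    0,            # find a free port and use it
    "nao.local",  # parent broker IP (placeholder)
    9559)         # parent broker port

# The variable name must match the module name used in subscribeToEvent.
ReactToTouch = TactileHeadModule("ReactToTouch", audio_module)  # audio_module: hypothetical AudioModule instance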


Example 2: start_sound_track

    def start_sound_track(self, msg):
        self.__proxyTTS = ALProxy("ALAnimatedSpeech", self.__ip, self.__port)
        
        # set the local configuration
        sayconfig = {"bodyLanguageMode":"contextual"}
        
        self.__proxyTTS.say("Can you help me find you by clapping your hand?", sayconfig)
        
        self.__proxyMotion = ALProxy("ALMotion", self.__ip, self.__port)
        
        #initialise microphone
        #self.__audioProxy = ALProxy("ALAudioDevice", self.__ip, self.__port)
        #initialise soundsourcelocalisation
        self.__sslProxy = ALProxy("ALSoundLocalization", self.__ip, self.__port)
        #initialise almemory
        self.__memoryProxy = ALProxy("ALMemory", self.__ip, self.__port)
        #debugging purpose
        #self.__audioProxy.setClientPreferences( self.getName() , 16000, 3, 0 )
        #self.__audioProxy.subscribe(self.getName())
        
        #configure sound detection
        self.__sslProxy.setParameter("Sensitivity",0.1)

        #callback from memory      
        try:
            self.__memoryProxy.unsubscribeToEvent("ALSoundLocalization/SoundLocated","soundtracking")
        except:
            pass
        
        self.__sslProxy.subscribe("sound_source_locator")
        self.__memoryProxy.subscribeToMicroEvent(
            "ALSoundLocalization/SoundLocated",
            self.getName(), 
            "AnotherUserDataToIdentifyEvent", 
            "sound_callback")
Author: dantarakan, Project: qbot, Lines: 35, Source: naosoundtracking.py


Example 3: main

def main(robotIP, PORT=9559):
    motionProxy = ALProxy("ALMotion", robotIP, PORT)

    # Example showing how to get the robot config
    robotConfig = motionProxy.getRobotConfig()
    for i in range(len(robotConfig[0])):
        print robotConfig[0][i], ": ", robotConfig[1][i]
Author: sayantanauddy, Project: robot_walking, Lines: 7, Source: almotion_getRobotConfig.py


Example 4: showNaoImage

def showNaoImage(IP, PORT):
  camProxy = ALProxy("ALVideoDevice", IP, PORT)
  resolution = 2    # VGA
  colorSpace = 11   # RGB

  videoClient = camProxy.subscribe("python_client", resolution, colorSpace, 5)

  # Get a camera image.
  # image[6] contains the image data passed as an array of ASCII chars.
  naoImage = camProxy.getImageRemote(videoClient)

  camProxy.unsubscribe(videoClient)


  # Now we work with the image returned and save it as a JPEG using PIL.

  # Get the image size and pixel array.
  imageWidth = naoImage[0]
  imageHeight = naoImage[1]
  array = naoImage[6]

  # Create a PIL Image from our pixel array.
  im = Image.fromstring("RGB", (imageWidth, imageHeight), array)
  
  # Save the image.
  im.save("../public/imgNao/live.jpeg", "JPEG")
Author: matth02100, Project: Nao, Lines: 27, Source: live.py


Example 5: getImage

 def getImage(self):
     """main method, wait until qr code is found."""
     period = 1000
     qrCodeProxy = ALProxy("ALBarcodeReader", self.NAO_IP, self.NAO_PORT)
     qrCodeProxy.subscribe("Testh_qr", period, 0.0)
     detected = False
     i = 0
     while not detected:
         time.sleep(0.5)
         val = self.memory.getData("BarcodeReader/BarcodeDetected")
         print val
         if val is not None:
             if len(val) >= 1:
                 detected = True
                 todo = val[0][0]
                 ac = todo.split(" ", 1)
                 if len(ac) > 1:
                     action = self.nao.getAction().get(str(ac[0]))
                     action(str(ac[1]))
                     self.memory.insertData("BarcodeReader/BarcodeDetected", "")
                 else:
                     action = self.nao.getAction().get(todo)
                     action()
                     self.memory.insertData("BarcodeReader/BarcodeDetected", "")
         i += 1
         if i == 30:
             detected = True
Author: NathanGrimaud, Project: NaoAccueil, Lines: 27, Source: qrRetreiver.py


Example 6: powerOff

 def powerOff (self):
     tts = ALProxy("ALTextToSpeech", 'nao.local', 9559)
     tts.say("即将执行关机操作!")
     command1 = 'sudo shutdown -h now'
     os.system(command1)
     command2 = 'root\r'  # here is tha default password of root user
     os.system(command2)
Author: ZiqianXY, Project: NaoController, Lines: 7, Source: service.py


Example 7: main

def main():
    """ Parse command line arguments,
    run recordData and write the results
    into a csv file

    """
    if len(sys.argv) < 2:
        nao_ip = ROBOT_IP
    else:
        nao_ip = sys.argv[1]

    motion = ALProxy("ALMotion", nao_ip, 9559)
    # Set stiffness on for Head motors
    motion.setStiffnesses("Head", 1.0)
    # Will go to 1.0 then 0 radian
    # in two seconds
    motion.post.angleInterpolation(
        ["HeadYaw"],
        [1.0, 0.0],
        [1  , 2],
        False
    )
    data = recordData(nao_ip)
    # Gently set stiff off for Head motors
    motion.setStiffnesses("Head", 0.0)

    output = os.path.abspath("record.csv")

    with open(output, "w") as fp:
        for line in data:
            fp.write("; ".join(str(x) for x in line))
            fp.write("\n")

    print "Results written to", output
Author: KellyChan, Project: python-examples, Lines: 34, Source: data_recording.py


Example 8: update_battery

    def update_battery(self):

        if self.ip.get() == "Disconnected":
            self.battery_status.set("0 %")
        else:
            try:
                # Import the naoqi library and create all the proxies that will be used
                from naoqi import ALProxy

                self.memory = ALProxy("ALMemory", self.ip.get(), 9559)
                self.memory.ping()
                self.battery = ALProxy("ALBattery", self.ip.get(), 9559)
                self.tts = ALProxy("ALTextToSpeech", self.ip.get(), 9559)
                self.motion = ALProxy("ALMotion", self.ip.get(), 9559)
                self.posture = ALProxy("ALRobotPosture", self.ip.get(), 9559)

                # Set the battery status label to the current charge value
                self.battery_status.set(str(self.battery.getBatteryCharge()) + " %")

                # Timer thread that updates the battery status again after a fixed period
                # threading.Timer(5.0, self.update_battery).start()
                threading.Timer(15.0, self.update_battery).start()

            except BaseException:
                self.ip.set("Disconnected")
                return
Author: panos5, Project: Panagiotis_Filiotis_Thesis, Lines: 27, Source: Main.py


Example 9: __init__

    def __init__(self, name):  # constructor of the class; name is the module name as a string
        ALModule.__init__(self, name)  # call the constructor of ALModule
        self.tts = ALProxy("ALTextToSpeech", ip, 9559)  # proxy to the tts module
        self.asr = ALProxy("ALSpeechRecognition", ip, 9559)  # proxy to the asr module
        self.memory = ALProxy("ALMemory", ip, 9559)  # proxy to the memory module

        self.num1 = random.randint(1, 10)  # two integers randomly selected from 1 to 10
        self.num2 = random.randint(1, 10)
        self.operator = random.choice("-")  # operator drawn from this string (here only "-", so subtraction is always chosen)
        self.tts.setLanguage("English")  # set the language NAO uses for speaking

        if self.operator == "-":  # NAO is programmed to create equations with a positive result
            if self.num1 > self.num2:  # compare the numbers to ensure the larger one comes first
                self.result = str(eval(str(self.num1) + self.operator + str(self.num2)))  # evaluate the result and convert it to a string so NAO can say it
                self.operator = " minus "  # spell out the operator
                self.question = "What is the result of " + str(self.num1) + self.operator + str(self.num2) + "?"  # build the question
            else:
                self.result = str(eval(str(self.num2) + self.operator + str(self.num1)))
                self.operator = " minus "
                self.question = "What is the result of " + str(self.num2) + self.operator + str(self.num1) + "?"
        else:
            self.result = str(eval(str(self.num1) + self.operator + str(self.num2)))
            self.operator = " plus "
            self.question = "What is the result of " + str(self.num1) + self.operator + str(self.num2) + "?"

        print self.question  # print the question to the terminal
        print self.result  # print the result to the terminal
        self.tts.say(self.question)  # NAO asks the question
        self.speech_recognition()  # call the speech_recognition method
Author: peterhalachan, Project: nao, Lines: 29, Source: TellTheResult.py


Example 10: getColour

def getColour(IP, PORT):
    """
First get an image from Nao, then show it on the screen with PIL.
    :param IP:
    :param PORT:
"""


    myBroker = ALBroker("myBroker",
        "0.0.0.0", # listen to anyone
        0, # find a free port and use it
        IP, # parent broker IP
        PORT) # parent broker port

    camProxy = ALProxy("ALVideoDevice", IP, PORT)
    resolution = 2 # VGA
    colorSpace = 11 # RGB


    videoClient = camProxy.subscribe("python_client", resolution, colorSpace, 5)

    t0 = time.time()

    # Get a camera image.
    # image[6] contains the image data passed as an array of ASCII chars.
    naoImage = camProxy.getImageRemote(videoClient)

    t1 = time.time()

    # Time the image transfer.
    #print "Runde: ", b

    camProxy.unsubscribe(videoClient)


    # Now we work with the image returned.

    # Get the image size and pixel array.
    imageWidth = naoImage[0]
    imageHeight = naoImage[1]
    array = naoImage[6]

    # Create a PIL Image instance from our pixel array.
    img0 = Image.frombytes("RGB", (imageWidth, imageHeight), array)


    #frame=np.asarray(convert2pil(img0)[:,:])

    #object_rect2=detectColor(img0, RED_MIN,RED_MAX)
    frame=detectShape(img0, RED_MIN,RED_MAX)

    #frame=selectDetected(object_rect1,frame)

    #frame=selectDetected(object_rect2,frame)
    # currentImage = path+ "/camImage1cm.jpg"
    # cv2.imwrite(currentImage, frame)
    cv2.imshow('contour',frame)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Author: vbabushkin, Project: RoboticVisionLegoGame, Lines: 60, Source: shapeDetectionNAO.py


Example 11: FaceDetectionModule

class FaceDetectionModule(ALModule):
    # Method declaration.
    def __init__(self, name):
        ALModule.__init__(self, name)

        print "[INFO ] FaceDetectionModule initialization"

        # Create a tts proxy to the ALTextToSpeech module.
        self.tts = ALProxy("ALTextToSpeech")
        self.tts.setLanguage("french")
        # Create an fd proxy to the ALFaceDetection module.
        self.fd = ALProxy("ALFaceDetection")

        # Global variable.
        global memory
        # Create a memory proxy to the ALMemory module.
        memory = ALProxy("ALMemory")
        # Subscribe to the event...
        memory.subscribeToEvent("FaceDetected",    # On this event...
                                "FaceDetection",   # ...of this instance...
                                "onDetection")     # ...trigger a call
                                                   # ...to this method.
        print "[INFO ] FaceDetectionModule initialized"

    # Method called on the event.
    def onDetection(self, *_args):
        print "[INFO ] FaceDetection: Face detected"

        global face_nb
        print "[INFO ] FaceDetection initialize face detection process"
        learnFaceProcess(self, face_nb)
Author: NSenaud, Project: Reco-Follow, Lines: 31, Source: learnFace.py


Example 12: getMarkXYZ

def getMarkXYZ (IP, portNumber, markData, landmarkSize):
    print "0"
    currentCamera = "CameraTop"
    print "1"
    # Retrieve landmark angular size in radians.
    angularSize = markData[1][0][0][3]
    print "2"
    # Compute distance to landmark.
    distanceFromCameraToLandmark = landmarkSize / ( 2 * math.tan( angularSize / 2))
    print "3"
    motionProxy = ALProxy("ALMotion", IP, portNumber)
    print "4"
    # Retrieve landmark center position in radians.
    wzCamera = markData[1][0][0][1]
    print "5"
    wyCamera = markData[1][0][0][2]
    print "6"
    # Get current camera position in NAO space.
    transform = motionProxy.getTransform(currentCamera, 2, True)
    print "7"
    transformList = almath.vectorFloat(transform)
    robotToCamera = almath.Transform(transformList)

    # Compute the rotation to point towards the landmark.
    cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(0, wyCamera, wzCamera)

    # Compute the translation to reach the landmark.
    cameraToLandmarkTranslationTransform = almath.Transform(distanceFromCameraToLandmark, 0, 0)

    # Combine all transformations to get the landmark position in NAO space.
    robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform

    return robotToLandmark.r1_c4, robotToLandmark.r2_c4, robotToLandmark.r3_c4
Author: afranka69, Project: ShowRobbie, Lines: 33, Source: NaoMarkModule.py


Example 13: start

 def start(self):
     if self.al.connected():
         self.tts.say("You are already connected")
     else:
         self.networks = self.al.list_networks()
         self.tts.say("Here are the Wi Fi networks")
         for num, network in enumerate(self.networks, 1):
             self.tts.say(network)
             self.tts.say("is number %d" % (num,))
             time.sleep(0.2)
         if len(self.networks) == 0:
             self.tts.say("Sorry you are in a wifi free zone")
         else:
             self.tts.say("Which number Wi Fi network shall I connect to?")
             try:
                 self.memory.unsubscribeToEvent("WordRecognized")
             except Exception:
                 pass
             speech_recognition = ALProxy("ALSpeechRecognition", NAO_IP, 9559)
             speech_recognition.setLanguage("English")
             try:
                 speech_recognition.setWordListAsVocabulary([str(i) for i in range(1, len(self.networks))])
             except Exception:
                 self.tts.say("Could not set vocabulary")
             try:
                 result = self.memory.subscribeToEvent("WordRecognized", self.module_name, "on_word_recognised")
                 print "Subscribed to event WordRecognized with package ", self.module_name, " and result ", result
             except Exception as e:
                 print "Failed to subscribe ", e
Author: zaheerm, Project: naowificonnector, Lines: 29, Source: naowifi.py


Example 14: NaoTTS

class NaoTTS(object):
    """
    Nao text-to-speech service
    """
    def __init__(self):
        """
        Sets up members
        """
        self._tts = None

        # Authorization to speak
        self._can_speak = threading.Event()
        self._can_speak.set()
        self.__speaking_lock = threading.Lock()

    @Validate
    def validate(self, context):
        """
        Component validated
        """
        # Set up the TTS proxy
        self._tts = ALProxy("ALTextToSpeech")

    @Invalidate
    def invalidate(self, context):
        """
        Component invalidated
        """
        # Stop using the proxy
        self._tts = None

        # Unlock everything
        self._can_speak.set()

    def say(self, sentence):
        """
        Says the given sentence

        :param sentence: Text to say
        """
        with self.__speaking_lock:
            # Wait to be authorized to speak
            self._can_speak.wait()

            # Say what we have to
            self._tts.say(sentence)

    def resume(self):
        """
        Allows Nao to speak
        """
        self._can_speak.set()

    def pause(self):
        """
        Forbids Nao to speak
        """
        if self._can_speak.is_set():
            with self.__speaking_lock:
                self._can_speak.clear()
Author: RalfMueller1988, Project: demo-ipopo-nao, Lines: 60, Source: tts.py


Example 15: getNaoImage

def getNaoImage(IP, PORT):
    
    camProxy = ALProxy("ALVideoDevice", IP, PORT)
    
    resolution = 2 # 640*480px http://doc.aldebaran.com/2-1/family/robots/video_robot.html#cameraresolution-mt9m114
    colorSpace = 11 # RGB colorspace http://doc.aldebaran.com/2-1/family/robots/video_robot.html#cameracolorspace-mt9m114
    fps = 5 # can be 0-30 fps

    videoClient = camProxy.subscribe("python_client", resolution, colorSpace, fps)
    t0 = time.time()
    naoImage = camProxy.getImageRemote(videoClient)
    t1 = time.time()
    
    camProxy.unsubscribe(videoClient)

    # Get the image size and pixel array.
    imageWidth = naoImage[0]
    imageHeight = naoImage[1]
    array = naoImage[6]

    # Create a PIL Image from our pixel array.
    im = Image.fromstring("RGB", (imageWidth, imageHeight), array)
    #grab image from PIL and convert to opencv image
    img = np.array(im)
    img = img[:, :, ::-1].copy()

    #im.save(name,"PNG")
    
    
    print "acquisition delay ", t1 - t0
    return img
Author: davidlavy88, Project: nao_and_opencv-python-, Lines: 31, Source: capture_contour_cleaner2.py


Example 16: BoardMemoryMonitor

class BoardMemoryMonitor(object):
    def __init__(self, args):
        self._prefix = 'Device/DeviceList'
        self._board = args.board
        self._key = args.key
        try:
            self._mem = ALProxy('ALMemory', args.url, args.port)
            self._out = UnbufferedStreamWrapper(stdout)
        except RuntimeError as e:
            exceptRgx = compile('[^\n\t]+$')
            print '\n', 'RuntimeError:', exceptRgx.search(e.args[0]).group(0)
            exit(1)

    def run(self):
        progVersion = self._mem.getData('/'.join([self._prefix,
                                        self._board,
                                        'ProgVersion']))
        system('clear')
        print 'Monitoring key \'{0}\' on board \'{1}\' [ProgVersion: {2}]'.format(self._key,
                                                                                  self._board,
                                                                                  str(progVersion))
        while True:
            data = self._mem.getData('/'.join([self._prefix, self._board, self._key]))
            self._out.write(str(data))
            self._out.write('   \r')
Author: d0nn13, Project: python-tools, Lines: 25, Source: BoardMemoryMonitor.py


Example 17: HumanGreeterModule

class HumanGreeterModule(ALModule):
    """ A simple module able to react
    to facedetection events

    """
    def __init__(self, name):
        ALModule.__init__(self, name)
        # No need for IP and port here because
        # we have our Python broker connected to NAOqi broker

        # Create a proxy to ALTextToSpeech for later use
        self.tts = ALProxy("ALTextToSpeech")

        # Subscribe to the FaceDetected event:
        global memory
        memory = ALProxy("ALMemory")
        memory.subscribeToEvent("FaceDetected","HumanGreeter", "onFaceDetected")

    def onFaceDetected(self, *_args):
        """ This will be called each time a face is
        detected."""
        # Unsubscribe to the event when talking,
        # to avoid repetitions
        memory.unsubscribeToEvent("FaceDetected", "HumanGreeter")

        self.tts.say("je suis content")

        # Subscribe again to the event
        memory.subscribeToEvent("FaceDetected", "HumanGreeter", "onFaceDetected")
Author: libaneupmc, Project: projetIntegratif, Lines: 29, Source: Hello_you.py


Example 18: MoveAction_MoveFor

def MoveAction_MoveFor(robotIP, PORT=9559):
    # Move forward
    motionProxy  = ALProxy("ALMotion", robotIP, PORT)
    postureProxy = ALProxy("ALRobotPosture", robotIP, PORT)


    # Send robot to Stand
    postureProxy.goToPosture("StandInit", 0.5)

    #####################
    ## Enable arms control by Motion algorithm
    #####################
    motionProxy.setMoveArmsEnabled(True, True)
    # motionProxy.setMoveArmsEnabled(False, False)

    #####################
    ## FOOT CONTACT PROTECTION
    #####################
    #motionProxy.setMotionConfig([["ENABLE_FOOT_CONTACT_PROTECTION", False]])
    motionProxy.setMotionConfig([["ENABLE_FOOT_CONTACT_PROTECTION", True]])

    #TARGET VELOCITY
    X = 0.8
    Y = 0.0
    Theta = 0.0
    Frequency =1.0 # max speed
    try:
        motionProxy.moveToward(X, Y, Theta, [["Frequency", Frequency]])
    except Exception, errorMsg:
        print str(errorMsg)
        print "This example is not allowed on this robot."
        exit()
Author: L-SEG, Project: PythonAndNao, Lines: 32, Source: MoveAction.py


Example 19: showNaoImage

def showNaoImage(IP, PORT):
    camProxy = ALProxy("ALVideoDevice", IP, PORT)
    resolution = 2  # VGA
    colorSpace = 11  # RGB

    videoClient = camProxy.subscribe("python_client", resolution, colorSpace, 5)

    t0 = time.time()

    # Get a camera image.
    # image[6] contains the image data passed as an array of ASCII chars.
    naoImage = camProxy.getImageRemote(videoClient)

    t1 = time.time()

    # Time the image transfer.
    print "acquisition delay ", t1 - t0

    camProxy.unsubscribe(videoClient)

    # Now we work with the image returned and save it as a JPEG using PIL.

    # Get the image size and pixel array.
    imageWidth = naoImage[0]
    imageHeight = naoImage[1]
    array = naoImage[6]

    # Create a PIL Image from our pixel array.
    im = Image.fromstring("RGB", (imageWidth, imageHeight), array)

    nomPhoto = time.strftime("%d-%m-%y_a_%H-%M-%S", time.localtime())
    print nomPhoto
    # Save the image.
    im.save("../public/imgNao/" + nomPhoto + ".jpeg", "JPEG")
Author: Cydev2306, Project: Nao-App, Lines: 35, Source: prendrePhoto.py


Example 20: _validate

    def _validate(self, context):
        """
        Component validated
        """
        _logger.debug("Validating speech...")

        # Register the module as a global in __main__
        constants.register_almodule(self._name, self)

        # Initialize the module
        ALModule.__init__(self, self._name)

        # Get the "memory" proxy, to register to callbacks
        self._memory = ALProxy("ALMemory")

        # Just to be sure...
        try:
            self._memory.unsubscribeToEvent("WordRecognized", self._name)
        except:
            _logger.debug("Speech wasn't yet registered")

        # Create the proxy
        self._recog = ALProxy("ALSpeechRecognition")
        self._recog.setLanguage("French")

        # We're ready
        self._can_recog.set()

        _logger.debug("Speech ready")
Author: tcalmant, Project: demo-ipopo-nao, Lines: 29, Source: speech.py



Note: The naoqi.ALProxy class examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Please refer to each project's license before distributing or reusing the code, and do not reproduce this article without permission.

