• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    公众号

C++ qcar::State类代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了C++中qcar::State的典型用法代码示例。如果您正苦于以下问题:C++ State类的具体用法?C++ State怎么用?C++ State使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。



在下文中一共展示了State类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。

示例1: assert

JNIEXPORT jobject JNICALL
Java_org_ronhuang_vistroller_Vistroller_getMarker(JNIEnv *env, jobject)
{
    //LOG("Java_org_ronhuang_vistroller_Vistroller_getMarker");

    // Open a QCAR rendering section so trackable state can be queried.
    QCAR::State state = QCAR::Renderer::getInstance().begin();

    jobject found = NULL;
    const int numTrackables = state.getNumActiveTrackables();
    for (int idx = 0; idx < numTrackables; ++idx) {
        const QCAR::Trackable *trackable = state.getActiveTrackable(idx);
        assert(trackable->getType() == QCAR::Trackable::MARKER);
        const QCAR::Marker *marker = static_cast<const QCAR::Marker*>(trackable);

        found = newMarker(env, marker->getMarkerId(), marker->getPose().data, marker->getSize().data);
        break; // FIXME: return only the first one for now.
    }

    // Always hand back a Marker instance; when nothing was tracked this
    // frame, return an invalid placeholder (id -1, zeroed pose/size).
    if (NULL == found) {
        float pose[3 * 4] = {
            0, 0, 0, 0,
            0, 0, 0, 0,
            0, 0, 0, 0
        };
        float size[2] = {0, 0};
        found = newMarker(env, -1, pose, size);
    }

    QCAR::Renderer::getInstance().end();

    return found;
}
开发者ID:ronhuang,项目名称:vistroller,代码行数:34,代码来源:Vistroller.cpp


示例2: return

// Reports whether QCAR is currently tracking at least one trackable.
// NOTE(review): the previous comment here ("Just returns 1 for OpenGLEs 1.1
// and 2 for other") described a different function and was wrong.
JNIEXPORT bool JNICALL
Java_edu_ethz_s3d_S3D_hasTarget(JNIEnv *, jobject)
{
	// Get the state from QCAR and mark the beginning of a rendering section
    QCAR::State state = QCAR::Renderer::getInstance().begin();

    // Capture the answer before closing the rendering section.
    const bool hasTarget = (state.getNumActiveTrackables() > 0);

    // BUG FIX: the original returned without calling end(), leaving the QCAR
    // rendering section open on every invocation; begin()/end() must be
    // paired per frame (see the other renderFrame examples in this file).
    QCAR::Renderer::getInstance().end();

    return hasTarget;
}
开发者ID:Alice-,项目名称:CVL,代码行数:9,代码来源:ImageTargets.cpp


示例3: glClear

// Per-frame render callback: draws the camera background and overlays a
// tracker button for every image target detected this frame.
JNIEXPORT void JNICALL
Java_com_qualcomm_QCARSamples_Dominoes_DominoesRenderer_renderFrame(JNIEnv* , jobject)
{
    // Clear the color and depth buffers
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    
    // Get the state from QCAR and mark the beginning of a rendering section
    QCAR::State state = QCAR::Renderer::getInstance().begin();
    
    // Explicitly render the Video Background
    QCAR::Renderer::getInstance().drawVideoBackground();
    
    // Names of the trackables seen this frame.
    // NOTE(review): `found` is only ever passed to hideTrackerButton() in the
    // no-results branch below, where it is always empty — confirm intended.
	std::vector<const char*> found;

    // Did we find any trackables this frame?
    if (state.getNumTrackableResults() > 0) {

    	for(int tIdx = 0; tIdx < state.getNumTrackableResults(); ++tIdx) {

			// Get the current trackable result
			const QCAR::TrackableResult* trackableResult = state.getTrackableResult(tIdx);
			const QCAR::Trackable& trackable = trackableResult->getTrackable();

			found.push_back(trackable.getName());

			// The image target specific result (this renderer only expects
			// image targets; anything else trips the assert in debug builds):
			assert(trackableResult->getType() == QCAR::TrackableResult::IMAGE_TARGET_RESULT);
			const QCAR::ImageTargetResult* imageTargetResult =
				static_cast<const QCAR::ImageTargetResult*>(trackableResult);

			// If this is our first time seeing the target, display a tip
			if (!displayedMessage) {
				displayMessage("Find marker man!");
				displayedMessage = true;
			}


			//const QCAR::TrackerManager& trackerManager = QCAR::TrackerManager::getInstance();
			//const QCAR::Tracker* tracker = trackerManager.getTracker(QCAR::Tracker::IMAGE_TRACKER);
			const QCAR::CameraCalibration& cameraCalibration = QCAR::CameraDevice::getInstance().getCameraCalibration();

			// Project the target's origin (0,0,0) into camera image space,
			// then convert to screen coordinates for button placement.
			QCAR::Vec2F cameraPoint = QCAR::Tool::projectPoint(cameraCalibration, trackableResult->getPose(), QCAR::Vec3F(0,0,0));
			QCAR::Vec2F xyPoint = cameraPointToScreenPoint(cameraPoint);

			showTrackerButton(xyPoint.data[0], xyPoint.data[1], trackable.getName());

		}

    } else {
    	// No targets this frame: hide the tracker buttons.
    	hideTrackerButton(found);
    }
    
    QCAR::Renderer::getInstance().end();
}
开发者ID:Kajo0,项目名称:pdi1-tests,代码行数:55,代码来源:Dominoes.cpp


示例4: QCAR_onUpdate

    // Called by QCAR once per processed camera frame. Looks for an RGB565
    // camera image in the frame and forwards its raw pixels to the Java
    // activity via setRGB565CameraImage(byte[], int, int).
    virtual void QCAR_onUpdate(QCAR::State& state)
    {
        // from
        // https://developer.vuforia.com/forum/faq/android-how-can-i-access-camera-image

        // FIX: keep the pointer const instead of stripping constness with a
        // C-style cast — the image is only read below.
        const QCAR::Image *imageRGB565 = NULL;
        QCAR::Frame frame = state.getFrame();

        // Find the first image the tracker exposes in RGB565 format.
        for (int i = 0; i < frame.getNumImages(); ++i) {
            const QCAR::Image *image = frame.getImage(i);
            if (image->getFormat() == QCAR::RGB565) {
                imageRGB565 = image;
                break;
            }
        }

        if (imageRGB565) {
            JNIEnv* env = 0;

            // Only proceed when attached to a JVM and the activity
            // reference is valid.
            if ((javaVM != 0) && (activityObj != 0) && (javaVM->GetEnv((void**)&env, JNI_VERSION_1_4) == JNI_OK)) {

                const short* pixels = (const short*) imageRGB565->getPixels();
                int width = imageRGB565->getWidth();
                int height = imageRGB565->getHeight();
                int numPixels = width * height;

                // LOG("Update video image...");

                // RGB565 packs one pixel into 2 bytes.
                jbyteArray pixelArray = env->NewByteArray(numPixels * 2);
                env->SetByteArrayRegion(pixelArray, 0, numPixels * 2, (const jbyte*) pixels);

                jclass javaClass = env->GetObjectClass(activityObj);
                jmethodID method = env->GetMethodID(javaClass, "setRGB565CameraImage", "([BII)V");
                env->CallVoidMethod(activityObj, method, pixelArray, width, height);

                // Release the local ref promptly; this callback runs every frame.
                env->DeleteLocalRef(pixelArray);
            }
        }
    }
开发者ID:Omarasifshaikh,项目名称:ar4android,代码行数:46,代码来源:VuforiaNative.cpp


示例5: glClear

JNIEXPORT void JNICALL
Java_edu_pugetsound_vichar_ar_ARGameRenderer_renderFrame(JNIEnv * env, jobject obj, jboolean updated, jfloatArray test, jint objSize)
{
	bool update;
	update = (bool) updated; //so we know whether or not to update the drawlist.
	float testScale = 0.3f;

	// here is an example of how to pull the elements out of the jfloatArray. I think c++ will implicitly handle the type casting of jfloats as floats,
	// but if you are getting errors, you can always explicitly type cast them like so (assuming you have jfloats in the array):
	// float x;
	// x = (float) posData[i];
	
	if(update){
		int i = 0;
		int j = 0;
		jsize len = env->GetArrayLength(test);
		jfloat* posData = env->GetFloatArrayElements(test, 0);
		while(i<len && posData[(i/objSize)*objSize] != 0){
			LOG("JSON to JNI test. Pos. %d : %f", i, posData[i]); //print the elements of the array.
			interpList[i/objSize][i%objSize]= (float) posData[i] * testScale;
			i++;
		}
		interpLength=(i)/objSize;
		LOG("%i", interpLength);
		env->ReleaseFloatArrayElements(test, posData, 0); //release memory
	}

    //LOG("Java_edu_pugetsound_vichar_ar_GLRenderer_renderFrame");
    // Clear color and depth buffer
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    // Get the state from QCAR and mark the beginning of a rendering section
    QCAR::State state = QCAR::Renderer::getInstance().begin();

    // Explicitly render the Video Background
    QCAR::Renderer::getInstance().drawVideoBackground();

#ifdef USE_OPENGL_ES_1_1
    // Set GL11 flags:
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_NORMAL_ARRAY);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);

    glEnable(GL_TEXTURE_2D);
    glDisable(GL_LIGHTING);

#endif

    glEnable(GL_DEPTH_TEST);
    glEnable(GL_CULL_FACE);

    // Did we find any trackables this frame?
    for(int tIdx = 0; tIdx < state.getNumActiveTrackables(); tIdx++)
    {
        // Get the trackable:
        const QCAR::Trackable* trackable = state.getActiveTrackable(tIdx);
        QCAR::Matrix44F modelViewMatrix =
            QCAR::Tool::convertPose2GLMatrix(trackable->getPose());

//Begin additions by Erin================================================================================
        QCAR::Matrix34F test;   //gets inverse pos matrix
        QCAR::Matrix34F pos;   //Gets positional data
        pos = trackable->getPose();

        //Get inverse
        test = SampleMath::phoneCoorMatrix(trackable->getPose());

        //Print results
//        LOG("Poisiton:");
//        LOG("%f %f %f %f",pos.data[0], pos.data[1], pos.data[2], pos.data[3]);
//        LOG("%f %f %f %f",pos.data[4], pos.data[5], pos.data[6], pos.data[7]);
//        LOG("%f %f %f %f",pos.data[8], pos.data[9], pos.data[10],pos.data[11]);
//        LOG("Inverse:");
//        LOG("%f %f %f %f",test.data[0], test.data[1], test.data[2], test.data[3]);
//        LOG("%f %f %f %f",test.data[4], test.data[5], test.data[6], test.data[7]);
//        LOG("%f %f %f %f",test.data[8], test.data[9], test.data[10], test.data[11]);
//        LOG("=========================");
        phoneLoc[0] = 1.0f;
        phoneLoc[1] = test.data[3];
        phoneLoc[2] = test.data[7];
        phoneLoc[3] = test.data[11];
//End============================================================================================

        // Assign Textures according in the texture indices defined at the beginning of the file, and based
        // on the loadTextures() method in ARGameActivity.java.


        const Texture* const tower_shellTexture = textures[tower_shellIndex];
        const Texture* const tower_topTexture = textures[tower_topIndex];
        const Texture* const bananaTexture = textures[banana180Index];

#ifdef USE_OPENGL_ES_1_1
        // Load projection matrix:
        glMatrixMode(GL_PROJECTION);
        glLoadMatrixf(projectionMatrix.data);

        // Load model view matrix:
        glMatrixMode(GL_MODELVIEW);
        glLoadMatrixf(modelViewMatrix.data);
        glTranslatef(0.f, 0.f, kObjectScale);
//.........这里部分代码省略.........
开发者ID:UPS-CS240-F12,项目名称:mobile,代码行数:101,代码来源:ImageTargets.cpp


示例6: glClear

JNIEXPORT void JNICALL
Java_com_codered_ared_TextRecoRenderer_renderFrame(JNIEnv * env, jobject obj)
{
    //LOG("JJava_com_codered_ared_TextRecoRenderer_renderFrame");

    // Clear color and depth buffer 
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);


    // Get the state from QCAR and mark the beginning of a rendering section
    QCAR::State state = QCAR::Renderer::getInstance().begin();
    
    // Explicitly render the Video Background
    QCAR::Renderer::getInstance().drawVideoBackground();

    glEnable(GL_DEPTH_TEST);

    // We need Front Face, CW for the back camera and Front Face CCW for the front camera...
    // or more accuratly, we need CW for 0 and 2 reflections and CCW for 1 reflection
    glEnable(GL_CULL_FACE);
    glCullFace(GL_FRONT);
    if(QCAR::Renderer::getInstance().getVideoBackgroundConfig().mReflection == QCAR::VIDEO_BACKGROUND_REFLECTION_ON)
    {
        glFrontFace(GL_CCW);  //Front camera
    }
    else
    {
        glFrontFace(GL_CW);   //Back camera
    }

    // Enable blending to support transparency
    glEnable(GL_BLEND);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);

    jclass rendererJavaClass = env->GetObjectClass(obj);

    env->CallVoidMethod(obj, env->GetMethodID(rendererJavaClass, "wordsStartLoop", "()V"));

    NbWordsFound = 0;

    // Did we find any trackables this frame?
    for(int tIdx = 0; tIdx < state.getNumTrackableResults(); tIdx++)
    {
        // Get the trackable:
        const QCAR::TrackableResult* result = state.getTrackableResult(tIdx);
        const QCAR::Trackable& trackable = result->getTrackable();

        QCAR::Matrix44F modelViewMatrix =
            QCAR::Tool::convertPose2GLMatrix(result->getPose());        

        QCAR::Vec2F wordBoxSize(0, 0);

        if (result->getType() == QCAR::TrackableResult::WORD_RESULT)
        {
            const QCAR::WordResult* wordResult = (const QCAR::WordResult*) result;
            // Get the word
            const QCAR::Word& word = wordResult->getTrackable();
            const QCAR::Obb2D& obb = wordResult->getObb();
            wordBoxSize = word.getSize();

            if (word.getStringU())
            {
                // in portrait, the obb coordinate is based on
                // a 0,0 position being in the upper right corner
                // with :
                // X growing from top to bottom and
                // Y growing from right to left
                //
                // we convert those coordinates to be more natural
                // with our application:
                // - 0,0 is the upper left corner
                // - X grows from left to right
                // - Y grows from top to bottom
                float wordx = - obb.getCenter().data[1];
                float wordy = obb.getCenter().data[0];

                // For debugging purposes convert the string to 7bit ASCII
                // (if possible) and log it.
                char* stringA = 0;
                if (unicodeToAscii(word, stringA))
                {
                    // we store the word
                    if (NbWordsFound < MAX_NB_WORDS)
                    {
                        struct WordDesc * word =  & WordsFound[NbWordsFound];
                        NbWordsFound++;
                        strncpy(word->text, stringA, MAX_WORD_LENGTH - 1);
                        word->text[MAX_WORD_LENGTH - 1] = '\0';
                        word->Ax = wordx - (int)(wordBoxSize.data[0] / 2);
                        word->Ay = wordy - (int)(wordBoxSize.data[1] / 2);
                        word->Bx = wordx + (int)(wordBoxSize.data[0] / 2);
                        word->By = wordy + (int)(wordBoxSize.data[1] / 2);
                    }

                    delete[] stringA;
                }
            }
        }
        else
        {
//.........这里部分代码省略.........
开发者ID:varunkumar,项目名称:aRed,代码行数:101,代码来源:TextReco.cpp


示例7: glDisableVertexAttribArray

JNIEXPORT void JNICALL
Java_com_tvc_supastriker_SupaStrikerRenderer_renderFrame(JNIEnv* env, jobject obj){

	LOG("Java_com_tvc_supastriker_SupaStrikerRenderer_renderFrame");

	glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	QCAR::State state = QCAR::Renderer::getInstance().begin();

	QCAR::Renderer::getInstance().drawVideoBackground();

#ifdef USE_OPENGL_ES_1_1
	glEnableClientState(GL_VERTEX_ARRAY);
	glEnableClientState(GL_NORMAL_ARRAY);
	glEnableClientState(GL_TEXTURE_COORD_ARRAY);

	glEnable(GL_TEXTURE_2D);
	//glDisable(GL_LIGHTING);
	glEnable(GL_LIGHTING);
#endif

	glEnable(GL_DEPTH_TEST);
	//glEnable(GL_CULL_FACE);
	glDisable(GL_CULL_FACE);
	glCullFace(GL_BACK);

	if(QCAR::Renderer::getInstance().getVideoBackgroundConfig().mReflection == QCAR::VIDEO_BACKGROUND_REFLECTION_ON)
		glFrontFace(GL_CCW);
	else
		glFrontFace(GL_CCW);

	for(int tIdx = 0; tIdx < state.getNumTrackableResults(); tIdx++){
		const QCAR::TrackableResult* result = state.getTrackableResult(tIdx);
		const QCAR::Trackable& trackable = result->getTrackable();
		QCAR::Matrix44F modelViewMatrix = QCAR::Tool::convertPose2GLMatrix(result->getPose());

		int textureIndex;
		if(strcmp(trackable.getName(), "SupaStrika") == 0){
			textureIndex = 0;
		}

		const Texture* const thisTexture = textures[textureIndex];

#ifdef USE_OPENGL_ES_1_1
		//load projection matrix
		glMatrixMode(GL_PROJECTION);
		glLoadMatrixf(projectionMatrix.data);

		//load model view matrix
		glMatrixMode(GL_MODELVIEW);

		glLoadMatrixf(modelViewMatrix.data);
		glTranslatef(0.f, 0.f, kObjectScale);
		glScalef(kObjectScale, kObjectScale, kObjectScale);

		//draw object
		glBindTexture(GL_TEXTURE_2D, thisTexture->mTextureID);
		glTexCoordPointer(2, GL_FLOAT, 0, (const GLvoid*) &teapotTexCoords[0]);
		glVertexPointer(3, GL_FLOAT, 0, (const GLvoid*) &teapotVertices[0]);
		glNormalPointer(GL_FLOAT, 0, (const GLvoid*) &teapotNormals[0]);
		//glDrawElements(GL_TRIANGLES, NUM_TEAPOT_OBJECT_INDEX, GL_UNSIGNED_SHORT,
		//		(const GLvoid*) &teapotIndices[0]);
		glDrawArrays(GL_TRIANGLES, 0, NUM_TEAPOT_OBJECT_VERTEX);
#else
		QCAR::Matrix44F modelViewProjection;

        SampleUtils::translatePoseMatrix(0.0f, 0.0f, kObjectScale,
                                         &modelViewMatrix.data[0]);
		SampleUtils::scalePoseMatrix(kObjectScale, kObjectScale, kObjectScale,
				&modelViewMatrix.data[0]);
		SampleUtils::multiplyMatrix(&projectionMatrix.data[0],
				&modelViewMatrix.data[0],
				&modelViewProjection.data[0]);

		glUseProgram(shaderProgramID);
		glVertexAttribPointer(vertexHandle, 3, GL_FLOAT, GL_FALSE, 0,
				(const GLvoid*) &teapotVertices[0]);
		glVertexAttribPointer(normalHandle, 3, GL_FLOAT, GL_FALSE, 0,
				(const GLvoid*) &teapotNormals[0]);
		glVertexAttribPointer(textureCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
				(const GLvoid*) &teapotTexCoords[0]);
		glEnableVertexAttribArray(vertexHandle);
		glEnableVertexAttribArray(normalHandle);
		glEnableVertexAttribArray(textureCoordHandle);

		glActiveTexture(GL_TEXTURE0);
		glBindTexture(GL_TEXTURE_2D, thisTexture->mTextureID);
		glUniform1i(texSampler2DHandle, 0);
		glUniformMatrix4fv(mvpMatrixHandle, 1, GL_FALSE,
				(GLfloat*) &modelViewProjection.data[0]);
		glDrawElements(GL_TRIANGLES, NUM_TEAPOT_OBJECT_INDEX, GL_UNSIGNED_SHORT,
				(const GLvoid*) &teapotIndices[0]);
		SampleUtils::checkGlError("SupaStriker renderFrame");
#endif
	}

	glDisable(GL_DEPTH_TEST);

#ifdef USE_OPENGL_ES_1_1
	glDisable(GL_TEXTURE_2D);
//.........这里部分代码省略.........
开发者ID:oladipo,项目名称:SupaStrika,代码行数:101,代码来源:SupaStriker.cpp


示例8: screenPoint

JNIEXPORT jboolean JNICALL
Java_com_snda_sdar_ImageTargetsRenderer_renderFrame(JNIEnv *env, jobject obj)
{
	//LOG("Java_com_snda_sdar_ImageTargets_GLRenderer_renderFrame");

	// Clear color and depth buffer
	glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	jboolean isDetected = false;

	// Render video background:
	QCAR::State state = QCAR::Renderer::getInstance().begin();

	// Explicitly render the Video Background
	QCAR::Renderer::getInstance().drawVideoBackground();

#ifdef USE_OPENGL_ES_1_1
	// Set GL11 flags:
	glEnableClientState(GL_VERTEX_ARRAY);
	glEnableClientState(GL_NORMAL_ARRAY);
	glEnableClientState(GL_TEXTURE_COORD_ARRAY);

	glEnable(GL_TEXTURE_2D);
	glDisable(GL_LIGHTING);

#endif

	glEnable(GL_DEPTH_TEST);
	glEnable(GL_CULL_FACE);

	// Did we find any trackables this frame?
	for(int tIdx = 0; tIdx < state.getNumActiveTrackables(); tIdx++)
	{
		isDetected = true;

		// Get the trackable:
		const QCAR::Trackable* trackable = state.getActiveTrackable(tIdx);
		QCAR::Matrix44F modelViewMatrix =
		QCAR::Tool::convertPose2GLMatrix(trackable->getPose());

		// Choose the texture based on the target name:
		//int textureIndex = (!strcmp(trackable->getName(), "stones")) ? 0 : 1;

		const Texture* const thisTexture = textures[textureIndex];
		const Texture* const tagTexture = textures[textureCount - 1];

#ifdef USE_OPENGL_ES_1_1
		// Load projection matrix:
		glMatrixMode(GL_PROJECTION);
		glLoadMatrixf(projectionMatrix.data);

		// Load model view matrix:
		glMatrixMode(GL_MODELVIEW);
		glLoadMatrixf(modelViewMatrix.data);
		glTranslatef(0.f, 0.f, kObjectScale);
		glScalef(kObjectScale, kObjectScale, kObjectScale);

		// Draw object:
		glBindTexture(GL_TEXTURE_2D, thisTexture->mTextureID);
		glTexCoordPointer(2, GL_FLOAT, 0, (const GLvoid*) &planeTexCoords[0]);
		glVertexPointer(3, GL_FLOAT, 0, (const GLvoid*) &planeVertices[0]);
		glNormalPointer(GL_FLOAT, 0, (const GLvoid*) &planeNormals[0]);
		glDrawElements(GL_TRIANGLES, NUM_PLANE_OBJECT_INDEX, GL_UNSIGNED_SHORT,
				(const GLvoid*) &planeIndices[0]);

		//		// Load model view matrix:
		//		glMatrixMode(GL_MODELVIEW);
		//		glLoadMatrixf(modelViewMatrix.data);
		//		glTranslatef(50.f, 50.f, kObjectScale);
		//		glScalef(1, 1, 1);
		//
		//		// Draw object:
		//		glBindTexture(GL_TEXTURE_2D, tagTexture->mTextureID);
		//		glTexCoordPointer(2, GL_FLOAT, 0, (const GLvoid*) &planeTexCoords[0]);
		//		glVertexPointer(3, GL_FLOAT, 0, (const GLvoid*) &planeVertices[0]);
		//		glNormalPointer(GL_FLOAT, 0, (const GLvoid*) &planeNormals[0]);
		//		glDrawElements(GL_TRIANGLES, NUM_PLANE_OBJECT_INDEX, GL_UNSIGNED_SHORT,
		//				(const GLvoid*) &planeIndices[0]);
#else

		QCAR::Matrix44F modelViewMatrix2;

		for (int i = 0; i < 16; i++)
		{
			modelViewMatrix2.data[i] = modelViewMatrix.data[i];
		}

		QCAR::Matrix44F modelViewProjection;
		QCAR::Matrix44F modelViewProjection2;

		SampleUtils::translatePoseMatrix(translateX, translateY, kObjectScale,
				&modelViewMatrix.data[0]);
		SampleUtils::scalePoseMatrix(kObjectScale, kObjectScale, kObjectScale,
				&modelViewMatrix.data[0]);

		SampleUtils::rotatePoseMatrix(angleX, 0, 1, 0,
				&modelViewMatrix.data[0]);
		SampleUtils::rotatePoseMatrix(angleY, 1, 0, 0,
				&modelViewMatrix.data[0]);

//.........这里部分代码省略.........
开发者ID:Josiastech,项目名称:EasyQCAR,代码行数:101,代码来源:ImageTargets.cpp


示例9: if

JNIEXPORT void JNICALL
Java_name_nailgun_irrlichtvuforia_Renderer_renderFrame(JNIEnv *, jobject)
{
    // Open the QCAR rendering section and draw the camera image first.
    QCAR::State state = QCAR::Renderer::getInstance().begin();
    QCAR::Renderer::getInstance().drawVideoBackground();

#ifndef DONT_SAVE_STATE
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_NORMAL_ARRAY);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);

    glEnable(GL_TEXTURE_2D);
    glDisable(GL_LIGHTING);

    glEnable(GL_DEPTH_TEST);
    //glEnable(GL_CULL_FACE);
#endif

    // Render the Irrlicht scene once per trackable detected this frame.
    const int trackableCount = state.getNumActiveTrackables();
    for (int i = 0; i < trackableCount; ++i)
    {
        const QCAR::Trackable* trackable = state.getActiveTrackable(i);
        QCAR::Matrix44F modelViewMatrix =
            QCAR::Tool::convertPose2GLMatrix(trackable->getPose());

        // Map the target name to a texture slot (2 = fallback).
        const char* targetName = trackable->getName();
        int textureIndex = 2;
        if (strcmp(targetName, "chips") == 0)
        {
            textureIndex = 0;
        }
        else if (strcmp(targetName, "stones") == 0)
        {
            textureIndex = 1;
        }

        if (!mDevice->run()) {
            // TODO: error
        }

        // Feed the AR pose into the scene graph's transformation node.
        irr::core::matrix4& cameraMatrix = mTransformationNode->getRelativeTransformationMatrix();
        cameraMatrix.setM(modelViewMatrix.data);

        mDriver->beginScene(false, true);
        mSceneManager->drawAll();
        mDriver->endScene();
    }

#ifndef DONT_SAVE_STATE
    glDisable(GL_DEPTH_TEST);

    glDisable(GL_TEXTURE_2D);
    glDisableClientState(GL_VERTEX_ARRAY);
    glDisableClientState(GL_NORMAL_ARRAY);
    glDisableClientState(GL_TEXTURE_COORD_ARRAY);
#endif

    QCAR::Renderer::getInstance().end();
}
开发者ID:nailgun,项目名称:android_irrlicht_vuforia,代码行数:64,代码来源:MainActivity.cpp


示例10: if

// Does all the rendering stuff
JNIEXPORT void JNICALL
Java_edu_ethz_s3d_S3DRenderer_renderFrame(JNIEnv *, jobject)
{
	glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
	glEnable(GL_BLEND);

    // Clear color and depth buffer 
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    // Get the state from QCAR and mark the beginning of a rendering section
    QCAR::State state = QCAR::Renderer::getInstance().begin();
    
    // Explicitly render the Video Background
    QCAR::Renderer::getInstance().drawVideoBackground();
       
#ifdef USE_OPENGL_ES_1_1
    // Set GL11 flags:
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_NORMAL_ARRAY);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);

    glEnable(GL_TEXTURE_2D);
    glDisable(GL_LIGHTING);
        
#endif

    glEnable(GL_DEPTH_TEST);
    glEnable(GL_CULL_FACE);

    // TODO: Only work, when there is exactly one trackable
    // Did we find any trackables this frame?
    for(int tIdx = 0; tIdx < state.getNumActiveTrackables(); tIdx++)
    {
        // Get the trackable:
        const QCAR::Trackable* trackable = state.getActiveTrackable(tIdx);
        QCAR::Matrix44F modelViewMatrix = QCAR::Tool::convertPose2GLMatrix(trackable->getPose());

        // Choose the texture based on the target name:
        int textureIndex;
        if (strcmp(trackable->getName(), "chips") == 0)
        {
            textureIndex = 0;
        }
        else if (strcmp(trackable->getName(), "stones") == 0)
        {
            textureIndex = 1;
        }
        else
        {
            textureIndex = 2;
        }

        const Texture* const thisTexture = textures[textureIndex];

#ifdef USE_OPENGL_ES_1_1
        // Load projection matrix:
        glMatrixMode(GL_PROJECTION);
        glLoadMatrixf(projectionMatrix.data);

        // Load model view matrix:
        glMatrixMode(GL_MODELVIEW);
        glLoadMatrixf(modelViewMatrix.data);
        glTranslatef(0.f, 0.f, kObjectScale);
        glScalef(kObjectScale, kObjectScale, kObjectScale);

        // Draw object:
        glBindTexture(GL_TEXTURE_2D, thisTexture->mTextureID);
        glTexCoordPointer(2, GL_FLOAT, 0, (const GLvoid*) &teapotTexCoords[0]);
        glVertexPointer(3, GL_FLOAT, 0, (const GLvoid*) &teapotVertices[0]);
        glNormalPointer(GL_FLOAT, 0,  (const GLvoid*) &teapotNormals[0]);
        glDrawElements(GL_TRIANGLES, NUM_TEAPOT_OBJECT_INDEX, GL_UNSIGNED_SHORT,
                       (const GLvoid*) &teapotIndices[0]);
#else

        // Calculate the projection matrix
        QCAR::Matrix44F modelViewProjection;
        SampleUtils::translatePoseMatrix(0.0f, 0.0f, kObjectScale, &modelViewMatrix.data[0]);
        SampleUtils::scalePoseMatrix(kObjectScale, kObjectScale, kObjectScale, &modelViewMatrix.data[0]);
        SampleUtils::multiplyMatrix(&projectionMatrix.data[0], &modelViewMatrix.data[0], &modelViewProjection.data[0]);

        // Calculate the camera position
        QCAR::Matrix44F inverseModelView = SampleMath::Matrix44FTranspose(SampleMath::Matrix44FInverse(modelViewMatrix));
        QCAR::Vec3F cameraPosition(inverseModelView.data[12], inverseModelView.data[13], inverseModelView.data[14]);

        // Select the shader program
        glUseProgram(shaderProgramID);

        // Load the vertex attributes
        glVertexAttribPointer(vertexHandle, 3, GL_FLOAT, GL_FALSE, 0, (const GLvoid*) &teapotVertices[0]);
        glVertexAttribPointer(vertexColorHandle,3,GL_FLOAT, GL_FALSE, 0, (const GLvoid*) &color[0]);
        glVertexAttribPointer(normalHandle, 3, GL_FLOAT, GL_FALSE, 0, (const GLvoid*) &teapotNormals[0]);

        // Enable the vertex attributes
        glEnableVertexAttribArray(vertexHandle);
        glEnableVertexAttribArray(normalHandle);
        glEnableVertexAttribArray(vertexColorHandle);
        
        // Load the reconstruction data
        if (reconstructionHandler != NULL) {
//.........这里部分代码省略.........
开发者ID:Alice-,项目名称:CVL,代码行数:101,代码来源:ImageTargets.cpp


示例11: glClear

// Per-frame render entry point: draws the camera background and, for each
// active trackable, notifies the Java side via prueba2(String) with the
// trackable's name.
JNIEXPORT void JNICALL
Java_com_mx_ipn_escom_ars_recorrido_ArsRenderer_renderFrame2(JNIEnv *env, jobject obj)
{
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    QCAR::State state = QCAR::Renderer::getInstance().begin();
    QCAR::Renderer::getInstance().drawVideoBackground();

#ifdef USE_OPENGL_ES_1_1
    // Set GL11 flags:
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_NORMAL_ARRAY);
//    glEnableClientState(GL_TEXTURE_COORD_ARRAY);

    glEnable(GL_TEXTURE_2D);
    glEnable(GL_LIGHTING);
//    glDisable(GL_LIGHTING);

#endif

    glEnable(GL_DEPTH_TEST);
    glEnable(GL_CULL_FACE);

    // Did we find any trackables this frame?
    for(int tIdx = 0; tIdx < state.getNumActiveTrackables(); tIdx++)
    {
        // Get the trackable:
        const QCAR::Trackable* trackable = state.getActiveTrackable(tIdx);
        QCAR::Matrix44F modelViewMatrix =
            QCAR::Tool::convertPose2GLMatrix(trackable->getPose());

#ifdef USE_OPENGL_ES_1_1
        // Load projection matrix:
        glMatrixMode(GL_PROJECTION);
        glLoadMatrixf(projectionMatrix2.data);

        // Load model view matrix:
        glMatrixMode(GL_MODELVIEW);
        glLoadMatrixf(modelViewMatrix.data);
        glTranslatef(0.f, 0.f, kObjectScale);
        glScalef(kObjectScale, kObjectScale, kObjectScale);

        jclass javaClass = env->GetObjectClass(obj);

        // BUG FIX: the original copied the name with malloc(10)/strcpy, which
        // overflows the 10-byte buffer for names of 10+ characters and never
        // frees it (one leak per trackable per frame). NewStringUTF copies
        // the characters itself, so pass the name directly.
        jstring jstrBuf = env->NewStringUTF(trackable->getName());

        jmethodID method1 = env->GetMethodID(javaClass, "prueba2", "(Ljava/lang/String;)V");

        env->CallVoidMethod(obj, method1, jstrBuf);
        env->DeleteLocalRef(jstrBuf);

#endif

    }

    glDisable(GL_DEPTH_TEST);

#ifdef USE_OPENGL_ES_1_1
//    glDisable(GL_TEXTURE_2D);
    glDisableClientState(GL_VERTEX_ARRAY);
    glDisableClientState(GL_NORMAL_ARRAY);
#endif

    QCAR::Renderer::getInstance().end();
}
开发者ID:csantiagoalva,项目名称:namelezz,代码行数:66,代码来源:ArsReconocer.cpp


示例12: glClear

JNIEXPORT void JNICALL
Java_com_siu_android_arapp_vuforia_ImageTargetsRenderer_renderFrame(JNIEnv* env, jobject object)
{
    //LOG("Java_com_siu_android_arapp_vuforia_GLRenderer_renderFrame");

    // Clear color and depth buffer 
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    // Get the state from QCAR and mark the beginning of a rendering section
    QCAR::State state = QCAR::Renderer::getInstance().begin();
    
    // Explicitly render the Video Background
    QCAR::Renderer::getInstance().drawVideoBackground();
       
#ifdef USE_OPENGL_ES_1_1
    // Set GL11 flags:
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_NORMAL_ARRAY);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);

    glEnable(GL_TEXTURE_2D);
    glDisable(GL_LIGHTING);
        
#endif

//    glEnable(GL_DEPTH_TEST);
//
//    // We must detect if background reflection is active and adjust the culling direction.
//    // If the reflection is active, this means the post matrix has been reflected as well,
//    // therefore standard counter clockwise face culling will result in "inside out" models.
//    glEnable(GL_CULL_FACE);
//    glCullFace(GL_BACK);
//    if(QCAR::Renderer::getInstance().getVideoBackgroundConfig().mReflection == QCAR::VIDEO_BACKGROUND_REFLECTION_ON)
//        glFrontFace(GL_CW);  //Front camera
//    else
//        glFrontFace(GL_CCW);   //Back camera


    // Did we find any trackables this frame?
    for(int tIdx = 0; tIdx < state.getNumTrackableResults(); tIdx++)
    {
        // Get the trackable:
        const QCAR::TrackableResult* result = state.getTrackableResult(tIdx);
        const QCAR::Trackable& trackable = result->getTrackable();

        QCAR::Matrix34F pose = result->getPose();
        QCAR::Vec3F position(pose.data[3], pose.data[7], pose.data[11]);
        float distance = sqrt(position.data[0] * position.data[0] +
                              position.data[1] * position.data[1] +
                              position.data[2] * position.data[2]);
        //LOG("DISTANCE: %f", distance);

        jclass clazz = env->FindClass("com/siu/android/arapp/vuforia/ImageTargetsRenderer");
        if (clazz == 0) {
            LOG("FindClass error");
            return;
        }

        jmethodID jmethod = env->GetMethodID(clazz, "objectDetected", "(Ljava/lang/String;F)V");
        if (jmethod == 0) {
            LOG("GetMethodID error");
            return;
        }

        jstring s = env->NewStringUTF(trackable.getName());

        env->CallVoidMethod(object, jmethod, s, distance);

//        QCAR::Matrix44F modelViewMatrix = QCAR::Tool::convertPose2GLMatrix(result->getPose());
//
//        // Choose the texture based on the target name:
//        int textureIndex;
//        if (strcmp(trackable.getName(), "chips") == 0)
//        {
//            textureIndex = 0;
//        }
//        else if (strcmp(trackable.getName(), "stones") == 0)
//        {
//            textureIndex = 1;
//        }
//        else
//        {
//            textureIndex = 2;
//        }
//
//        const Texture* const thisTexture = textures[textureIndex];
//
//#ifdef USE_OPENGL_ES_1_1
//        // Load projection matrix:
//        glMatrixMode(GL_PROJECTION);
//        glLoadMatrixf(projectionMatrix.data);
//
//        // Load model view matrix:
//        glMatrixMode(GL_MODELVIEW);
//        glLoadMatrixf(modelViewMatrix.data);
//        glTranslatef(0.f, 0.f, kObjectScale);
//        glScalef(kObjectScale, kObjectScale, kObjectScale);
//
//        // Draw object:
//        glBindTexture(GL_TEXTURE_2D, thisTexture->mTextureID);
//.........这里部分代码省略.........
开发者ID:lukaspili-brookes,项目名称:AR-Navigation-app,代码行数:101,代码来源:ImageTargets.cpp


示例13: glClear

// ----------------------------------------------------------------------------
// renderFrame Method - Takes care of drawing in the different render states
// ----------------------------------------------------------------------------
JNIEXPORT void JNICALL
Java_com_qualcomm_QCARSamples_CloudRecognition_CloudRecoRenderer_renderFrame(JNIEnv *, jobject)
{
    // Clear color and depth buffer
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    // Get the state from QCAR and mark the beginning of a rendering section
    QCAR::State state = QCAR::Renderer::getInstance().begin();

    // Explicitly render the Video Background
    QCAR::Renderer::getInstance().drawVideoBackground();

    glEnable(GL_DEPTH_TEST);
    glEnable(GL_CULL_FACE);

    if (deleteCurrentProductTexture)
    {
        // Deletes the product texture if necessary
        if (productTexture != 0)
        {
            glDeleteTextures(1, &(productTexture->mTextureID));
            delete productTexture;
            productTexture = 0;
        }

        deleteCurrentProductTexture = false;
    }

    // If the render state indicates that the texture is generated it generates
    // the OpenGL texture for start drawing the plane with the book data
    if (renderState == RS_TEXTURE_GENERATED)
    {
        generateProductTextureInOpenGL();
    }

    // Did we find any trackables this frame?
    if (state.getNumTrackableResults() > 0)
    {
        trackingStarted = true;

        // If we are already tracking something we don't need
        // to wait any frame before starting the 2D transition
        // when the target gets lost
        pthread_mutex_lock(&framesToSkipMutex);
        framesToSkipBeforeRenderingTransition = 0;
        pthread_mutex_unlock(&framesToSkipMutex);

        // Gets current trackable result
        const QCAR::TrackableResult* trackableResult = state.getTrackableResult(0);

        if (trackableResult == NULL)
        {
            return;
        }

        modelViewMatrix = QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());

        // Get the size of the ImageTarget
        QCAR::ImageTargetResult *imageResult = (QCAR::ImageTargetResult *)trackableResult;
        targetSize = imageResult->getTrackable().getSize();

        // Renders the Augmentation View with the 3D Book data Panel
        renderAugmentation(trackableResult);

    }
    else
    {
        // Manages the 3D to 2D Transition initialization
        if (!scanningMode && showAnimation3Dto2D && renderState == RS_NORMAL
                && framesToSkipBeforeRenderingTransition == 0)
        {
            startTransitionTo2D();
        }

        // Reduces the number of frames to wait before triggering
        // the transition by 1
        if( framesToSkipBeforeRenderingTransition > 0 && renderState == RS_NORMAL)
        {
            pthread_mutex_lock(&framesToSkipMutex);
            framesToSkipBeforeRenderingTransition -= 1;
            pthread_mutex_unlock(&framesToSkipMutex);
        }

    }

    // Logic for rendering Transition to 2D
    if (renderState == RS_TRANSITION_TO_2D && showAnimation3Dto2D)
    {
        renderTransitionTo2D();
    }

    // Logic for rendering Transition to 3D
    if (renderState == RS_TRANSITION_TO_3D )
    {
        renderTransitionTo3D();
    }

//.........这里部分代码省略.........
开发者ID:rxgranda,项目名称:LumiV2,代码行数:101,代码来源:CloudReco.cpp


示例14: glClear

JNIEXPORT void JNICALL
Java_com_qualcomm_QCARSamples_VideoPlayback_VideoPlaybackRenderer_renderFrame(JNIEnv *, jobject)
{
    //LOG("Java_com_qualcomm_QCARSamples_VideoPlayback_GLRenderer_renderFrame");

    // Clear color and depth buffer
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    // Get the state from QCAR and mark the beginning of a rendering section
    QCAR::State state = QCAR::Renderer::getInstance().begin();

    // Explicitly render the Video Background
    QCAR::Renderer::getInstance().drawVideoBackground();

    glEnable(GL_DEPTH_TEST);

    // We must detect if background reflection is active and adjust the culling direction.
    // If the reflection is active, this means the post matrix has been reflected as well,
    // therefore standard counter clockwise face culling will result in "inside out" models.
    glEnable(GL_CULL_FACE);
    glCullFace(GL_BACK);
    if(QCAR::Renderer::getInstance().getVideoBackgroundConfig().mReflection == QCAR::VIDEO_BACKGROUND_REFLECTION_ON)
        glFrontFace(GL_CW);  //Front camera
    else
        glFrontFace(GL_CCW);   //Back camera


    for (int i=0; i<NUM_TARGETS; i++)
    {
        isTracking[i] = false;
        targetPositiveDimensions[i].data[0] = 0.0;
        targetPositiveDimensions[i].data[1] = 0.0;
    }

    // Did we find any trackables this frame?
    for(int tIdx = 0; tIdx < state.getNumTrackableResults(); tIdx++)
    {
        // Get the trackable:
        const QCAR::TrackableResult* trackableResult = state.getTrackableResult(tIdx);

        const QCAR::ImageTarget& imageTarget = (const QCAR::ImageTarget&) trackableResult->getTrackable();

        int currentTarget;

        // We store the modelview matrix to be used later by the tap calculation
        if (strcmp(imageTarget.getName(), "stones") == 0)
            currentTarget=STONES;
        else
            currentTarget=CHIPS;

        modelViewMatrix[currentTarget] = QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());

        isTracking[currentTarget] = true;

        targetPositiveDimensions[currentTarget] = imageTarget.getSize();

        // The pose delivers the center of the target, thus the dimensions
        // go from -width/2 to width/2, same for height
        targetPositiveDimensions[currentTarget].data[0] /= 2.0f;
        targetPositiveDimensions[currentTarget].data[1] /= 2.0f;


        // If the movie is ready to start playing or it has reached the end
        // of playback we render the keyframe
        if ((currentStatus[currentTarget] == READY) || (currentStatus[currentTarget] == REACHED_END) ||
                (currentStatus[currentTarget] == NOT_READY) || (currentStatus[currentTarget] == ERROR))
        {
            QCAR::Matrix44F modelViewMatrixKeyframe =
                QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());
            QCAR::Matrix44F modelViewProjectionKeyframe;
            SampleUtils::translatePoseMatrix(0.0f, 0.0f, targetPositiveDimensions[currentTarget].data[0],
                                             &modelViewMatrixKeyframe.data[0]);

            // Here we use the aspect ratio of the keyframe since it
            // is likely that it is not a perfect square

            float ratio=1.0;
            if (textures[currentTarget]->mSuccess)
                ratio = keyframeQuadAspectRatio[currentTarget];
            else
                ratio = targetPositiveDimensions[currentTarget].data[1] / targetPositiveDimensions[currentTarget].data[0];

            SampleUtils::scalePoseMatrix(targetPositiveDimensions[currentTarget].data[0],
                                         targetPositiveDimensions[currentTarget].data[0]*ratio,
                                         targetPositiveDimensions[currentTarget].data[0],
                                         &modelViewMatrixKeyframe.data[0]);
            SampleUtils::multiplyMatrix(&projectionMatrix.data[0],
                                        &modelViewMatrixKeyframe.data[0] ,
                                        &modelViewProjectionKeyframe.data[0]);

            glUseProgram(keyframeShaderID);

            // Prepare for rendering the keyframe
            glVertexAttribPointer(keyframeVertexHandle, 3, GL_FLOAT, GL_FALSE, 0,
                                  (const GLvoid*) &quadVertices[0]);
            glVertexAttribPointer(keyframeNormalHandle, 3, GL_FLOAT, GL_FALSE, 0,
                                  (const GLvoid*) &quadNormals[0]);
            glVertexAttribPointer(keyframeTexCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
                                  (const GLvoid*) &quadTexCoords[0]);

//.........这里部分代码省略.........
开发者ID:TomBers,项目名称:InvisibleGalleryAndroid,代码行数:101,代码来源:VideoPlayback.cpp


示例15: if

JNIEXPORT void JNICALL
Java_rajawali_vuforia_RajawaliVuforiaRenderer_renderFrame(JNIEnv* env,
		jobject object, jint frameBufferId, int frameBufferTextureId) {

	// Renders one AR frame into the caller-supplied framebuffer/texture and
	// reports each tracked target back to the Java renderer object through
	// one of three callbacks:
	//   foundFrameMarker(int, float[16])      - QCAR frame markers
	//   foundImageMarker(String, float[16])   - cylinder/image/multi targets
	//   noFrameMarkersFound()                 - nothing tracked this frame
	jclass ownerClass = env->GetObjectClass(object);

	// Clear color and depth buffer
	glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Get the state from QCAR and mark the beginning of a rendering section
	QCAR::State state = QCAR::Renderer::getInstance().begin();

	// Redirect all subsequent rendering into the texture-backed framebuffer
	glBindFramebuffer(GL_FRAMEBUFFER, frameBufferId);
	glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,
			frameBufferTextureId, 0);

	// Explicitly render the Video Background
	QCAR::Renderer::getInstance().drawVideoBackground();

	// Fix: look the callback method IDs up once per frame instead of once per
	// trackable inside the loop (GetMethodID is comparatively expensive and
	// the IDs cannot change between iterations).
	jmethodID foundFrameMarkerMethod = env->GetMethodID(ownerClass,
			"foundFrameMarker", "(I[F)V");
	jmethodID foundImageMarkerMethod = env->GetMethodID(ownerClass,
			"foundImageMarker", "(Ljava/lang/String;[F)V");

	// Reusable Java float[16] used to pass the model-view matrix to Java
	jfloatArray modelViewMatrixOut = env->NewFloatArray(16);

	// Report every trackable found this frame
	for (int tIdx = 0; tIdx < state.getNumTrackableResults(); tIdx++) {
		// Get the trackable:
		const QCAR::TrackableResult* trackableResult = state.getTrackableResult(
				tIdx);
		const QCAR::Trackable& trackable = trackableResult->getTrackable();
		QCAR::Matrix44F modelViewMatrix = QCAR::Tool::convertPose2GLMatrix(
				trackableResult->getPose());

		// Compensate for device orientation before handing the pose to Java
		if (isActivityInPortraitMode)
			Utils::rotatePoseMatrix(90.0f, 0, 1.0f, 0,
					&modelViewMatrix.data[0]);
		Utils::rotatePoseMatrix(-90.0f, 1.0f, 0, 0, &modelViewMatrix.data[0]);

		if (trackable.isOfType(QCAR::Marker::getClassType())) {
			env->SetFloatArrayRegion(modelViewMatrixOut, 0, 16,
					modelViewMatrix.data);
			env->CallVoidMethod(object, foundFrameMarkerMethod,
					(jint) trackable.getId(), modelViewMatrixOut);
		} else if (trackable.isOfType(QCAR::CylinderTarget::getClassType())
				|| trackable.isOfType(QCAR::ImageTarget::getClassType())
				|| trackable.isOfType(QCAR::MultiTarget::getClassType())) {
			env->SetFloatArrayRegion(modelViewMatrixOut, 0, 16,
					modelViewMatrix.data);
			const char* trackableName = trackable.getName();
			jstring trackableNameJava = env->NewStringUTF(trackableName);
			env->CallVoidMethod(object, foundImageMarkerMethod,
					trackableNameJava, modelViewMatrixOut);
			// Fix: release the per-iteration local reference; otherwise every
			// tracked target leaks a slot in the JNI local reference table
			// until this native method returns.
			env->DeleteLocalRef(trackableNameJava);
		}
	}
	env->DeleteLocalRef(modelViewMatrixOut);

	// Tell Java when nothing was tracked so it can hide its augmentations
	if (state.getNumTrackableResults() == 0) {
		jmethodID noFrameMarkersFoundMethod = env->GetMethodID(ownerClass,
				"noFrameMarkersFound", "()V");
		env->CallVoidMethod(object, noFrameMarkersFoundMethod);
	}

	// Restore the default framebuffer and close the QCAR rendering section
	glBindFramebuffer(GL_FRAMEBUFFER, 0);

	QCAR::Renderer::getInstance().end();
}
开发者ID:AhmadHegazy,项目名称:RajawaliVuforia,代码行数:67,代码来源:RajawaliVuforia.cpp


示例16: atan

该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
C++ qcar::TargetFinder类代码示例发布时间:2022-05-31
下一篇:
C++ qcar::ImageTracker类代码示例发布时间:2022-05-31
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2213 极客世界.|Sitemap