// Returns true when QCAR currently reports at least one active trackable.
// (The previous header comment -- "returns 1 for OpenGLES 1.1 and 2 for
// other" -- described a different function and was wrong for this one.)
JNIEXPORT bool JNICALL
Java_edu_ethz_s3d_S3D_hasTarget(JNIEnv *, jobject)
{
// Get the state from QCAR and mark the beginning of a rendering section
QCAR::State state = QCAR::Renderer::getInstance().begin();
bool hasTarget = (state.getNumActiveTrackables() > 0);
// Fix: every begin() must be balanced by end(); the original returned
// without closing the rendering section, leaking it on every call.
QCAR::Renderer::getInstance().end();
return hasTarget;
}
// Per-frame render callback for the Dominoes sample: draws the camera
// background, and for each image-target result shows a tracker button at the
// target's projected screen position (plus a one-time hint message).
JNIEXPORT void JNICALL
Java_com_qualcomm_QCARSamples_Dominoes_DominoesRenderer_renderFrame(JNIEnv* , jobject)
{
// Clear the color and depth buffers
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Get the state from QCAR and mark the beginning of a rendering section
QCAR::State state = QCAR::Renderer::getInstance().begin();
// Explicitly render the Video Background
QCAR::Renderer::getInstance().drawVideoBackground();
// Names of the trackables seen this frame.
// NOTE(review): this list is only ever passed to hideTrackerButton() in the
// else-branch below, where it is necessarily empty -- confirm whether the
// populated list was meant to be used as well.
std::vector<const char*> found;
// Did we find any trackables this frame?
if (state.getNumTrackableResults() > 0) {
for(int tIdx = 0; tIdx < state.getNumTrackableResults(); ++tIdx) {
// Get the first trackable
const QCAR::TrackableResult* trackableResult = state.getTrackableResult(tIdx);
const QCAR::Trackable& trackable = trackableResult->getTrackable();
found.push_back(trackable.getName());
// This sample only tracks image targets; the downcast done by the
// original code was never used, so only the sanity check remains.
assert(trackableResult->getType() == QCAR::TrackableResult::IMAGE_TARGET_RESULT);
// If this is our first time seeing the target, display a tip
if (!displayedMessage) {
displayMessage("Find marker man!");
displayedMessage = true;
}
//const QCAR::TrackerManager& trackerManager = QCAR::TrackerManager::getInstance();
//const QCAR::Tracker* tracker = trackerManager.getTracker(QCAR::Tracker::IMAGE_TRACKER);
// Project the target's origin (pose center) into camera coordinates,
// then convert to screen coordinates for button placement.
const QCAR::CameraCalibration& cameraCalibration = QCAR::CameraDevice::getInstance().getCameraCalibration();
QCAR::Vec2F cameraPoint = QCAR::Tool::projectPoint(cameraCalibration, trackableResult->getPose(), QCAR::Vec3F(0,0,0));
QCAR::Vec2F xyPoint = cameraPointToScreenPoint(cameraPoint);
showTrackerButton(xyPoint.data[0], xyPoint.data[1], trackable.getName());
}
} else {
hideTrackerButton(found);
}
// Close the rendering section opened by begin()
QCAR::Renderer::getInstance().end();
}
// Per-frame render callback: optionally refreshes the interpolation list from
// a float array passed in from Java, then renders the AR scene.
// NOTE(review): this function is TRUNCATED in the scraped source (see the
// omission marker at the end); the closing brace and the remainder of the
// render loop are missing, so only the visible portion is documented here.
JNIEXPORT void JNICALL
Java_edu_pugetsound_vichar_ar_ARGameRenderer_renderFrame(JNIEnv * env, jobject obj, jboolean updated, jfloatArray test, jint objSize)
{
bool update;
update = (bool) updated; //so we know whether or not to update the drawlist.
float testScale = 0.3f; // scale factor applied to every incoming coordinate
// here is an example of how to pull the elements out of the jfloatArray. I think c++ will implicitly handle the type casting of jfloats as floats,
// but if you are getting errors, you can always explicitly type cast them like so (assuming you have jfloats in the array):
// float x;
// x = (float) posData[i];
if(update){
int i = 0;
int j = 0; // NOTE(review): unused in the visible portion of this function
jsize len = env->GetArrayLength(test);
jfloat* posData = env->GetFloatArrayElements(test, 0);
// Copy scaled values into interpList (objSize floats per object) until the
// array ends or an object whose first component is 0 acts as a terminator.
while(i<len && posData[(i/objSize)*objSize] != 0){
LOG("JSON to JNI test. Pos. %d : %f", i, posData[i]); //print the elements of the array.
interpList[i/objSize][i%objSize]= (float) posData[i] * testScale;
i++;
}
interpLength=(i)/objSize; // number of complete objects copied
LOG("%i", interpLength);
env->ReleaseFloatArrayElements(test, posData, 0); //release memory
}
//LOG("Java_edu_pugetsound_vichar_ar_GLRenderer_renderFrame");
// Clear color and depth buffer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Get the state from QCAR and mark the beginning of a rendering section
QCAR::State state = QCAR::Renderer::getInstance().begin();
// Explicitly render the Video Background
QCAR::Renderer::getInstance().drawVideoBackground();
#ifdef USE_OPENGL_ES_1_1
// Set GL11 flags:
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_NORMAL_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glEnable(GL_TEXTURE_2D);
glDisable(GL_LIGHTING);
#endif
glEnable(GL_DEPTH_TEST);
glEnable(GL_CULL_FACE);
// Did we find any trackables this frame?
for(int tIdx = 0; tIdx < state.getNumActiveTrackables(); tIdx++)
{
// Get the trackable:
const QCAR::Trackable* trackable = state.getActiveTrackable(tIdx);
QCAR::Matrix44F modelViewMatrix =
QCAR::Tool::convertPose2GLMatrix(trackable->getPose());
//Begin additions by Erin================================================================================
// NOTE(review): this local `test` shadows the jfloatArray parameter `test`
// above -- confusing but harmless here; consider renaming.
QCAR::Matrix34F test; //gets inverse pos matrix
QCAR::Matrix34F pos; //Gets positional data
pos = trackable->getPose();
//Get inverse
test = SampleMath::phoneCoorMatrix(trackable->getPose());
//Print results
// LOG("Poisiton:");
// LOG("%f %f %f %f",pos.data[0], pos.data[1], pos.data[2], pos.data[3]);
// LOG("%f %f %f %f",pos.data[4], pos.data[5], pos.data[6], pos.data[7]);
// LOG("%f %f %f %f",pos.data[8], pos.data[9], pos.data[10],pos.data[11]);
// LOG("Inverse:");
// LOG("%f %f %f %f",test.data[0], test.data[1], test.data[2], test.data[3]);
// LOG("%f %f %f %f",test.data[4], test.data[5], test.data[6], test.data[7]);
// LOG("%f %f %f %f",test.data[8], test.data[9], test.data[10], test.data[11]);
// LOG("=========================");
// Store the phone's position (translation column of the inverse pose).
// NOTE(review): phoneLoc[0] looks like a validity/homogeneous flag -- confirm.
phoneLoc[0] = 1.0f;
phoneLoc[1] = test.data[3];
phoneLoc[2] = test.data[7];
phoneLoc[3] = test.data[11];
//End============================================================================================
// Assign Textures according in the texture indices defined at the beginning of the file, and based
// on the loadTextures() method in ARGameActivity.java.
const Texture* const tower_shellTexture = textures[tower_shellIndex];
const Texture* const tower_topTexture = textures[tower_topIndex];
const Texture* const bananaTexture = textures[banana180Index];
#ifdef USE_OPENGL_ES_1_1
// Load projection matrix:
glMatrixMode(GL_PROJECTION);
glLoadMatrixf(projectionMatrix.data);
// Load model view matrix:
glMatrixMode(GL_MODELVIEW);
glLoadMatrixf(modelViewMatrix.data);
glTranslatef(0.f, 0.f, kObjectScale);
//......... (remainder of this function omitted in the scraped source) .........
// Per-frame render callback for the text-recognition sample: renders the
// camera background, notifies the Java side that a word pass is starting,
// then collects every recognized word's ASCII text and bounding box into the
// WordsFound array (up to MAX_NB_WORDS).
// NOTE(review): this function is TRUNCATED in the scraped source (omission
// marker at the end); the else-branch and closing brace are missing.
JNIEXPORT void JNICALL
Java_com_codered_ared_TextRecoRenderer_renderFrame(JNIEnv * env, jobject obj)
{
//LOG("JJava_com_codered_ared_TextRecoRenderer_renderFrame");
// Clear color and depth buffer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Get the state from QCAR and mark the beginning of a rendering section
QCAR::State state = QCAR::Renderer::getInstance().begin();
// Explicitly render the Video Background
QCAR::Renderer::getInstance().drawVideoBackground();
glEnable(GL_DEPTH_TEST);
// We need Front Face, CW for the back camera and Front Face CCW for the front camera...
// or more accuratly, we need CW for 0 and 2 reflections and CCW for 1 reflection
glEnable(GL_CULL_FACE);
glCullFace(GL_FRONT);
if(QCAR::Renderer::getInstance().getVideoBackgroundConfig().mReflection == QCAR::VIDEO_BACKGROUND_REFLECTION_ON)
{
glFrontFace(GL_CCW); //Front camera
}
else
{
glFrontFace(GL_CW); //Back camera
}
// Enable blending to support transparency
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
// Call back into the Java renderer so it can reset its per-frame word state
jclass rendererJavaClass = env->GetObjectClass(obj);
env->CallVoidMethod(obj, env->GetMethodID(rendererJavaClass, "wordsStartLoop", "()V"));
NbWordsFound = 0;
// Did we find any trackables this frame?
for(int tIdx = 0; tIdx < state.getNumTrackableResults(); tIdx++)
{
// Get the trackable:
const QCAR::TrackableResult* result = state.getTrackableResult(tIdx);
const QCAR::Trackable& trackable = result->getTrackable();
QCAR::Matrix44F modelViewMatrix =
QCAR::Tool::convertPose2GLMatrix(result->getPose());
QCAR::Vec2F wordBoxSize(0, 0);
if (result->getType() == QCAR::TrackableResult::WORD_RESULT)
{
const QCAR::WordResult* wordResult = (const QCAR::WordResult*) result;
// Get the word
const QCAR::Word& word = wordResult->getTrackable();
const QCAR::Obb2D& obb = wordResult->getObb();
wordBoxSize = word.getSize();
if (word.getStringU())
{
// in portrait, the obb coordinate is based on
// a 0,0 position being in the upper right corner
// with :
// X growing from top to bottom and
// Y growing from right to left
//
// we convert those coordinates to be more natural
// with our application:
// - 0,0 is the upper left corner
// - X grows from left to right
// - Y grows from top to bottom
float wordx = - obb.getCenter().data[1];
float wordy = obb.getCenter().data[0];
// For debugging purposes convert the string to 7bit ASCII
// (if possible) and log it.
char* stringA = 0;
if (unicodeToAscii(word, stringA))
{
// we store the word
if (NbWordsFound < MAX_NB_WORDS)
{
// NOTE(review): this local `word` shadows the QCAR::Word reference above
struct WordDesc * word = & WordsFound[NbWordsFound];
NbWordsFound++;
// Copy text with explicit truncation + NUL-termination
strncpy(word->text, stringA, MAX_WORD_LENGTH - 1);
word->text[MAX_WORD_LENGTH - 1] = '\0';
// Bounding box corners: A = upper-left, B = lower-right
word->Ax = wordx - (int)(wordBoxSize.data[0] / 2);
word->Ay = wordy - (int)(wordBoxSize.data[1] / 2);
word->Bx = wordx + (int)(wordBoxSize.data[0] / 2);
word->By = wordy + (int)(wordBoxSize.data[1] / 2);
}
delete[] stringA;
}
}
}
else
{
//......... (remainder of this function omitted in the scraped source) .........
// ----------------------------------------------------------------------------
// renderFrame Method - Takes care of drawing in the different render states
// ----------------------------------------------------------------------------
// NOTE(review): this function is TRUNCATED in the scraped source (omission
// marker at the end); the closing brace and any trailing logic are missing.
JNIEXPORT void JNICALL
Java_com_qualcomm_QCARSamples_CloudRecognition_CloudRecoRenderer_renderFrame(JNIEnv *, jobject)
{
// Clear color and depth buffer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Get the state from QCAR and mark the beginning of a rendering section
QCAR::State state = QCAR::Renderer::getInstance().begin();
// Explicitly render the Video Background
QCAR::Renderer::getInstance().drawVideoBackground();
glEnable(GL_DEPTH_TEST);
glEnable(GL_CULL_FACE);
if (deleteCurrentProductTexture)
{
// Deletes the product texture if necessary
if (productTexture != 0)
{
glDeleteTextures(1, &(productTexture->mTextureID));
delete productTexture;
productTexture = 0;
}
deleteCurrentProductTexture = false;
}
// If the render state indicates that the texture is generated it generates
// the OpenGL texture for start drawing the plane with the book data
if (renderState == RS_TEXTURE_GENERATED)
{
generateProductTextureInOpenGL();
}
// Did we find any trackables this frame?
if (state.getNumTrackableResults() > 0)
{
trackingStarted = true;
// If we are already tracking something we don't need
// to wait any frame before starting the 2D transition
// when the target gets lost
pthread_mutex_lock(&framesToSkipMutex);
framesToSkipBeforeRenderingTransition = 0;
pthread_mutex_unlock(&framesToSkipMutex);
// Gets current trackable result
const QCAR::TrackableResult* trackableResult = state.getTrackableResult(0);
if (trackableResult == NULL)
{
// NOTE(review): this early return skips whatever cleanup (e.g. a
// matching Renderer::end()) follows in the omitted tail -- verify
// against the full sample source.
return;
}
modelViewMatrix = QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());
// Get the size of the ImageTarget
QCAR::ImageTargetResult *imageResult = (QCAR::ImageTargetResult *)trackableResult;
targetSize = imageResult->getTrackable().getSize();
// Renders the Augmentation View with the 3D Book data Panel
renderAugmentation(trackableResult);
}
else
{
// Manages the 3D to 2D Transition initialization
if (!scanningMode && showAnimation3Dto2D && renderState == RS_NORMAL
&& framesToSkipBeforeRenderingTransition == 0)
{
startTransitionTo2D();
}
// Reduces the number of frames to wait before triggering
// the transition by 1
if( framesToSkipBeforeRenderingTransition > 0 && renderState == RS_NORMAL)
{
pthread_mutex_lock(&framesToSkipMutex);
framesToSkipBeforeRenderingTransition -= 1;
pthread_mutex_unlock(&framesToSkipMutex);
}
}
// Logic for rendering Transition to 2D
if (renderState == RS_TRANSITION_TO_2D && showAnimation3Dto2D)
{
renderTransitionTo2D();
}
// Logic for rendering Transition to 3D
if (renderState == RS_TRANSITION_TO_3D )
{
renderTransitionTo3D();
}
//......... (remainder of this function omitted in the scraped source) .........
// Per-frame render callback for the VideoPlayback sample: tracks up to
// NUM_TARGETS image targets ("stones"/"chips"), stores each one's model-view
// matrix and half-dimensions, and renders a keyframe quad for any movie that
// is not currently playing.
// NOTE(review): this function is TRUNCATED in the scraped source (omission
// marker at the end); the rest of the keyframe/video rendering is missing.
JNIEXPORT void JNICALL
Java_com_qualcomm_QCARSamples_VideoPlayback_VideoPlaybackRenderer_renderFrame(JNIEnv *, jobject)
{
//LOG("Java_com_qualcomm_QCARSamples_VideoPlayback_GLRenderer_renderFrame");
// Clear color and depth buffer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Get the state from QCAR and mark the beginning of a rendering section
QCAR::State state = QCAR::Renderer::getInstance().begin();
// Explicitly render the Video Background
QCAR::Renderer::getInstance().drawVideoBackground();
glEnable(GL_DEPTH_TEST);
// We must detect if background reflection is active and adjust the culling direction.
// If the reflection is active, this means the post matrix has been reflected as well,
// therefore standard counter clockwise face culling will result in "inside out" models.
glEnable(GL_CULL_FACE);
glCullFace(GL_BACK);
if(QCAR::Renderer::getInstance().getVideoBackgroundConfig().mReflection == QCAR::VIDEO_BACKGROUND_REFLECTION_ON)
glFrontFace(GL_CW); //Front camera
else
glFrontFace(GL_CCW); //Back camera
// Reset per-target tracking state before scanning this frame's results
for (int i=0; i<NUM_TARGETS; i++)
{
isTracking[i] = false;
targetPositiveDimensions[i].data[0] = 0.0;
targetPositiveDimensions[i].data[1] = 0.0;
}
// Did we find any trackables this frame?
for(int tIdx = 0; tIdx < state.getNumTrackableResults(); tIdx++)
{
// Get the trackable:
const QCAR::TrackableResult* trackableResult = state.getTrackableResult(tIdx);
const QCAR::ImageTarget& imageTarget = (const QCAR::ImageTarget&) trackableResult->getTrackable();
int currentTarget;
// We store the modelview matrix to be used later by the tap calculation
if (strcmp(imageTarget.getName(), "stones") == 0)
currentTarget=STONES;
else
currentTarget=CHIPS;
modelViewMatrix[currentTarget] = QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());
isTracking[currentTarget] = true;
targetPositiveDimensions[currentTarget] = imageTarget.getSize();
// The pose delivers the center of the target, thus the dimensions
// go from -width/2 to width/2, same for height
targetPositiveDimensions[currentTarget].data[0] /= 2.0f;
targetPositiveDimensions[currentTarget].data[1] /= 2.0f;
// If the movie is ready to start playing or it has reached the end
// of playback we render the keyframe
if ((currentStatus[currentTarget] == READY) || (currentStatus[currentTarget] == REACHED_END) ||
(currentStatus[currentTarget] == NOT_READY) || (currentStatus[currentTarget] == ERROR))
{
QCAR::Matrix44F modelViewMatrixKeyframe =
QCAR::Tool::convertPose2GLMatrix(trackableResult->getPose());
QCAR::Matrix44F modelViewProjectionKeyframe;
// Lift the keyframe quad off the target plane by half the target width
SampleUtils::translatePoseMatrix(0.0f, 0.0f, targetPositiveDimensions[currentTarget].data[0],
&modelViewMatrixKeyframe.data[0]);
// Here we use the aspect ratio of the keyframe since it
// is likely that it is not a perfect square
float ratio=1.0;
if (textures[currentTarget]->mSuccess)
ratio = keyframeQuadAspectRatio[currentTarget];
else
ratio = targetPositiveDimensions[currentTarget].data[1] / targetPositiveDimensions[currentTarget].data[0];
SampleUtils::scalePoseMatrix(targetPositiveDimensions[currentTarget].data[0],
targetPositiveDimensions[currentTarget].data[0]*ratio,
targetPositiveDimensions[currentTarget].data[0],
&modelViewMatrixKeyframe.data[0]);
SampleUtils::multiplyMatrix(&projectionMatrix.data[0],
&modelViewMatrixKeyframe.data[0] ,
&modelViewProjectionKeyframe.data[0]);
glUseProgram(keyframeShaderID);
// Prepare for rendering the keyframe
glVertexAttribPointer(keyframeVertexHandle, 3, GL_FLOAT, GL_FALSE, 0,
(const GLvoid*) &quadVertices[0]);
glVertexAttribPointer(keyframeNormalHandle, 3, GL_FLOAT, GL_FALSE, 0,
(const GLvoid*) &quadNormals[0]);
glVertexAttribPointer(keyframeTexCoordHandle, 2, GL_FLOAT, GL_FALSE, 0,
(const GLvoid*) &quadTexCoords[0]);
//......... (remainder of this function omitted in the scraped source) .........
// (scraped page footer, not code: "请发表评论" = "Please leave a comment")