本文整理汇总了Java中android.media.FaceDetector.Face类的典型用法代码示例。如果您正苦于以下问题:Java Face类的具体用法?Java Face怎么用?Java Face使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
Face类属于android.media.FaceDetector包,在下文中一共展示了Face类的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Java代码示例。
示例1: face2Rect
import android.media.FaceDetector.Face; //导入依赖的package包/类
/**
 * Converts an Android {@link Face} detection result into a bounding
 * {@link Rect} clipped to the image boundaries.
 *
 * The box heuristic is: one eye-distance of margin above and to the left
 * of the face midpoint, two eye-distances wide and three tall.
 *
 * @param f         the detected Android face.
 * @param imgwidth  width of the source image in pixels.
 * @param imgheight height of the source image in pixels.
 * @return a rectangle enclosing the face, clamped to the image bounds.
 */
private Rect face2Rect(Face f, int imgwidth, int imgheight)
{
    f.getMidPoint(p);
    double eyesDistance = f.eyesDistance();
    int left = (int) Math.floor(p.x - eyesDistance);
    int top = (int) Math.floor(p.y - eyesDistance);
    int width = (int) Math.ceil(2.0 * eyesDistance);
    int height = (int) Math.ceil(3.0 * eyesDistance);
    // Clamp the origin into the image first, then shrink the extent so the
    // far edge also stays inside.
    left = Math.max(left, 0);
    top = Math.max(top, 0);
    if (top + height > imgheight) {
        height = imgheight - top;
    }
    if (left + width > imgwidth) {
        width = imgwidth - left;
    }
    Rect result = new Rect();
    result.set(left, top, left + width, top + height);
    return result;
}
开发者ID:uberspot,项目名称:Offline3fAuth,代码行数:33,代码来源:SkinFaceDetector.java
示例2: SkinFaceDetector
import android.media.FaceDetector.Face; //导入依赖的package包/类
/**
 * Creates an instance of {@link SkinFaceDetector} configured for a
 * 640x480 frame and up to {@code NUMBER_OF_FACES} detections per image.
 */
public SkinFaceDetector() {
    width = 640;
    height = 480;
    // The detector reports at most NUMBER_OF_FACES faces per image, so the
    // result buffer is sized to match.
    myFace = new FaceDetector.Face[NUMBER_OF_FACES];
    myFaceDetect = new FaceDetector(width, height, NUMBER_OF_FACES);
    p = new PointF();
    faces = new LinkedList<Rect>();
    time = 0;
}
开发者ID:uberspot,项目名称:Offline3fAuth,代码行数:14,代码来源:SkinFaceDetector.java
示例3: onFaceDetection
import android.media.FaceDetector.Face; //导入依赖的package包/类
/**
 * Camera face-detection callback: shows/hides the face overlay and, when
 * the detected faces have moved enough, restarts autofocus with focus and
 * metering areas centred on each face.
 */
@Override
public void onFaceDetection(android.hardware.Camera.Face[] faces,
        Camera camera) {
    // User preference for face tracking; defaults to off when unset.
    boolean isfacedet = StoredData.getBoolean(StoredData.M_FACETRACKING,
            false);
    // Skip entirely when tracking is disabled or the front camera is
    // active (the overlay is hidden in both cases).
    if (!isfacedet || (m_cameraIndex == m_camera_front)) {
        WiCameraActivity.m_fd_face.setVisibility(View.GONE);
        return;
    }
    if (m_isfacedetection) {
        WiCameraActivity.m_fd_face.setVisibility(View.VISIBLE);
        // Snapshot this frame's face bounding rects for movement comparison.
        Rect[] rectarrayRects = new Rect[faces.length];
        for (int i = 0; i < faces.length; i++) {
            rectarrayRects[i] = faces[i].rect;
        }
        // Refocus only when the faces moved noticeably since the last frame.
        boolean isneedfocus = isNeedFocus(oldRect, rectarrayRects);
        WiCameraActivity.m_fd_face.setFaces(faces);
        if (isneedfocus) {
            // Cancel any previous focus run (finished or in progress)
            // before starting a new one.
            if ((mFocusArea != null)
                    && (FOCU_STATE == STATE_SUCCESS
                    || FOCU_STATE == STATE_FAIL || FOCU_STATE == STATE_FOCUSING)) {
                mFocusArea = null;
                mMeteringArea = null;
                cameras.cancelAutoFocus();
                FOCU_STATE = STATE_IDLE;
            }
            mFocusArea = new ArrayList<Camera.Area>();
            mMeteringArea = new ArrayList<Camera.Area>();
            if (cameras != null) {
                // One focus/metering area per face, each with weight 100;
                // applied to the camera by setArea() below.
                for (int i = 0; i < faces.length; i++) {
                    mFocusArea.add(new Area(faces[i].rect, 100));
                    mMeteringArea.add(new Area(faces[i].rect, 100));
                }
            }
            setArea();
        }
        oldRect = rectarrayRects;
    }
}
开发者ID:hubert1002,项目名称:WiCamera3D,代码行数:46,代码来源:VideoSurfaceView2D.java
示例4: onFaceDetection
import android.media.FaceDetector.Face; //导入依赖的package包/类
/**
 * Camera face-detection callback: shows/hides the face overlay and, when
 * the detected faces have moved enough, restarts autofocus with focus and
 * metering areas centred on each face. Face tracking is additionally
 * suppressed while recording video, during continuous shooting, or when
 * the camera is not open.
 */
@Override
public void onFaceDetection(android.hardware.Camera.Face[] faces,
        Camera camera) {
    // User preference for face tracking; defaults to off when unset.
    boolean isfacedet = StoredData.getBoolean(StoredData.M_FACETRACKING,
            false);
    // Skip when tracking is disabled, the front camera is active, video is
    // recording, continuous mode is on, or the camera is closed.
    if (!isfacedet
            || (m_cameraIndex == m_camera_front
            || Util.CAMERA_STATE == CAMERA_VIDEO
            || WiCameraActivity.isContinus || (!WiCameraActivity.isCameraOpen))) {
        WiCameraActivity.m_fd_face.setVisibility(View.GONE);
        return;
    }
    if (m_isfacedetection) {
        WiCameraActivity.m_fd_face.setVisibility(View.VISIBLE);
        // Snapshot this frame's face bounding rects for movement comparison.
        Rect[] rectarrayRects = new Rect[faces.length];
        for (int i = 0; i < faces.length; i++) {
            rectarrayRects[i] = faces[i].rect;
        }
        // Refocus only when the faces moved noticeably since the last frame.
        boolean isneedfocus = isNeedFocus(oldRect, rectarrayRects);
        WiCameraActivity.m_fd_face.setFaces(faces);
        if (isneedfocus) {
            // Cancel a finished focus run before starting a new one. NOTE
            // (review): unlike the 2D variant, STATE_FOCUSING is not
            // cancelled here — an in-flight focus is allowed to complete.
            if ((mFocusArea != null)
                    && (FOCU_STATE == STATE_SUCCESS || FOCU_STATE == STATE_FAIL)) {
                mFocusArea = null;
                mMeteringArea = null;
                cameras.cancelAutoFocus();
                FOCU_STATE = STATE_IDLE;
            }
            mFocusArea = new ArrayList<Camera.Area>();
            mMeteringArea = new ArrayList<Camera.Area>();
            if (cameras != null) {
                // One focus/metering area per face, each with weight 100;
                // applied to the camera by setArea() below.
                for (int i = 0; i < faces.length; i++) {
                    mFocusArea.add(new Area(faces[i].rect, 100));
                    mMeteringArea.add(new Area(faces[i].rect, 100));
                }
            }
            setArea();
        }
        oldRect = rectarrayRects;
    }
}
开发者ID:hubert1002,项目名称:WiCamera3D,代码行数:53,代码来源:VideoSurfaceView.java
示例5: cutFaces
import android.media.FaceDetector.Face; //导入依赖的package包/类
/**
 * Crops {@code photo} to a fixed-height banner, positioning the crop so
 * that the top-most detected face (its eye line) stays visible. Falls back
 * to a vertically centred crop when no face is found, and returns the
 * original bitmap if anything goes wrong.
 *
 * Fixes over the previous version: the crop offset is clamped into the
 * bitmap (a face near the top edge used to yield a negative offset and
 * make {@link Bitmap#createBitmap} throw), the crop height never exceeds
 * the bitmap height (short photos used to throw as well), and the face
 * array size matches the detector's max-faces setting.
 *
 * @param photo source bitmap; may be {@code null}.
 * @return the cropped bitmap, the unmodified photo on failure, or
 *         {@code null} when {@code photo} is {@code null}.
 */
private Bitmap cutFaces(Bitmap photo) {
    if (photo == null) {
        return null;
    }
    try {
        // FaceDetector only operates on RGB_565 bitmaps.
        photo = convert(photo, Bitmap.Config.RGB_565);
        final int maxFaces = 5;
        int finalHeight = BitmapHelper.convertDPtoPX(174, mBaseActivity);
        // Never request more rows than the source bitmap has.
        int cropHeight = Math.min(finalHeight, photo.getHeight());
        FaceDetector detector = new FaceDetector(photo.getWidth(), photo.getHeight(), maxFaces);
        Face[] faces = new Face[maxFaces];
        int facesCount = detector.findFaces(photo, faces);
        // Track the highest (smallest y) eye line among detected faces;
        // one eye-distance above the midpoint approximates the forehead.
        float top = photo.getHeight();
        for (int i = 0; i < facesCount; i++) {
            if (faces[i] == null) {
                continue;
            }
            PointF point = new PointF();
            faces[i].getMidPoint(point);
            float y = point.y - faces[i].eyesDistance();
            if (y < top) {
                top = y;
            }
        }
        int cropTop;
        if (facesCount > 0) {
            // Clamp the offset so the crop window stays inside the bitmap.
            cropTop = Math.max(0, Math.min((int) top, photo.getHeight() - cropHeight));
        } else {
            // No faces detected: centre the crop vertically.
            cropTop = Math.max(0, photo.getHeight() / 2 - cropHeight / 2);
        }
        return Bitmap.createBitmap(photo, 0, cropTop, photo.getWidth(), cropHeight);
    } catch (Exception e) {
        // Best-effort: any failure (conversion, detection, crop) degrades
        // to returning the unmodified photo.
        return photo;
    }
}
开发者ID:MatejVancik,项目名称:amaroKontrol,代码行数:42,代码来源:GetCollectionPhotosTask.java
示例6: detect
import android.media.FaceDetector.Face; //导入依赖的package包/类
/**
 * Runs Android {@link FaceDetector} face detection on a raw RGBA frame
 * shared via {@code frameData} and reports the results through
 * {@code callback}. Invalid arguments or a failed buffer mapping yield an
 * empty result array. The actual detection runs on a background thread.
 *
 * @param frameData shared buffer holding width*height 4-byte RGBA pixels.
 * @param width     frame width in pixels; must be positive.
 * @param height    frame height in pixels; must be positive.
 * @param callback  receives the detected faces (possibly empty), each as a
 *                  bounding box derived from the midpoint and eye distance.
 */
@Override
public void detect(SharedBufferHandle frameData, final int width, final int height,
        final DetectResponse callback) {
    final long numPixels = (long) width * height;
    // TODO(xianglu): https://crbug.com/670028 homogeneize overflow checking.
    if (!frameData.isValid() || width <= 0 || height <= 0 || numPixels > (Long.MAX_VALUE / 4)) {
        Log.d(TAG, "Invalid argument(s).");
        callback.call(new FaceDetectionResult[0]);
        return;
    }
    // Map the whole frame (4 bytes per pixel) into this process.
    ByteBuffer imageBuffer = frameData.map(0, numPixels * 4, MapFlags.none());
    if (imageBuffer.capacity() <= 0) {
        Log.d(TAG, "Failed to map from SharedBufferHandle.");
        callback.call(new FaceDetectionResult[0]);
        return;
    }
    Bitmap bitmap = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
    // An int array is needed to construct a Bitmap. However the Bytebuffer
    // we get from |sharedBufferHandle| is directly allocated and does not
    // have a supporting array. Therefore we need to copy from |imageBuffer|
    // to create this intermediate Bitmap.
    // TODO(xianglu): Consider worker pool as appropriate threads.
    // http://crbug.com/655814
    bitmap.copyPixelsFromBuffer(imageBuffer);
    // A Bitmap must be in 565 format for findFaces() to work. See
    // http://androidxref.com/7.0.0_r1/xref/frameworks/base/media/java/android/media/FaceDetector.java#124
    //
    // It turns out that FaceDetector is not able to detect correctly if
    // simply using pixmap.setConfig(). The reason might be that findFaces()
    // needs non-premultiplied ARGB arrangement, while the alpha type in the
    // original image is premultiplied. We can use getPixels() which does
    // the unmultiplication while copying to a new array. See
    // http://androidxref.com/7.0.0_r1/xref/frameworks/base/graphics/java/android/graphics/Bitmap.java#538
    int[] pixels = new int[width * height];
    bitmap.getPixels(pixels, 0, width, 0, 0, width, height);
    final Bitmap unPremultipliedBitmap =
            Bitmap.createBitmap(pixels, width, height, Bitmap.Config.RGB_565);
    // FaceDetector creation and findFaces() might take a long time and trigger a
    // "StrictMode policy violation": they should happen in a background thread.
    AsyncTask.THREAD_POOL_EXECUTOR.execute(new Runnable() {
        @Override
        public void run() {
            final FaceDetector detector = new FaceDetector(width, height, mMaxFaces);
            Face[] detectedFaces = new Face[mMaxFaces];
            // findFaces() will stop at |mMaxFaces|.
            final int numberOfFaces = detector.findFaces(unPremultipliedBitmap, detectedFaces);
            FaceDetectionResult[] faceArray = new FaceDetectionResult[numberOfFaces];
            for (int i = 0; i < numberOfFaces; i++) {
                faceArray[i] = new FaceDetectionResult();
                final Face face = detectedFaces[i];
                final PointF midPoint = new PointF();
                face.getMidPoint(midPoint);
                final float eyesDistance = face.eyesDistance();
                // Bounding box: a square of side 2*eyesDistance centred on
                // the midpoint between the eyes.
                faceArray[i].boundingBox = new RectF();
                faceArray[i].boundingBox.x = midPoint.x - eyesDistance;
                faceArray[i].boundingBox.y = midPoint.y - eyesDistance;
                faceArray[i].boundingBox.width = 2 * eyesDistance;
                faceArray[i].boundingBox.height = 2 * eyesDistance;
                // TODO(xianglu): Consider adding Face.confidence and Face.pose.
                faceArray[i].landmarks = new Landmark[0];
            }
            callback.call(faceArray);
        }
    });
}
开发者ID:mogoweb,项目名称:365browser,代码行数:77,代码来源:FaceDetectionImpl.java
示例7: setFaces
import android.media.FaceDetector.Face; //导入依赖的package包/类
/**
 * Stores the detected faces used for cropping and re-applies the crop.
 * Passing {@code null} or an empty array clears any previous detections.
 *
 * @param faces the detected faces, or {@code null}/empty to clear.
 */
public void setFaces(final Face[] faces) {
    final boolean hasFaces = (faces != null) && (faces.length > 0);
    mDetectedFaces = hasFaces ? faces : null;
    applyCrop();
}
开发者ID:Subito-it,项目名称:Masaccio,代码行数:14,代码来源:MasaccioImageView.java
示例8: getFaceOffsets
import android.media.FaceDetector.Face; //导入依赖的package包/类
/**
 * Computes crop offsets that keep the most confident detected face at the
 * position given by FACE_POSITION_RATIO_X/Y, clamped into [0, maxOffset]
 * whenever the maximum offset is non-negative. Falls back to the default
 * offsets when no usable face exists or detection data is unusable.
 *
 * @param offsets out-parameter; [0] receives the x offset, [1] the y offset.
 */
private void getFaceOffsets(final Face[] faces, final float[] offsets, final float scaleFactor,
        final float width, final float height, final float maxOffsetX, final float maxOffsetY) {
    try {
        // Pick the detection with the highest strictly-positive confidence.
        Face chosen = null;
        float bestScore = 0;
        for (final Face candidate : faces) {
            final float score = candidate.confidence();
            if (score > bestScore) {
                bestScore = score;
                chosen = candidate;
            }
        }
        if (chosen == null) {
            getDefaultOffsets(offsets, maxOffsetX, maxOffsetY);
            return;
        }
        final PointF centre = new PointF();
        chosen.getMidPoint(centre);
        // Offset that would place the face midpoint at the desired ratio of
        // the visible (post-crop) area.
        final float offsetX =
                (centre.x * scaleFactor) - ((width - maxOffsetX) * FACE_POSITION_RATIO_X);
        final float offsetY =
                (centre.y * scaleFactor) - ((height - maxOffsetY) * FACE_POSITION_RATIO_Y);
        offsets[0] = (Math.round(maxOffsetX) >= 0)
                ? Math.min(Math.max(0, offsetX), maxOffsetX) : offsetX;
        offsets[1] = (Math.round(maxOffsetY) >= 0)
                ? Math.min(Math.max(0, offsetY), maxOffsetY) : offsetY;
    } catch (final Exception e) {
        // Defensive: any detector hiccup degrades to the default offsets.
        getDefaultOffsets(offsets, maxOffsetX, maxOffsetY);
    }
}
开发者ID:Subito-it,项目名称:Masaccio,代码行数:62,代码来源:MasaccioImageView.java
示例9: process
import android.media.FaceDetector.Face; //导入依赖的package包/类
/**
 * Runs face detection on the given bitmap, caching the result per bitmap
 * so repeated calls are free. A NO_FACES sentinel is cached when nothing
 * is found, so unsuccessful detections are not retried either.
 *
 * @param bitmap the image to analyse.
 * @return the detected faces, or {@code null} when none were found.
 */
@Override
public Face[] process(final Bitmap bitmap) {
    final Map<Bitmap, Face[]> cache = mFacesMap;
    // Serve cached results (including the "no faces" sentinel) first.
    final Face[] cached = cache.get(bitmap);
    if (cached != null) {
        return (cached == NO_FACES) ? null : cached;
    }
    // FaceDetector only operates on RGB_565 bitmaps.
    final Bitmap bitmap565 = convertTo565(bitmap);
    if (bitmap565 != null) {
        final FaceDetector faceDetector =
                new FaceDetector(bitmap565.getWidth(), bitmap565.getHeight(), MAX_FACES);
        final Face[] buffer = new Face[MAX_FACES];
        final int found = faceDetector.findFaces(bitmap565, buffer);
        if (found > 0) {
            // Trim the fixed-size buffer to the number actually detected.
            final Face[] result = new Face[found];
            System.arraycopy(buffer, 0, result, 0, found);
            cache.put(bitmap, result);
            return result;
        }
    }
    cache.put(bitmap, NO_FACES);
    return null;
}
开发者ID:Subito-it,项目名称:Masaccio,代码行数:45,代码来源:MasaccioImageView.java
示例10: getFaces
import android.media.FaceDetector.Face; //导入依赖的package包/类
/**
 * Returns the cached detection result for the given bitmap, or
 * {@code null} when no result has been cached for it.
 */
private Face[] getFaces(final Bitmap bitmap) {
    final Face[] cachedFaces = mFacesMap.get(bitmap);
    return cachedFaces;
}
开发者ID:Subito-it,项目名称:Masaccio,代码行数:5,代码来源:MasaccioImageView.java
注:本文中的android.media.FaceDetector.Face类示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论