Archive

Posts Tagged ‘detection’

mctune: multicore hyperparameter tuning in R, with SVM-based car detection as an example

April 20, 2014

mctune

In Machine Learning (ML) tasks, finding good hyperparameters for a model is critical (hyperparameter optimization). In R, several packages already contain routines that do this for you via grid search (constructing and testing all possible parameter combinations as a grid), e.g. David Meyer’s e1071 package.
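
For comparison, a minimal single-core grid search with the stock tune routine from e1071 could look like this (the data set and parameter ranges are purely illustrative):

# minimal single-core grid search with e1071's stock tune (illustrative only)
library('e1071')
obj <- tune(svm, Species ~ ., data = iris,
            ranges = list(gamma = 2^(-4:2), cost = 2^(-2:6)),
            tunecontrol = tune.control(sampling = 'cross', cross = 10))
summary(obj)
obj$best.parameters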

Besides the very good routines already contained in those packages, a while ago I wanted multicore hyperparameter tuning and needed the true positive rate (true match rate) and true negative rate (true non-match rate) alongside the overall error rate. I therefore modified the tune method of the e1071 package to a) use multicore processing and b) return confusion matrices showing the average true positive / true negative rates per sample class and parameter setting.

The script inherits its GPL-2 licence from the original version. I modified tune specifically for my own needs when tuning SVM hyperparameters, and therefore did not validate it for correctness or check how it works with other tune-able functions (e.g. KNN, random forest).

# modified version of "tune" from package "e1071"
# licence: GPL-2
# details: http://cran.r-project.org/web/packages/e1071/index.html
mctune <- function(method, train.x, train.y = NULL, data = list(),
                 validation.x = NULL, validation.y = NULL,
                 ranges = NULL, predict.func = predict,
                 tunecontrol = tune.control(),
                 mc.control=NULL,
                 confusionmatrizes=F,
                 ...
) {
  call <- match.call()

  require('plyr')
  require('parallel')

  ## internal helper functions
  resp <- function(formula, data) {
    model.response(model.frame(formula, data))
  }

  classAgreement2 <- function (tab) {
    n <- sum(tab)
    # correct classification rate
    if (!is.null(dimnames(tab))) {
      lev <- intersect(colnames(tab), rownames(tab))
      d <- diag(tab[lev, lev])
      p0 <- sum(d) / n
    } else {
      m <- min(dim(tab))
      d <- diag(tab[1:m, 1:m])
      p0 <- sum(d) / n
    }
    # confusion matrizes
    if(!confusionmatrizes) {
      list(p0=p0)
    } else if(is.null(dimnames(tab))) {
      stop('tables without dimension names are not allowed when generating confusionmatrizes.')
    }
    else {
      # generate confusion matrix for each class
      classnames <- unique(unlist(dimnames(tab)))
      truepositives <-  unlist(lapply(classnames, function(positiveclassname) { sum(d[positiveclassname])}))
      falsepositives <- unlist(lapply(classnames, function(positiveclassname) { sum(tab[positiveclassname,])-tab[positiveclassname,positiveclassname]}))
      falsenegatives <- unlist(lapply(classnames, function(positiveclassname) { sum(tab[,positiveclassname])-tab[positiveclassname,positiveclassname]}))
      truenegatives <- mapply(FUN=function(tp,fn,fp){ sum(tab)-tp-fn-fp }, truepositives, falsenegatives, falsepositives)
      confusions <- data.frame(classnames, truepositives, truenegatives, falsepositives, falsenegatives, row.names=NULL)
      colnames(confusions) <- c('class', 'tp', 'tn', 'fp', 'fn')
      list(p0=p0, confusions=confusions)
    }
  }

  ## parameter handling
  if (tunecontrol$sampling == "cross")
    validation.x <- validation.y <- NULL
  useFormula <- is.null(train.y)
  if (useFormula && (is.null(data) || length(data) == 0))
    data <- model.frame(train.x)
  if (is.vector(train.x)) train.x <- t(t(train.x))
  if (is.data.frame(train.y))
    train.y <- as.matrix(train.y)

  ## prepare training indices
  if (!is.null(validation.x)) tunecontrol$fix <- 1
  n <- (nrow(if (useFormula) data
            else train.x))
  perm.ind <- sample(n)
  if (tunecontrol$sampling == "cross") {
    if (tunecontrol$cross > n)
      stop(sQuote("cross"), " must not exceed sampling size!")
    if (tunecontrol$cross == 1)
      stop(sQuote("cross"), " must be greater than 1!")
  }
  train.ind <- (if (tunecontrol$sampling == "cross")
                  tapply(1:n, cut(1:n, breaks = tunecontrol$cross), function(x) perm.ind[-x])
                else if (tunecontrol$sampling == "fix")
                  list(perm.ind[1:trunc(n * tunecontrol$fix)])
                else
                ## bootstrap
                  lapply(1:tunecontrol$nboot,
                         function(x) sample(n, n * tunecontrol$boot.size, replace = TRUE))
  )

  ## find best model
  parameters <- (if(is.null(ranges))
                  data.frame(dummyparameter = 0)
                else
                  expand.grid(ranges))
  p <- nrow(parameters)
  if (!is.logical(tunecontrol$random)) {
    if (tunecontrol$random < 1)
      stop("random must be a strictly positive integer")
    if (tunecontrol$random > p) tunecontrol$random <- p
    parameters <- parameters[sample(1:p, tunecontrol$random),]
  }

  ## - loop over all models
  # concatenate arbitrary mc-arguments with explicit X and FUN arguments
  train_results<-do.call(what="mclapply", args=c(mc.control, list(X=1:p, ..., FUN=function(para.set) {
    sampling.errors <- c()
    sampling.confusions <- c()

    ## - loop over all training samples
    for (sample in 1:length(train.ind)) {
      repeat.errors <- c()
      repeat.confusions <- c()

      ## - repeat training `nrepeat' times
      for (reps in 1:tunecontrol$nrepeat) {

        ## train one model
        pars <- if (is.null(ranges))
          NULL
        else
          lapply(parameters[para.set,,drop = FALSE], unlist)

        model <- if (useFormula)
          do.call(method, c(list(train.x,
                                 data = data,
                                 subset = train.ind[[sample]]),
                            pars, list(...)
          )
          )
        else
          do.call(method, c(list(train.x[train.ind[[sample]],],
                                 y = train.y[train.ind[[sample]]]),
                            pars, list(...)
          )
          )

        ## predict validation set
        pred <- predict.func(model,
                             if (!is.null(validation.x))
                               validation.x
                             else if (useFormula)
                               data[-train.ind[[sample]],,drop = FALSE]
                             else if (inherits(train.x, "matrix.csr"))
                               train.x[-train.ind[[sample]],]
                             else
                               train.x[-train.ind[[sample]],,drop = FALSE]
        )

        ## compute performance measure
        true.y <- if (!is.null(validation.y))
          validation.y
        else if (useFormula) {
          if (!is.null(validation.x))
            resp(train.x, validation.x)
          else
            resp(train.x, data[-train.ind[[sample]],])
        } else
          train.y[-train.ind[[sample]]]

        if (is.null(true.y)) true.y <- rep(TRUE, length(pred))

        if (!is.null(tunecontrol$error.fun))
          repeat.errors[reps] <- tunecontrol$error.fun(true.y, pred)
        else if ((is.logical(true.y) || is.factor(true.y)) && (is.logical(pred) || is.factor(pred) || is.character(pred))) { ## classification error
          l <- classAgreement2(table(pred, true.y))
          repeat.errors[reps] <- (1 - l$p0) # wrong classification rate
          if(confusionmatrizes) {
            repeat.confusions[[reps]] <- l$confusions
          }
        } else if (is.numeric(true.y) && is.numeric(pred)) ## mean squared error
          repeat.errors[reps] <- crossprod(pred - true.y) / length(pred)
        else
          stop("Dependent variable has wrong type!")
      }
      sampling.errors[sample] <- tunecontrol$repeat.aggregate(repeat.errors)
      # TODO potentially implement separate aggregation of tp/tn/fp/fn values. Currently the values corresponding to the smallest error are taken.
      if(confusionmatrizes) {
        sampling.confusions[[sample]] <- repeat.confusions[repeat.errors == sampling.errors[sample]][[1]]
      }
    }
    # TODO potentially implement separate aggregation of tp tn fp fn values. currently uses the same as for error / variance aggregation
    if(!confusionmatrizes) {
      list(
        model.error=tunecontrol$sampling.aggregate(sampling.errors),
        model.variance=tunecontrol$sampling.dispersion(sampling.errors))
    } else {
      # create one confusion data frame
      confusions <- ldply(sampling.confusions, data.frame)
      # calculate aggregate / disperse values per class
      confusions <- ldply(lapply(X=split(confusions, confusions$class), FUN=function(classdf) {
        class=unique(classdf$class)
        # only take numeric values
        classdf[,c('tp','tn','fp','fn')]
        # calculate aggregate / disperse values for this class
        aggregated <- apply(X=classdf[,c('tp','tn','fp','fn')], MAR=2, FUN=tunecontrol$sampling.aggregate)
        dispersions <- apply(X=classdf[,c('tp','tn','fp','fn')], MAR=2, FUN=tunecontrol$sampling.dispersion)
        # make 1 row dataframe out of it (combine rows later with outer ldply)
        t(data.frame(c(value=aggregated,dispersion=dispersions)))
      }), data.frame)
      colnames(confusions) <- c('class', 'tp.value', 'tn.value', 'fp.value', 'fn.value', 'tp.dispersion', 'tn.dispersion', 'fp.dispersion', 'fn.dispersion')
      # calculate mean confusion matrix values (mean of all classes) for best model
      confusions.mean <- data.frame(t(apply(X=confusions[,c('tp.value','tn.value','fp.value','fn.value','tp.dispersion','tn.dispersion','fp.dispersion','fn.dispersion')], MAR=2, FUN=mean)))
      colnames(confusions.mean) <- c('tp.value', 'tn.value', 'fp.value', 'fn.value', 'tp.dispersion', 'tn.dispersion', 'fp.dispersion', 'fn.dispersion')
      list(
        model.error=tunecontrol$sampling.aggregate(sampling.errors),
        model.variance=tunecontrol$sampling.dispersion(sampling.errors),
        model.confusions=confusions,
        model.confusions.mean=confusions.mean
      )
    }
  })))
#   print('mctune: mclapply done.')
#   print(train_results)
  model.errors <- unlist(lapply(train_results,function(x)x$model.error))
  model.variances <- unlist(lapply(train_results,function(x)x$model.variance))
  if(confusionmatrizes){
    model.confusions <- lapply(train_results,function(x)x$model.confusions)
    model.confusions.mean <- ldply(lapply(train_results,function(x)x$model.confusions.mean), data.frame)
  }

  ## return results
  best <- which.min(model.errors)
  pars <- if (is.null(ranges))
    NULL
  else
    lapply(parameters[best,,drop = FALSE], unlist)
  structure(list(best.parameters  = parameters[best,,drop = FALSE],
                 best.performance = model.errors[best],
                 method           = if (!is.character(method))
                   deparse(substitute(method)) else method,
                 nparcomb         = nrow(parameters),
                 train.ind        = train.ind,
                 sampling         = switch(tunecontrol$sampling,
                                           fix = "fixed training/validation set",
                                           bootstrap = "bootstrapping",
                                           cross = if (tunecontrol$cross == n) "leave-one-out" else
                                             paste(tunecontrol$cross,"-fold cross validation", sep="")
                 ),
                 performances     = if (tunecontrol$performances) cbind(parameters, error = model.errors, dispersion = model.variances),
                 confusionmatrizes = if (confusionmatrizes) model.confusions,
                 confusionmatrizes.mean = if(confusionmatrizes) model.confusions.mean,
                 best.confusionmatrizes = if(confusionmatrizes) model.confusions[[best]],
                 best.confusionmatrizes.mean = if(confusionmatrizes) model.confusions.mean[best,],
                 best.model       = if (tunecontrol$best.model) {
                   modeltmp <- if (useFormula)
                     do.call(method, c(list(train.x, data = data),
                                       pars, list(...)))
                   else
                     do.call(method, c(list(x = train.x,
                                            y = train.y),
                                       pars, list(...)))
                   call[[1]] <- as.symbol("best.tune")
                   modeltmp$call <- call
                   modeltmp
                 }
  ),
            class = "tune"
  )
}

Additional parameters

mc.control

A list of parameters that is passed on to mclapply from the parallel package. Example: mc.control=list(mc.cores=3, mc.preschedule=F)

confusionmatrizes

Logical flag indicating whether confusion matrices should be generated.

Additional return values

tune$confusionmatrizes

List of confusion matrices, sorted the same way as tune$performances. Each matrix is a data frame listing the sample classes (class) and their corresponding absolute average values (value) and standard deviations (dispersion) for true positives (tp), true negatives (tn), false positives (fp) and false negatives (fn). An example of such a confusion matrix from the car detection below could look like this (where “yes” means “car” and “no” means “no car”):

class tp.value tn.value fp.value fn.value tp.dispersion tn.dispersion fp.dispersion fn.dispersion
   no     49.5     53.4      1.6      0.5      5.212165      5.621388     0.8432740     0.5270463
  yes     53.4     49.5      0.5      1.6      5.621388      5.212165     0.5270463     0.8432740

In the above example, for the “yes” class tp.value=53.4 means that on average 53.4 samples were correctly classified as “yes” with the corresponding parameter setting.
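
From these absolute values the per-class rates can be derived directly. A small sketch, assuming a single confusion data frame cm in the format shown above:

# derive true positive rate and true negative rate per class from one
# confusion data frame as contained in tune$confusionmatrizes
rates <- function(cm) {
  data.frame(class = cm$class,
             tpr = cm$tp.value / (cm$tp.value + cm$fn.value),
             tnr = cm$tn.value / (cm$tn.value + cm$fp.value))
}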

Car detection

As ML example we do car detection: the task is to decide whether a given image contains a car or not (thereby leaving out search mechanisms such as a sliding window). As data source we use the UIUC Image Database for Car Detection. In order to reduce the number of features (pixels) and to “blur” the images (which discards unnecessary details and likely even improves recognition rates), I resized the images from the original 100×40 to 50×20 pixels; at this scale it is still easy for a human to decide whether an image shows a car, as the example images below illustrate.
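
The resizing can be done with any image tool; a rough sketch of how it could be done directly in R, assuming grayscale PNGs and averaging 2×2 pixel blocks (the file name is just a placeholder):

# rough sketch: downscale a grayscale 100x40 PNG to 50x20 by averaging 2x2 blocks
library('png')
downscale2x <- function(img) {
  out <- matrix(0, nrow(img) / 2, ncol(img) / 2)
  for (i in 1:nrow(out))
    for (j in 1:ncol(out))
      out[i, j] <- mean(img[(2 * i - 1):(2 * i), (2 * j - 1):(2 * j)])
  out
}
# writePNG(downscale2x(readPNG('pos-0.png')), 'pos-0_small.png')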

[Example images at 50×20 pixels: No car 1, No car 2, No car 3, Car 1, Car 2, Car 3]

As ML model we use a support vector machine with a radial (Gaussian) kernel, which leaves us with the cost and gamma parameters to be tuned, which we do using a grid search and 10-fold cross validation.

# load data
print('loading data...')
library('png')
car_neg <- lapply(dir(pattern="neg.*png"),readPNG)
car_pos <- lapply(dir(pattern="pos.*png"),readPNG)
data <- c(car_neg,car_pos)
labels <- factor(c(rep('no',length(car_neg)), rep('yes', length(car_pos))))
# put data to dataframe
data <- t(array(unlist(data),dim=c(length(data[[1]]),length(data))))
data<-data.frame(data)
# look at a car
# image(array(as.numeric(data[600,]),dim=c(20,50)))
# look at correlation between first pixels
# plot(data.frame(data,labels)[,1:8],pch='.') 

library('e1071')
source('mctune.R')
t<-mctune(confusionmatrizes=T,
          mc.control=list(mc.cores=3, mc.preschedule=F),
          method=svm,
          ranges=list(type='C',
                      kernel='radial',
                      gamma=3^(-10:-4),
                      cost=3^(-8:8)),
          train.x=data,
          train.y=labels,
          validation.x=NULL, # validation.x and .y are only used with tune.control sampling='fix' and 'bootstrap'
          validation.y=NULL,
          tunecontrol=tune.control(sampling='cross',
                                   cross=10,
                                   performances=T,
                                   nrepeat=1,
                                   best.model=T))
# extract FMR FNMR from our positive class
p <- lapply(X=t$confusionmatrizes, FUN=function(x){
  p <- x[x$class=='yes',]
  p[-1]
})
p <- ldply(p)
t$performances <- data.frame(t$performances, p)
t$performances$FMR <- t$performances$fp.value / (t$performances$tn.value + t$performances$fp.value)
t$performances$FNMR <- t$performances$fn.value / (t$performances$tp.value + t$performances$fn.value)
# print list of errors
t$performances[with(t$performances, order(error)),]

# different plots of parameters and errors
library('scatterplot3d')
scatterplot3d(t$performances$cost, t$performances$gamma, t$performances$error,log='xy', type='h', pch=1, color='red')
plot(t$performances[,3:5],log='xy',pch=4)
# parameters and errors: from best to worst
plot(t$performances[with(t$performances, order(error)),]$cost,log='y', col='blue')
points(t$performances[with(t$performances, order(error)),]$gamma, col='green')
points(t$performances[with(t$performances, order(error)),]$error, col='black')
# points(t$performances[with(t$performances, order(error)),]$FMR, col='red')
# points(t$performances[with(t$performances, order(error)),]$FNMR, col='orange')

Using this setup, the first search yields an error rate of about 0.02 for gamma around 10^-3 and cost >= 3 (which indicates the search area for the next step). The corresponding false match rate (FMR) and false non-match rate (FNMR) are in the range of 0.01 to 0.029.
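
A follow-up search would then use a finer grid around that region; for example something like the following, where the concrete ranges are only an illustration of how the search could be narrowed:

# follow-up search on a finer grid around the promising region (ranges illustrative)
t2 <- mctune(confusionmatrizes=T,
             mc.control=list(mc.cores=3, mc.preschedule=F),
             method=svm,
             ranges=list(type='C',
                         kernel='radial',
                         gamma=10^seq(-3.5, -2.5, by=0.25),
                         cost=3^(1:5)),
             train.x=data,
             train.y=labels,
             tunecontrol=tune.control(sampling='cross', cross=10,
                                      performances=T, nrepeat=1, best.model=T))
t2$performances[with(t2$performances, order(error)),]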

Face detection with JavaCV and different haarcascades on Android

November 5, 2011

UPDATE (2015)

The pan shot face recognition prototype from 2013 (see below) has been embedded in the prototypical face module of the mobilesec Android authentication framework. The face module uses 2D frontal-only face detection and authentication, but additionally showcases pan shot face detection and authentication. It currently uses Android 4.4 and OpenCV 2.4.10 for Android. In addition to the functionality provided in the old prototype, the module features (among others) KNN/SVM classification with training and classification both done on the device, more detailed settings that can be changed/played with, and direct access to the authentication data stored on the file system in order to manage it (as the whole thing is still a demo/showcase).

Face module of the mobilesec Android authentication framework: https://github.com/mobilesec/authentication-framework-module-face

To cite the face authentication module please again use my master thesis:

Findling, R. D. Pan Shot Face Unlock: Towards Unlocking Personal Mobile Devices using Stereo Vision and Biometric Face Information from multiple Perspectives. Department of Mobile Computing, School of Informatics, Communication and Media, University of Applied Sciences Upper Austria, 2013

UPDATE (2014)

In 2013 I finished my master thesis about the pan shot face unlock. As part of the thesis I prototypically implemented several face detection and recognition prototypes, including the pan shot face recognition prototype for Android 4.3, using OpenCV 2.4.8 for Android. This prototype features the same functionality as the old face detection demo described in this post, but extends it with face recognition based on KNN or SVM, with training and classification both done on the device. For that reason you should stick to the new code available in the following repository: https://github.com/mobilesec/panshot-face-recognition-demo

Details on the background of the prototype are available in my master thesis:

Findling, R. D. Pan Shot Face Unlock: Towards Unlocking Personal Mobile Devices using Stereo Vision and Biometric Face Information from multiple Perspectives. Department of Mobile Computing, School of Informatics, Communication and Media, University of Applied Sciences Upper Austria, 2013

UPDATE (2013)

OpenCV now features Android support natively. Therefore you should start with OpenCV for Android (http://opencv.org/platforms/android.html) and add other haar or LBP cascades there (as also done in this post).

What is HaarCascadeTypes supposed to do?
The Android app “HaarCascadeTypes” extends the app “FacePreview” from the JavaCV project homepage. It’s a very small app that demonstrates which standard OpenCV haarcascades detect which types of faces (frontal, profile, …). As it is only a demo, it is not optimized in any way (e.g. it’s quite big).

The Application
The pictures below show which types of faces are detected by which OpenCV haarcascades. The frame colors indicate which haarcascade detected the face:

  • Red: haarcascade_frontalface_alt.xml
  • Green: haarcascade_frontalface_alt2.xml
  • Blue: haarcascade_frontalface_alt_tree.xml
  • Yellow: haarcascade_frontalface_default.xml
  • White: haarcascade_profileface.xml

Download
You can download either the final apk or the complete source code of the project. In the apk only two classifiers are enabled: one for frontal and one for profile face detection. Note that detection is rather slow, as these two face detections are done separately. The Android Java part of the source is also attached at the bottom of the post for quick review. Important: the OpenCV libraries delivered with the source and apk only work on Android < 4.x. If you want to use them on Android 4.x, you will have to get the OpenCV libraries precompiled elsewhere or compile them on your own, which is obviously not the easiest task.
apk download, md5: 054292522a2062a3c6b9c6a4664a727e, sha1: 41759a699a2a1adf2e6ce3443ac427d32aae0aab
source download, md5: 78b67179e5e87ed6b1b2634c1b3f9d23, sha1: 71484d13f73ea37c0a73bd2c39aa5b30a3b27fe0

Compiling the source
There are several things you have to take care of when compiling the source on your own, e.g. you need a working Android environment. A detailed description of how to get JavaCV working for Android can be found on the JavaCV project homepage.

The Android-Java part of the source (containing the JavaCV-API calls):

/*
 * Copyright (C) 2010,2011 Samuel Audet
 *
 * FacePreview - A fusion of OpenCV's facedetect and Android's CameraPreview
 * samples, with JavaCV + JavaCPP as the glue in between.
 *
 * This file was based on CameraPreview.java that came with the Samples for
 * Android SDK API 8, revision 1 and contained the following copyright notice:
 *
 * Copyright (C) 2007 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 *
 *
 * IMPORTANT - Make sure your AndroidManifest.xml file includes the following:
 *
 * <uses-permission android:name="android.permission.CAMERA" />
 *
 */

package com.googlecode.javacv.facepreview;

import static com.googlecode.javacv.cpp.opencv_core.IPL_DEPTH_8U;
import static com.googlecode.javacv.cpp.opencv_core.cvGetSeqElem;
import static com.googlecode.javacv.cpp.opencv_core.cvLoad;
import static com.googlecode.javacv.cpp.opencv_objdetect.CV_HAAR_DO_CANNY_PRUNING;
import static com.googlecode.javacv.cpp.opencv_objdetect.cvHaarDetectObjects;

import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.List;

import android.app.Activity;
import android.app.AlertDialog;
import android.content.Context;
import android.graphics.Canvas;
import android.graphics.Color;
import android.graphics.ImageFormat;
import android.graphics.Paint;
import android.hardware.Camera;
import android.hardware.Camera.Size;
import android.os.Bundle;
import android.view.SurfaceHolder;
import android.view.SurfaceView;
import android.view.View;
import android.view.Window;
import android.view.WindowManager;
import android.widget.FrameLayout;

import com.googlecode.javacpp.Loader;
import com.googlecode.javacv.cpp.opencv_core;
import com.googlecode.javacv.cpp.opencv_objdetect;
import com.googlecode.javacv.cpp.opencv_core.CvMemStorage;
import com.googlecode.javacv.cpp.opencv_core.CvRect;
import com.googlecode.javacv.cpp.opencv_core.CvSeq;
import com.googlecode.javacv.cpp.opencv_core.IplImage;
import com.googlecode.javacv.cpp.opencv_objdetect.CvHaarClassifierCascade;

// ----------------------------------------------------------------------

public class FacePreview extends Activity {

	// ANDROID
	private FrameLayout	layout;
	private FaceView	faceView;
	private Preview		mPreview;

	@Override
	protected void onCreate(Bundle savedInstanceState) {
		super.onCreate(savedInstanceState);

		getWindow().addFlags(WindowManager.LayoutParams.FLAG_FULLSCREEN);

		// Hide the window title.
		requestWindowFeature(Window.FEATURE_NO_TITLE);

		// Create our Preview view and set it as the content of our activity.
		try {
			layout = new FrameLayout(this);
			faceView = new FaceView(this);
			mPreview = new Preview(this, faceView);
			layout.addView(mPreview);
			layout.addView(faceView);
			setContentView(layout);
		} catch (IOException e) {
			e.printStackTrace();
			new AlertDialog.Builder(this).setMessage(e.getMessage()).create().show();
		}
	}
}

// ----------------------------------------------------------------------

class FaceView extends View implements Camera.PreviewCallback {
	public static final int	SUBSAMPLING_FACTOR	= 4;

	private IplImage		grayImage;

	// HAARCASCADE TYPES
	public static enum Feature {
		FRONTALFACE_ALT, FRONTALFACE_ALT2, FRONTALFACE_ALT_TREE, FRONTALFACE_DEFAULT, PROFILEFACE
	}

	// HAARCASCADE MEMBERS
	private static HashMap<Feature, String> mClassifierFiles = new HashMap<Feature, String>();
	private static String mClassifierPrefix = "/com/googlecode/javacv/facepreview/";
	static {
		mClassifierFiles.put(Feature.FRONTALFACE_ALT, mClassifierPrefix + "haarcascade_frontalface_alt.xml");
		mClassifierFiles.put(Feature.PROFILEFACE, mClassifierPrefix + "haarcascade_profileface.xml");
		mClassifierFiles.put(Feature.FRONTALFACE_ALT_TREE, mClassifierPrefix + "haarcascade_frontalface_alt_tree.xml");
		mClassifierFiles.put(Feature.FRONTALFACE_ALT2, mClassifierPrefix + "haarcascade_frontalface_alt2.xml");
		mClassifierFiles.put(Feature.FRONTALFACE_DEFAULT, mClassifierPrefix + "haarcascade_frontalface_default.xml");
	}
	private HashMap<Feature, CvSeq> mFaces = new HashMap<Feature, CvSeq>();
	private HashMap<Feature, CvMemStorage> mStorages = new HashMap<Feature, CvMemStorage>();
	private HashMap<Feature, CvHaarClassifierCascade> mClassifiers = new HashMap<Feature, CvHaarClassifierCascade>();

	public FaceView(FacePreview context) throws IOException {
		super(context);

		// Preload the opencv_objdetect module to work around a known bug.
		Loader.load(opencv_objdetect.class);

		for (Feature f : mClassifierFiles.keySet()) {
			File classifierFile = Loader.extractResource(getClass(), mClassifierFiles.get(f), context.getCacheDir(),
					"classifier", ".xml");
			if (classifierFile == null || classifierFile.length() <= 0) {
				throw new IOException("Could not extract the classifier file from Java resource.");
			}
			mClassifiers.put(f, new CvHaarClassifierCascade(cvLoad(classifierFile.getAbsolutePath())));
			classifierFile.delete();
			if (mClassifiers.get(f).isNull()) {
				throw new IOException("Could not load the classifier file.");
			}
			mStorages.put(f, CvMemStorage.create());
		}
	}

	public void onPreviewFrame(final byte[] data, final Camera camera) {
		try {
			Camera.Size size = camera.getParameters().getPreviewSize();
			processImage(data, size.width, size.height);
			camera.addCallbackBuffer(data);
		} catch (RuntimeException e) {
			// The camera has probably just been released, ignore.
		}
	}

	protected void processImage(byte[] data, int width, int height) {
		// First, downsample our image and convert it into a grayscale IplImage
		int f = SUBSAMPLING_FACTOR;
		if (grayImage == null || grayImage.width() != width / f || grayImage.height() != height / f) {
			grayImage = IplImage.create(width / f, height / f, IPL_DEPTH_8U, 1);
		}
		int imageWidth = grayImage.width();
		int imageHeight = grayImage.height();
		int dataStride = f * width;
		int imageStride = grayImage.widthStep();
		ByteBuffer imageBuffer = grayImage.getByteBuffer();
		for (int y = 0; y < imageHeight; y++) {
			int dataLine = y * dataStride;
			int imageLine = y * imageStride;
			for (int x = 0; x < imageWidth; x++) {
				imageBuffer.put(imageLine + x, data[dataLine + f * x]);
			}
		}

		for (Feature feat : mClassifierFiles.keySet()) {
			mFaces.put(feat, cvHaarDetectObjects(grayImage, mClassifiers.get(feat), mStorages.get(feat), 1.1, 3,
					CV_HAAR_DO_CANNY_PRUNING));
			postInvalidate();
			opencv_core.cvClearMemStorage(mStorages.get(feat));
		}
	}

	@Override
	protected void onDraw(Canvas canvas) {
		Paint paint = new Paint();
		paint.setTextSize(20);

		String s = "FacePreview - This side up.";
		float textWidth = paint.measureText(s);
		canvas.drawText(s, (getWidth() - textWidth) / 2, 20, paint);

		for (Feature f : mClassifierFiles.keySet()) {
			paint.setColor(featureColor(f));
			if (mFaces.get(f) != null) {
				paint.setStrokeWidth(2);
				paint.setStyle(Paint.Style.STROKE);
				float scaleX = (float) getWidth() / grayImage.width();
				float scaleY = (float) getHeight() / grayImage.height();
				int total = mFaces.get(f).total();
				for (int i = 0; i < total; i++) {
					CvRect r = new CvRect(cvGetSeqElem(mFaces.get(f), i));
					int x = r.x(), y = r.y(), w = r.width(), h = r.height();
					canvas.drawRect(x * scaleX, y * scaleY, (x + w) * scaleX, (y + h) * scaleY, paint);
				}
			}
		}
	}

	private int featureColor(Feature _f) {
		switch (_f) {
			case FRONTALFACE_ALT:
				return Color.RED;
			case FRONTALFACE_ALT2:
				return Color.GREEN;
			case FRONTALFACE_ALT_TREE:
				return Color.BLUE;
			case FRONTALFACE_DEFAULT:
				return Color.YELLOW;
			case PROFILEFACE:
				return Color.WHITE;
			default:
				throw new NullPointerException("no color defined for this feature type: " + _f);
		}
	}
}

// ----------------------------------------------------------------------

class Preview extends SurfaceView implements SurfaceHolder.Callback {
	SurfaceHolder			mHolder;
	Camera					mCamera;
	Camera.PreviewCallback	previewCallback;

	Preview(Context context, Camera.PreviewCallback previewCallback) {
		super(context);
		this.previewCallback = previewCallback;

		// Install a SurfaceHolder.Callback so we get notified when the
		// underlying surface is created and destroyed.
		mHolder = getHolder();
		mHolder.addCallback(this);
		mHolder.setType(SurfaceHolder.SURFACE_TYPE_PUSH_BUFFERS);
	}

	public void surfaceCreated(SurfaceHolder holder) {
		// The Surface has been created, acquire the camera and tell it where
		// to draw.
		mCamera = Camera.open();
		try {
			mCamera.setPreviewDisplay(holder);
		} catch (IOException exception) {
			mCamera.release();
			mCamera = null;
			// TODO: add more exception handling logic here
		}
	}

	public void surfaceDestroyed(SurfaceHolder holder) {
		// Surface will be destroyed when we return, so stop the preview.
		// Because the CameraDevice object is not a shared resource, it's very
		// important to release it when the activity is paused.
		mCamera.stopPreview();
		mCamera.release();
		mCamera = null;
	}

	private Size getOptimalPreviewSize(List<Size> sizes, int w, int h) {
		final double ASPECT_TOLERANCE = 0.05;
		double targetRatio = (double) w / h;
		if (sizes == null)
			return null;

		Size optimalSize = null;
		double minDiff = Double.MAX_VALUE;

		int targetHeight = h;

		// Try to find an size match aspect ratio and size
		for (Size size : sizes) {
			double ratio = (double) size.width / size.height;
			if (Math.abs(ratio - targetRatio) > ASPECT_TOLERANCE)
				continue;
			if (Math.abs(size.height - targetHeight) < minDiff) {
				optimalSize = size;
				minDiff = Math.abs(size.height - targetHeight);
			}
		}

		// Cannot find the one match the aspect ratio, ignore the requirement
		if (optimalSize == null) {
			minDiff = Double.MAX_VALUE;
			for (Size size : sizes) {
				if (Math.abs(size.height - targetHeight) < minDiff) {
					optimalSize = size;
					minDiff = Math.abs(size.height - targetHeight);
				}
			}
		}
		return optimalSize;
	}

	public void surfaceChanged(SurfaceHolder holder, int format, int w, int h) {
		// Now that the size is known, set up the camera parameters and begin
		// the preview.
		Camera.Parameters parameters = mCamera.getParameters();

		List<Size> sizes = parameters.getSupportedPreviewSizes();
		Size optimalSize = getOptimalPreviewSize(sizes, w, h);
		parameters.setPreviewSize(optimalSize.width, optimalSize.height);

		mCamera.setParameters(parameters);
		if (previewCallback != null) {
			mCamera.setPreviewCallbackWithBuffer(previewCallback);
			Camera.Size size = parameters.getPreviewSize();
			byte[] data = new byte[size.width * size.height * ImageFormat.getBitsPerPixel(parameters.getPreviewFormat()) / 8];
			mCamera.addCallbackBuffer(data);
		}
		mCamera.startPreview();
	}
}