diff --git a/Chapter2_iPhoneAR/Example_MarkerBasedAR/Example_MarkerBasedAR/VideoSource.mm b/Chapter2_iPhoneAR/Example_MarkerBasedAR/Example_MarkerBasedAR/VideoSource.mm
index 5459b7f..2fe6b1a 100644
--- a/Chapter2_iPhoneAR/Example_MarkerBasedAR/Example_MarkerBasedAR/VideoSource.mm
+++ b/Chapter2_iPhoneAR/Example_MarkerBasedAR/Example_MarkerBasedAR/VideoSource.mm
@@ -146,7 +146,7 @@ - (bool) startWithDevicePosition:(AVCaptureDevicePosition)devicePosition
     AVCaptureDeviceInput *videoIn = [AVCaptureDeviceInput deviceInputWithDevice:videoDevice error:&error];
     self.deviceInput = videoIn;
 
-    if (!error)
+    if (nil != videoIn)
     {
         if ([[self captureSession] canAddInput:videoIn])
         {
@@ -160,7 +160,7 @@ - (bool) startWithDevicePosition:(AVCaptureDevicePosition)devicePosition
     }
     else
     {
-        NSLog(@"Couldn't create video input");
+        NSLog(@"Couldn't create video input: %@", [error localizedDescription]);
         return FALSE;
     }
 }
diff --git a/Chapter4_StructureFromMotion/Triangulation.cpp b/Chapter4_StructureFromMotion/Triangulation.cpp
index 45f6118..b77ef6d 100755
--- a/Chapter4_StructureFromMotion/Triangulation.cpp
+++ b/Chapter4_StructureFromMotion/Triangulation.cpp
@@ -73,9 +73,11 @@ Mat_<double> IterativeLinearLSTriangulation(Point3d u, //homogenous image point
                                             ) {
     double wi = 1, wi1 = 1;
     Mat_<double> X(4,1);
-    for (int i=0; i<10; i++) { //Hartley suggests 10 iterations at most
-        Mat_<double> X_ = LinearLSTriangulation(u,P,u1,P1);
-        X(0) = X_(0); X(1) = X_(1); X(2) = X_(2); X(3) = 1.0;
+
+    Mat_<double> X_ = LinearLSTriangulation(u,P,u1,P1);
+    X(0) = X_(0); X(1) = X_(1); X(2) = X_(2); X(3) = 1.0;
+
+    for (int i=0; i<10; i++) { //Hartley suggests 10 iterations at most
 
         //recalculate weights
         double p2x = Mat_<double>(Mat_<double>(P).row(2)*X)(0);
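
For reference, the change above hoists the unweighted linear solve out of the loop, so each iteration reweights and refines the previous estimate instead of recomputing the same linear solution every time. The sketch below illustrates that idea as a self-contained function; it follows Hartley & Zisserman's iterative linear-LS triangulation and is not the book's exact code. Assumptions: OpenCV 2.4.x, u and u1 are homogeneous pixel coordinates (x, y, 1), and the EPS threshold value is chosen here.

// Illustration only: iterative linear-LS triangulation with per-view
// reweighting by the projective depth p3^T * X of the previous estimate.
#include <opencv2/core/core.hpp>
#include <cmath>
using namespace cv;

Mat_<double> TriangulateIterativeSketch(Point3d u,  const Matx34d &P,
                                        Point3d u1, const Matx34d &P1)
{
    const double EPS = 1e-4;          // convergence threshold (assumed value)
    double w = 1, w1 = 1;             // per-view weights (projective depths)
    Mat_<double> X(4, 1), X_;
    for (int i = 0; i < 10; i++) {    // Hartley suggests 10 iterations at most
        // Weighted linear system A * (x, y, z)^T = B; the first pass
        // (w = w1 = 1) is the plain linear solution.
        Matx43d A((u.x*P(2,0)-P(0,0))/w,    (u.x*P(2,1)-P(0,1))/w,    (u.x*P(2,2)-P(0,2))/w,
                  (u.y*P(2,0)-P(1,0))/w,    (u.y*P(2,1)-P(1,1))/w,    (u.y*P(2,2)-P(1,2))/w,
                  (u1.x*P1(2,0)-P1(0,0))/w1,(u1.x*P1(2,1)-P1(0,1))/w1,(u1.x*P1(2,2)-P1(0,2))/w1,
                  (u1.y*P1(2,0)-P1(1,0))/w1,(u1.y*P1(2,1)-P1(1,1))/w1,(u1.y*P1(2,2)-P1(1,2))/w1);
        Mat_<double> B = (Mat_<double>(4,1) << -(u.x*P(2,3)-P(0,3))/w,
                                               -(u.y*P(2,3)-P(1,3))/w,
                                               -(u1.x*P1(2,3)-P1(0,3))/w1,
                                               -(u1.y*P1(2,3)-P1(1,3))/w1);
        solve(A, B, X_, DECOMP_SVD);  // least-squares solution for (x, y, z)
        X(0) = X_(0); X(1) = X_(1); X(2) = X_(2); X(3) = 1.0;

        // Projective depth of the current estimate in each view.
        double p2x  = P(2,0)*X(0)  + P(2,1)*X(1)  + P(2,2)*X(2)  + P(2,3)*X(3);
        double p2x1 = P1(2,0)*X(0) + P1(2,1)*X(1) + P1(2,2)*X(2) + P1(2,3)*X(3);
        if (std::fabs(w - p2x) <= EPS && std::fabs(w1 - p2x1) <= EPS)
            break;                    // weights have converged
        w = p2x; w1 = p2x1;
    }
    return X;                         // homogeneous 3D point (x, y, z, 1)
}
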
diff --git a/Chapter6_NonRigidFaceTracking/README.txt b/Chapter6_NonRigidFaceTracking/README.txt
index 606c23d..7567897 100755
--- a/Chapter6_NonRigidFaceTracking/README.txt
+++ b/Chapter6_NonRigidFaceTracking/README.txt
@@ -37,8 +37,123 @@ Windows (MS Visual Studio):
 - A static library will be written to the "lib" directory.
 - The execuables can be found in the "bin" directory.
 
+
 ----------------------------------------------------------
 Running the various programs:
 ----------------------------------------------------------
 * On Linux or Mac:    ./bin/program_name --help
 * On Windows:         bin\Debug\program_name --help
+
+
+----------------------------------------------------------
+Mini tutorial:
+----------------------------------------------------------
+(Follow these steps in the order 1 to 10)
+
+1. Create "annotations.yaml":
+---------
+Usage:
+> ./annotate [-v video] [-m muct_dir] [-d output_dir]
+- video: video containing frames to annotate.
+- muct_dir: directory containing "muct-landmarks/muct76-opencv.csv", the pre-annotated MUCT dataset (http://www.milbo.org/muct/).
+- output_dir: contains the annotation file and annotated images (if using -v)
+Example:
+> mkdir muct
+> ./annotate -m ${MY_MUCT_DIR}/ -d muct/
+
+2. Visualise annotations:
+----------
+Usage:
+> ./visualize_annotations annotation_file
+Example:
+> ./visualize_annotations muct/annotations.yaml
+Keys:
+- 'p': show next image and annotations
+- 'o': show previous image and annotations
+- 'f': show flipped image and annotations
+
+3. Train shape model:
+----------
+Usage:
+> ./train_shape_model annotation_file shape_model_file [-f fraction_of_variation] [-k maximum_modes] [--mirror]
+- annotation_file: generated by "annotate"
+- shape_model_file: output YAML file containing trained shape model
+- fraction_of_variation: A fraction between 0 and 1 specifying the amount of variation to retain
+- maximum_modes: A cap on the number of modes the shape model can have
+- mirror: Use mirrored images as samples (only use if symmetry points were specified in "annotate")
+Example:
+> ./train_shape_model muct/annotations.yaml muct/shape_model.yaml
+
+4. Visualise shape model:
+-----------
+Usage:
+> ./visualize_shape_model shape_model
+- shape_model: generated using "train_shape_model"
+Example:
+> ./visualize_shape_model muct/shape_model.yaml
+
+5. Train patch detectors:
+--------------
+Usage:
+> ./train_patch_model annotation_file shape_model_file patch_model_file [-w face_width] [-p patch_size] [-s search_window_size] [--mirror]
+- annotation_file: generated by "annotate"
+- shape_model_file: generated by "train_shape_model"
+- patch_model_file: output YAML file containing trained patch model
+- face_width: How many pixels-wide the reference face is
+- patch_size: How many pixels-wide the patches are in the reference face image
+- search_window_size: How many pixels-wide the search region is
+- mirror: Use mirrored images as samples (only use if symmetry points were specified in "annotate")
+Example:
+> ./train_patch_model muct/annotations.yaml muct/shape_model.yaml muct/patch_model.yaml
+
+6. Visualise patch detectors:
+------------
+Usage:
+> ./visualize_patch_model patch_model [-w face_width]
+- patch_model: generated using "train_patch_model"
+- face_width: Width of face to visualise patches on
+Example:
+> ./visualize_patch_model muct/patch_model.yaml
+
+7. Build face detector:
+------------
+Usage:
+> ./train_face_detector detector_file annotation_file shape_model_file detector_model_file [-f min_frac_of_pts_in_det_rect] [--mirror]
+- detector_file: pre-trained OpenCV cascade face detector (look in the data directory of the OpenCV package)
+- annotation_file: generated using "annotate"
+- shape_model_file: generated using "train_shape_model"
+- detector_model_file: output YAML file containing face detector model
+- min_frac_of_pts_in_det_rect: Minimum fraction of points inside the detection window for a sample to be considered an inlier for training
+- mirror: Use mirrored images as samples (only use if symmetry points were specified in "annotate")
+Example:
+> ./train_face_detector ${MY_OPENCV_DIR}/data/lbpcascades/lbpcascade_frontalface.xml muct/annotations.yaml muct/shape_model.yaml muct/detector.yaml
+
+8. Visualise face detector:
+------------
+Usage:
+> ./visualize_face_detector detector [video_file]
+- detector: generated using "train_face_detector"
+- video_file: Optional video to test results on. Default is to use the webcam
+Example:
+> ./visualize_face_detector muct/detector.yaml
+
+9. Train face tracker:
+-----------
+Usage:
+> ./train_face_tracker shape_model_file patch_model_file face_detector_file face_tracker_file
+- shape_model_file: generated using "train_shape_model"
+- patch_model_file: generated using "train_patch_model"
+- face_detector_file: generated using "train_face_detector"
+- face_tracker_file: output YAML file containing face tracker model
+Example:
+> ./train_face_tracker muct/shape_model.yaml muct/patch_model.yaml muct/detector.yaml muct/tracker.yaml
+
+10. Test face tracker:
+----------
+Usage:
+> ./visualize_face_tracker tracker [video_file]
+- tracker: generated using "train_face_tracker"
+- video_file: Optional video to test the tracker on. Default is to use the webcam
+Example:
+> ./visualize_face_tracker muct/tracker.yaml
+
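
A note on step 7: the detector_file argument is a stock OpenCV cascade, so if train_face_detector fails to load it, the XML path can be checked on its own with plain OpenCV 2.4 calls. The program below is only a sketch and is not one of the chapter's tools; the test image name "face.jpg" and the fallback cascade path are assumptions.

// Sanity check for the cascade file used in step 7 (sketch, not part of the
// chapter's build). Pass the cascade path as the first argument.
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <cstdio>
#include <vector>
using namespace cv;

int main(int argc, char **argv)
{
    const char *cascade_path = (argc > 1) ? argv[1]
                                          : "lbpcascade_frontalface.xml";  // assumed default
    CascadeClassifier detector;
    if (!detector.load(cascade_path)) {
        std::printf("Failed to load cascade: %s\n", cascade_path);
        return 1;
    }
    Mat im = imread("face.jpg");      // any test image containing a face (assumed name)
    if (im.empty()) {
        std::printf("Failed to load test image\n");
        return 1;
    }
    Mat gray;
    cvtColor(im, gray, CV_BGR2GRAY);  // cascades expect a grayscale image
    equalizeHist(gray, gray);

    std::vector<Rect> faces;
    detector.detectMultiScale(gray, faces, 1.1, 2, 0, Size(30, 30));
    std::printf("Detected %d face(s)\n", (int)faces.size());
    return 0;
}
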
diff --git a/README.md b/README.md
index 25d32bc..80af3db 100755
--- a/README.md
+++ b/README.md
@@ -12,8 +12,8 @@ Full source-code for the book.
 --------------------------------------------------------------------------------
 To build & run the projects for the book:
 --------------------------------------------------------------------------------
-- Install OpenCV v2.4.2 or later. eg: go to "http://opencv.org/", click on
-  Downloads, download the latest OpenCV (including prebuilt library), and extract
+- Install OpenCV (versions 2.4.2 to 2.4.11 are supported; OpenCV 3.0 is not yet supported). eg: go to "http://opencv.org/", click on
+  Downloads, download the latest OpenCV 2.4 version (including prebuilt library), and extract
   it to "C:\OpenCV" for Windows or "~/OpenCV" for Linux. In OpenCV v2.4.3, the
   prebuilt OpenCV library is in "C:\OpenCV\build" or "~/OpenCV/build", such as
   "C:\OpenCV\build\x64\vc9" for MS Visual Studio 2008 (or "vs10" folder for MS
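
Since the supported range is now pinned to OpenCV 2.4.2 through 2.4.11, a project that wants to fail fast when built against OpenCV 3.x can add a small compile-time guard. This is only a suggestion sketched here, not something the book's build files currently contain; the macros come from OpenCV's own version header.

// Optional compile-time guard (sketch). CV_MAJOR_VERSION/CV_MINOR_VERSION are
// 2 and 4 for every OpenCV 2.4.x release, and differ for OpenCV 3.x.
#include <opencv2/core/version.hpp>

#if CV_MAJOR_VERSION != 2 || CV_MINOR_VERSION != 4
#  error "This project supports OpenCV 2.4.2 to 2.4.11; OpenCV 3.x is not yet supported."
#endif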