OpenCV with C++ Forum: Face recognition with the Fisherfaces algorithm always recognizes the same person for everyone, even with an identical input image, and reconstructs a black image #570331
03/11/2016
Felipe Brasil
Here is the code of the main function, which runs the face-identification loop; depending on the inputs received from the Windows Forms UI, the program performs the corresponding activity:
void RecognitionAlgo::Running()
{
// Create the cascade classifier object used for the face detection
cv::CascadeClassifier face_cascade;
// Use the haarcascade frontalface_alt.xml library
if (!face_cascade.load("haarcascade_frontalface_alt.xml"))
{
Log("Error at face Cascade Load!");
}
// Set up the image buffers used in the capture process
cv::Mat captureFrame;
cv::Mat grayscaleFrame;
cv::Mat shrinkFrame;
// Create a vector to store the faces found
std::vector<cv::Rect> faces;
std::vector<cv::Mat> Preprocessed_Faces;
std::vector<cv::string> Preprocessed_Faces_Names;
cv::string myname;
std::vector<int> faceLabels;
// Init the face Recognizer
cv::initModule_contrib();
std::string facerecAlgorithm = "FaceRecognizer.Fisherfaces";
cv::Ptr<cv::FaceRecognizer> model;
// Use OpenCV's new FaceRecognizer in the "contrib" module;
model = cv::Algorithm::create<cv::FaceRecognizer>(facerecAlgorithm);
if (model.empty())
{
std::cerr << "ERROR: FaceRecognizer" << std::endl;
}
try
{
//model->load("TrainedModel.xml");
}
catch (const std::exception&)
{
}
// Create a loop to capture and find faces
while (true)
{
// Capture a new image frame
if (swCamera)
{
captureDevice >> captureFrame;
}
else
{
captureFrame = cv::imread("face3.PNG");
}
// Convert the captured image to grayscale, equalize it, then shrink it
cv::cvtColor(captureFrame, grayscaleFrame, CV_BGR2GRAY);
cv::equalizeHist(grayscaleFrame, grayscaleFrame);
shrinkFrame = ShrinkingImage(grayscaleFrame);
// Find faces in the shrunk image (because it's faster) and store them in the vector
face_cascade.detectMultiScale(shrinkFrame, faces, 1.1, 4, CV_HAAR_FIND_BIGGEST_OBJECT | CV_HAAR_SCALE_IMAGE, cv::Size(30, 30));
if (faces.size() > 0)
faces = EnlargeResults(captureFrame, faces);
// Draw a rectangle for each face found in the vector on the original image
for (int i = 0; i < faces.size(); i++)
{
cv::Point pt1(faces[i].x + faces[i].width, faces[i].y + faces[i].height);
cv::Point pt2(faces[i].x, faces[i].y);
cv::Mat theFace = grayscaleFrame(faces[i]);
// Try to treat the face by locating the eyes; if eye detection fails, it returns theFace unchanged
cv::Mat filtered = TreatmentForFace(theFace);
// Collecting faces and learning from them.
if (starTraining && TrainName != "")
{
if (colFace)
{
Preprocessed_Faces.push_back(filtered);
if (myname == "")
{
myname = TrainName;
}
colFace = false;
}
}
else
{
if (!starTraining && Preprocessed_Faces.size() > 0)
{
// create the person folder
std::string command = "mkdir ";
std::string foldercom = "TrainingFolder\\" + myname;
command += foldercom;
system(command.c_str());
// create a string to access the recently created folder
std::string foldername = foldercom.substr(0, foldercom.size() - (myname.size() + 1));
foldername.append("/");
foldername.append(myname);
foldername.append("/");
foldername.append(myname);
// save the collected faces in the folder
for (int i = 0; i < Preprocessed_Faces.size(); i++)
{
std::ostringstream oss;
oss << i;
cv::imwrite(foldername + oss.str() + ".PNG", Preprocessed_Faces[i]);
}
myname = "";
Preprocessed_Faces.clear();
}
}
if (traiModel)
{
// read all folders in the training directory and give each one a numeric id
// read the images in each folder and add them to a vector of images
// use the folder name as the label info for all of that folder's images
Preprocessed_Faces.clear();
faceLabels.clear();
Preprocessed_Faces_Names.clear();
std::string folder = "TrainingFolder\\";
std::vector<cv::string> foldernames;
foldernames = get_all_files_names_within_folder(folder);
std::vector<int> labels;
for (int f = 0; f < foldernames.size(); f++)
{
std::string thisfoldername = folder + foldernames[f];
std::vector<cv::string> filenames;
cv::glob(thisfoldername, filenames);
Preprocessed_Faces_Names.push_back(foldernames[f]);
labels.push_back(f + 1);
for (int fn = 0; fn < filenames.size(); fn++)
{
Preprocessed_Faces.push_back(cv::imread(filenames[fn]));
//std::cout << filenames[fn] << std::endl;
faceLabels.push_back(f + 1);
}
}
cv::imwrite("Traintest.PNG", Preprocessed_Faces[0]);
std::map<int, std::string> map1;
for (int i = 0; i < Preprocessed_Faces_Names.size(); i++)
{
map1.insert(std::pair<int, std::string>(labels[i], Preprocessed_Faces_Names[i]));
std::cout << Preprocessed_Faces_Names[i] << std::endl;
}
model->setLabelsInfo(map1);
model->train(Preprocessed_Faces, faceLabels);
traiModel = false;
}
if (identif)
{
// identify the current face by looking it up in the database
// Prediction Validation
// Get some required data from the FaceRecognizer model.
cv::Mat eigenvectors = model->get<cv::Mat>("eigenvectors");
cv::Mat averageFaceRow = model->get<cv::Mat>("mean");
// Project the input image onto the eigenspace.
cv::Mat projection = cv::subspaceProject(eigenvectors, averageFaceRow, filtered.reshape(1, 1));
// Generate the reconstructed face back from the eigenspace.
cv::Mat reconstructionRow = cv::subspaceReconstruct(eigenvectors, averageFaceRow, projection);
// Make it a rectangular shaped image instead of a single row.
cv::Mat reconstructionMat = reconstructionRow.reshape(1, filtered.rows);
// Convert the floating-point pixels to regular 8-bit uchar.
cv::Mat reconstructedFace = cv::Mat(reconstructionMat.size(), CV_8U);
reconstructionMat.convertTo(reconstructedFace, CV_8U, 1, 0);
cv::imwrite("Teste.PNG", filtered);
cv::imwrite("Teste2.PNG", reconstructedFace);
int identity = model->predict(filtered);
double similarity = getSimilarity(filtered, reconstructedFace);
if (similarity > .7f)
{
//identity = -1; // -1 means that the face is not registered in the training set
}
std::cout << "This is: " << identity << " and: " << model->getLabelInfo(identity) << std::endl;
identif = false;
}
}
// Print the output
cv::resize(captureFrame, captureFrame, cv::Size(800, 600));
cv::imshow("outputCapture", captureFrame);
cv::waitKey(30);
}
}
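For reference, getSimilarity() is a helper that is not included in this post; a minimal sketch of it, assuming the common approach of returning the relative L2 error between the test face and its reconstruction (values near 0.0 meaning a close match), could look like this:

// Hypothetical sketch of the getSimilarity() helper used in the identification
// step above (the actual implementation is not shown in the post).
// It returns the L2 distance between the two images, normalized by the
// number of pixels, so values near 0.0 mean "nearly identical".
double getSimilarity(const cv::Mat& A, const cv::Mat& B)
{
    if (A.rows != B.rows || A.cols != B.cols)
        return 100000000.0; // different sizes: treat as completely different
    double errorL2 = cv::norm(A, B, cv::NORM_L2);
    return errorL2 / (double)(A.rows * A.cols);
}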
21/09/2021
Marcos
Your code is very interesting. Someone will probably help you solve it, and when they do, please teach me how to insert your code into the compiler I use, BCC 5.5.2 (Borland C++ with the Windows API in the Harbour MiniGUI Extended lib).
I would really appreciate it, and if you have already solved it, get in touch so we can formalize its sale.
Regards.