import concurrent.futures
import os
import threading
import time

import face_recognition
import joblib
import numpy as np
from sklearn import svm
# Training the SVC classifier
# The training data is all the face encodings from the known images; the labels are the people's names
tuples = []              # (face_encoding, name) pairs collected by the worker threads
lock = threading.Lock()  # guards appends to `tuples` from the worker threads
is_train = False
if not is_train:
    # Reuse a previously trained classifier if one has been saved
    try:
        clf = joblib.load('saved_model.pkl')
    except Exception:
        # Fall back to retraining if the saved model is missing or unreadable
        clf = None
    if clf is None:
        is_train = True
def train_image(image, person):
    """Encode one training image and record (encoding, name) if it contains exactly one face."""
    face = face_recognition.load_image_file(image)
    face_bounding_boxes = face_recognition.face_locations(face)
    # Only use images that contain exactly one face
    if len(face_bounding_boxes) == 1:
        face_enc = face_recognition.face_encodings(face, known_face_locations=face_bounding_boxes)[0]
        # Add the face encoding and its label (the person's name) to the training data
        with lock:
            tuples.append((face_enc, person))
    else:
        print(image + " was skipped and can't be used for training")
if is_train:
    # Training directory: one sub-directory per person, named after that person
    train_str = os.path.join('data', 'peeps', 'anchor')
    train_dir = os.listdir(train_str)
    # Encode the training images in parallel; the `with` block waits for all jobs to finish
    with concurrent.futures.ThreadPoolExecutor() as executor:
        # Loop through each person in the training directory
        for person in train_dir:
            pix_str = os.path.join(train_str, person)
            pix = os.listdir(pix_str)
            # Loop through each training image for the current person
            for person_img in pix:
                image = os.path.join(pix_str, person_img)
                executor.submit(train_image, image, person)
    # Create and train the SVC classifier, then save it for later runs
    encodings = [enc for enc, _ in tuples]
    names = [name for _, name in tuples]
    clf = svm.SVC(gamma='scale', probability=True)
    clf.fit(encodings, names)
    joblib.dump(clf, 'saved_model.pkl')
# Load the test image with unknown faces into a numpy array
test_image = face_recognition.load_image_file(os.path.join('application_data', 'input_image', 'input_image3.jpg'))
# Find all the faces in the test image using the default HOG-based model
face_locations = face_recognition.face_locations(test_image)
no = len(face_locations)
print("Number of faces detected: ", no)

# Predict all the faces in the test image using the trained classifier
print("Found:")
# Compute all encodings once, reusing the detected face locations
test_image_encs = face_recognition.face_encodings(test_image, known_face_locations=face_locations)
for test_image_enc in test_image_encs:
    start_time = time.perf_counter_ns()
    proba = clf.predict_proba([test_image_enc])[0]
    end_time = time.perf_counter_ns()
    process_time = end_time - start_time  # prediction time in nanoseconds
    best = np.argmax(proba)
    name = clf.classes_[best]
    print(name, "{:.2f}".format(proba[best]), list(proba), process_time)
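
# Optional follow-up: a minimal sketch of treating low-confidence predictions as "unknown".
# This is not part of the original script; `label_face` and the 0.60 cut-off are illustrative
# assumptions, and the threshold would need tuning for real data.
UNKNOWN_THRESHOLD = 0.60

def label_face(classifier, encoding, threshold=UNKNOWN_THRESHOLD):
    """Return (name, confidence); the name is 'unknown' when the top probability is below the threshold."""
    probabilities = classifier.predict_proba([encoding])[0]
    best_idx = int(np.argmax(probabilities))
    if probabilities[best_idx] < threshold:
        return "unknown", probabilities[best_idx]
    return classifier.classes_[best_idx], probabilities[best_idx]

# Example usage: label every face found in the test image
for enc in test_image_encs:
    print(label_face(clf, enc))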