import concurrent.futures
import os
import time

import face_alignment
import joblib
import numpy as np
from skimage import io
from sklearn import svm
# Initialize the 2D facial-landmark detector (68 landmarks -> 136 floats per face).
fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=False)

# Training the SVC classifier.
# The training data is the flattened landmark vector of every known face
# image; the labels are the person (directory) names.
encodings = []
names = []
tuples = []      # (136-float landmark vector, person name) pairs collected by workers
train_list = []
lock = None      # NOTE(review): declared but never used — a lock around `tuples` was presumably intended

# Reuse a previously trained model when one exists; otherwise fall back to
# training from scratch below.
is_train = False
if not is_train:
    try:
        clf = joblib.load('saved_model_fa.pkl')
    except Exception:  # missing, unreadable, or corrupt model file -> retrain
        clf = None
    if clf is None:
        is_train = True
def train_image(image, person):
    """Extract the landmark encoding of one training image.

    Reads *image* from disk, runs the landmark detector, and appends a
    ``(136-float landmark vector, person)`` tuple to the shared ``tuples``
    list when the image contains exactly one face.  Images with zero or
    multiple faces are skipped with a message.  Runs on worker threads.
    """
    img = io.imread(image)
    preds = fa.get_landmarks(img)
    # get_landmarks returns None when no face is found — guard before len(),
    # otherwise this raises TypeError on face-free images.
    if preds is not None and len(preds) == 1:
        tuples.append((preds[0].reshape(136), person))
    else:
        print(image + " was skipped and can't be used for training")
if is_train:
    # Training directory layout: data/peeps/anchor/<person>/<image files>
    train_str = os.path.join('data', 'peeps', 'anchor')
    train_dir = os.listdir(train_str)
    futures = []
    # Fan the per-image landmark extraction out over a thread pool; the work
    # is dominated by image I/O and native landmark code, so threads overlap well.
    with concurrent.futures.ThreadPoolExecutor() as executor:
        # Loop through each person in the training directory
        for person in train_dir:
            pix_str = os.path.join(train_str, person)
            # Loop through each training image for the current person
            for person_img in os.listdir(pix_str):
                image = os.path.join(pix_str, person_img)
                futures.append(executor.submit(train_image, image, person))
    # Surface any exception raised inside a worker: a fire-and-forget
    # submit() silently swallows errors until .result() is called.
    for fut in futures:
        fut.result()
    # Create and train the SVC classifier on the collected encodings.
    encodings = [x for x, _ in tuples]
    names = [y for _, y in tuples]
    clf = svm.SVC(gamma='scale', probability=True)
    clf.fit(encodings, names)
    joblib.dump(clf, 'saved_model_fa.pkl')
# Load the test image with unknown faces into a numpy array.
test_image = io.imread(os.path.join('application_data', 'input_image', 'input_image4.jpg'))

# Find all the faces in the test image.  get_landmarks returns None when no
# face is detected, so normalise that to an empty list before len().
face_locations = fa.get_landmarks(test_image)
if face_locations is None:
    face_locations = []
no = len(face_locations)
print("Number of faces detected: ", no)

# Predict every detected face with the trained classifier.
print("Found:")
for face in face_locations:
    test_image_enc = face.reshape(136)
    start_time = time.perf_counter_ns()
    proba = clf.predict_proba([test_image_enc])
    end_time = time.perf_counter_ns()
    process_time = end_time - start_time  # nanoseconds spent in predict_proba
    classes = clf.classes_
    print(classes)
    # predict_proba returns shape (1, n_classes); pick the most likely class.
    # (Use a dedicated name instead of clobbering the loop index.)
    best = np.argmax(proba)
    proba = list(*proba)  # unpack the single row into a flat list of probabilities
    name = classes[best]
    print(name, "{:.2f}".format(proba[best]), proba, process_time)
# input = io.imread(os.path.join('application_data','input_image','input_image.jpg'))
# preds = fa.get_landmarks(input)
# print(preds)