kevin 2 years ago
Parent
Commit
f9d2399113
6 changed files with 0 additions and 628 deletions
  1. face_align.py (+0 -92)
  2. main.py (+0 -284)
  3. take_picture.py (+0 -47)
  4. take_picture_snn.py (+0 -52)
  5. verify_face.py (+0 -99)
  6. verify_face_2.py (+0 -54)

+ 0 - 92
face_align.py

@@ -1,92 +0,0 @@
-import face_alignment
-from skimage import io
-import joblib
-import os
-from sklearn import svm
-import concurrent.futures
-import numpy as np
-import time
-
-fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=False)
-
-# Training the SVC classifier
-
-# The training data are the flattened landmark vectors from all known images; the labels are the person names
-encodings = []
-names = []
-tuples = []
-train_list = []
-lock = None
-
-is_train = False
-
-if not is_train:
-    try:
-        clf = joblib.load('saved_model_fa.pkl')
-    except OSError:
-        clf = None
-    if clf is None:
-        is_train = True
-
-def train_image(image, person):
-    img = io.imread(image)
-    preds = fa.get_landmarks(img)
-
-    # If the training image contains exactly one face (get_landmarks returns None when no face is found)
-    if preds is not None and len(preds) == 1:
-        tuples.append((preds[0].reshape(136), person))
-    else:
-        print(image + " was skipped and can't be used for training")
-
-if is_train:
-    # Training directory
-    train_str = os.path.join('data', 'peeps', 'anchor')
-    train_dir = os.listdir(train_str)
-
-    # Loop through each person in the training directory
-    with concurrent.futures.ThreadPoolExecutor() as executor:
-        for person in train_dir:
-            pix_str = os.path.join(train_str, person)
-            pix = os.listdir(pix_str)
-
-            # Loop through each training image for the current person
-            for person_img in pix:
-                # Get the face encodings for the face in each image file
-                image = os.path.join(pix_str, person_img)
-                executor.submit(train_image, image, person)
-
-    # Create and train the SVC classifier
-
-    encodings = [x for x, _ in tuples]
-    names = [y for _, y in tuples]
-
-    clf = svm.SVC(gamma='scale', probability=True)
-    clf.fit(encodings, names)
-    joblib.dump(clf, 'saved_model_fa.pkl')
-
-# Load the test image with unknown faces into a numpy array
-test_image = io.imread(os.path.join('application_data', 'input_image', 'input_image4.jpg'))
-
-# Find all the faces in the test image using the default HOG-based model
-face_locations = fa.get_landmarks(test_image)
-no = 0 if face_locations is None else len(face_locations)
-print("Number of faces detected: ", no)
-
-# Predict all the faces in the test image using the trained classifier
-print("Found:")
-for i in range(no):
-    test_image_enc = face_locations[i].reshape(136)
-    start_time = time.perf_counter_ns()
-    proba = clf.predict_proba([test_image_enc])
-    end_time = time.perf_counter_ns()
-    process_time = end_time - start_time
-    classes = clf.classes_
-    print(classes)
-    best = np.argmax(proba)  # don't reuse the loop variable i for the argmax index
-    proba = list(proba[0])
-    name = classes[best]
-    print(name, "{:.2f}".format(proba[best]), proba, process_time)
-
-# input = io.imread(os.path.join('application_data','input_image','input_image.jpg'))
-# preds = fa.get_landmarks(input)
-# print(preds)

+ 0 - 284
main.py

@@ -1,284 +0,0 @@
-# Import standard dependencies
-import cv2
-import os
-import uuid
-import random
-import numpy as np
-from matplotlib import pyplot as plt
-
-# Import tensorflow dependencies - Functional API
-from tensorflow.keras.models import Model
-from tensorflow.keras.layers import Layer, Conv2D, Dense, MaxPooling2D, Input, Flatten
-from tensorflow.keras.metrics import Precision, Recall
-import tensorflow as tf
-
-train_mode = False
-retrain_mode = False
-MODEL_NAME = "siamesemodelv4.h5"
-
-# Avoid OOM errors by setting GPU Memory Consumption Growth
-gpus = tf.config.experimental.list_physical_devices('GPU')
-for gpu in gpus:
-    tf.config.experimental.set_memory_growth(gpu, True)
-
-# Setup paths
-POS_PATH = os.path.join('data', 'positive')
-NEG_PATH = os.path.join('data', 'negative')
-ANC_PATH = os.path.join('data', 'anchor')
-
-
-def data_aug(img):
-    data = []
-    for i in range(9):
-        img = tf.image.stateless_random_brightness(img, max_delta=0.02, seed=(1, 2))
-        img = tf.image.stateless_random_contrast(img, lower=0.6, upper=1, seed=(1, 3))
-        # img = tf.image.stateless_random_crop(img, size=(20,20,3), seed=(1,2))
-        img = tf.image.stateless_random_flip_left_right(img, seed=(np.random.randint(100), np.random.randint(100)))
-        img = tf.image.stateless_random_jpeg_quality(img, min_jpeg_quality=90, max_jpeg_quality=100,
-                                                     seed=(np.random.randint(100), np.random.randint(100)))
-        img = tf.image.stateless_random_saturation(img, lower=0.9, upper=1,
-                                                   seed=(np.random.randint(100), np.random.randint(100)))
-
-        data.append(img)
-
-    return data
-
-
-def preprocess(file_path):
-    # Read in image from file path
-    byte_img = tf.io.read_file(file_path)
-    # Load in the image
-    img = tf.io.decode_jpeg(byte_img)
-
-    # Preprocessing steps - resizing the image to be 100x100x3
-    img = tf.image.resize(img, (100, 100))
-    # Scale image to be between 0 and 1
-    img = img / 255.0
-
-    # Return image
-    return img
-
-
-def preprocess_twin(input_img, validation_img, label):
-    return (preprocess(input_img), preprocess(validation_img), label)
-
-
-# for file_name in os.listdir(os.path.join(ANC_PATH)):
-#     img_path = os.path.join(ANC_PATH, file_name)
-#     img = cv2.imread(img_path)
-#     augmented_images = data_aug(img)
-#
-#     for image in augmented_images:
-#         cv2.imwrite(os.path.join(ANC_PATH, 'surya-{}.jpg'.format(uuid.uuid1())), image.numpy())
-#
-# for file_name in os.listdir(os.path.join(POS_PATH)):
-#     img_path = os.path.join(POS_PATH, file_name)
-#     img = cv2.imread(img_path)
-#     augmented_images = data_aug(img)
-#
-#     for image in augmented_images:
-#         cv2.imwrite(os.path.join(POS_PATH, 'surya-{}.jpg'.format(uuid.uuid1())), image.numpy())
-
-anchor = tf.data.Dataset.list_files(ANC_PATH + '/*.jpg').take(2000)
-positive = tf.data.Dataset.list_files(POS_PATH + '/*.jpg').take(2000)
-negative = tf.data.Dataset.list_files(NEG_PATH + '/*.jpg').take(2000)
-
-dir_test = anchor.as_numpy_iterator()
-
-positives = tf.data.Dataset.zip((anchor, positive, tf.data.Dataset.from_tensor_slices(tf.ones(len(anchor)))))
-negatives = tf.data.Dataset.zip((anchor, negative, tf.data.Dataset.from_tensor_slices(tf.zeros(len(anchor)))))
-data = positives.concatenate(negatives)
-
-samples = data.as_numpy_iterator()
-
-example = samples.next()
-
-res = preprocess_twin(*example)
-
-data = data.map(preprocess_twin)
-data = data.cache()
-data = data.shuffle(buffer_size=10000)
-
-train_data = data.take(round(len(data) * .7))
-train_data = train_data.batch(16)
-train_data = train_data.prefetch(8)
-
-test_data = data.skip(round(len(data) * .7))
-test_data = test_data.take(round(len(data) * .3))
-test_data = test_data.batch(16)
-test_data = test_data.prefetch(8)
-
-
-def save_model(model):
-    model.save(MODEL_NAME)
-
-
-def load_model():
-    model = None
-    if retrain_mode:
-        return model
-    try:
-        model = tf.keras.models.load_model(
-            MODEL_NAME,
-            custom_objects={'L1Dist': L1Dist, 'BinaryCrossentropy': tf.losses.BinaryCrossentropy})
-    except (ImportError, IOError):
-        return None
-    return model
-
-
-# Build embedding layer
-
-def make_embedding():
-    inp = Input(shape=(100, 100, 3), name='input_image')
-    c1 = Conv2D(64, (10, 10), activation='relu')(inp)
-    m1 = MaxPooling2D(64, (2, 2), padding='same')(c1)
-    c2 = Conv2D(128, (7, 7), activation='relu')(m1)
-    m2 = MaxPooling2D(64, (2, 2), padding='same')(c2)
-    c3 = Conv2D(128, (4, 4), activation='relu')(m2)
-    m3 = MaxPooling2D(64, (2, 2), padding='same')(c3)
-    c4 = Conv2D(256, (4, 4), activation='relu')(m3)
-    f1 = Flatten()(c4)
-    d1 = Dense(4096, activation='sigmoid')(f1)
-    return Model(inputs=[inp], outputs=[d1], name='embedding')
-
-
-embedding = make_embedding()
-
-
-# Siamese L1 Distance class
-class L1Dist(Layer):
-
-    # Init method - inheritance
-    def __init__(self, **kwargs):
-        super().__init__(**kwargs)  # forward kwargs so the layer deserializes correctly
-
-    # Magic happens here - similarity calculation
-    def call(self, input_embedding, validation_embedding):
-        return tf.math.abs(input_embedding - validation_embedding)
-
-
-def make_siamese():
-    input_image = Input(name='input_img', shape=(100, 100, 3))
-    validation_image = Input(name='validation_img', shape=(100, 100, 3))
-    inp_embedding = embedding(input_image)
-    val_embedding = embedding(validation_image)
-    siamese_layer = L1Dist()
-    distances = siamese_layer(inp_embedding, val_embedding)
-    classifier = Dense(1, activation='sigmoid')(distances)
-    return Model(inputs=[input_image, validation_image], outputs=classifier, name='SiameseNetwork')
-
-
-# Training
-siamese_model = load_model()
-if siamese_model is None:
-    siamese_model = make_siamese()
-    retrain_mode = True
-
-binary_cross_loss = tf.losses.BinaryCrossentropy()
-opt = tf.keras.optimizers.Adam(1e-4)
-
-
-# checkpoint_dir = './training_checkpoints'
-# checkpoint_prefix = os.path.join(checkpoint_dir, 'ckpt')
-# checkpoint = tf.train.Checkpoint(opt=opt, siamese_model=siamese_model)
-
-
-@tf.function
-def train_step(batch):
-    # Record all of our operations
-    with tf.GradientTape() as tape:
-        # Get anchor and positive/negative image
-        X = batch[:2]
-        # Get label
-        y = batch[2]
-
-        # Forward pass
-        yhat = siamese_model(X, training=True)
-        # Calculate loss
-        loss = binary_cross_loss(y, yhat)
-    tf.print(loss)  # inside @tf.function, plain print fires only at trace time; tf.print runs every step
-
-    # Calculate gradients
-    grad = tape.gradient(loss, siamese_model.trainable_variables)
-
-    # Calculate updated weights and apply to siamese model
-    opt.apply_gradients(zip(grad, siamese_model.trainable_variables))
-
-    # Return loss
-    return loss
-
-
-def train(data, EPOCHS):
-    # Loop through epochs
-    for epoch in range(1, EPOCHS + 1):
-        print('\n Epoch {}/{}'.format(epoch, EPOCHS))
-        progbar = tf.keras.utils.Progbar(len(data))
-
-        # Creating a metric object
-        r = Recall()
-        p = Precision()
-
-        # Loop through each batch
-        for idx, batch in enumerate(data):
-            # Run train step here
-            loss = train_step(batch)
-            yhat = siamese_model.predict(batch[:2])
-            r.update_state(batch[2], yhat)
-            p.update_state(batch[2], yhat)
-            progbar.update(idx + 1)
-        print(loss.numpy(), r.result().numpy(), p.result().numpy())
-
-        # Save checkpoints
-        # if epoch % 10 == 0:
-        #     checkpoint.save(file_prefix=checkpoint_prefix)
-
-
-def predict():
-    # test_input, test_val, y_true = test_data.as_numpy_iterator().next()
-    # y_hat = siamese_model.predict([test_input, test_val])
-
-    r = Recall()
-    p = Precision()
-
-    for test_input, test_val, y_true in test_data.as_numpy_iterator():
-        yhat = siamese_model.predict([test_input, test_val])
-        r.update_state(y_true, yhat)
-        p.update_state(y_true, yhat)
-
-    print(r.result().numpy(), p.result().numpy())
-
-
-def verify(model, verification_threshold):
-    # Build results array
-    results = []
-    paths = os.listdir(os.path.join('application_data', 'verification_images'))
-    print(paths)
-    for image in paths:
-        input_img = preprocess(os.path.join('application_data', 'input_image', 'input_image2.jpg'))
-        validation_img = preprocess(os.path.join('application_data', 'verification_images', image))
-        result = model.predict(list(np.expand_dims([input_img, validation_img], axis=1)))
-        print(image, result[0][0])
-        results.append(result[0][0])
-
-    # Detection Threshold: metric above which a prediction is considered positive
-    pos_array = np.array(results)
-    index = np.argmax(pos_array)
-    detection = pos_array[index]
-    filename = paths[index]
-
-    # Verification here uses the single best (max) prediction score; the commented
-    # alternative below is the proportion-of-positives metric
-    # verification = detection / len(os.listdir(os.path.join('application_data', 'verification_images')))
-    verification = detection
-    verified = verification > verification_threshold
-
-    return results, verified, detection, filename
-
-
-EPOCHS = 50
-
-if __name__ == '__main__':
-    if train_mode or retrain_mode:
-        train(train_data, EPOCHS)
-        save_model(siamese_model)
-    # predict()
-    r, v, d, f = verify(siamese_model, 0.5)
-    print(f"Results: {r}\nVerified: {v}\nDetection: {d}\nFilename: {f}")

+ 0 - 47
take_picture.py

@@ -1,47 +0,0 @@
-import cv2
-import time
-
-key = cv2.waitKey(1)
-webcam = cv2.VideoCapture(0)
-while True:
-    try:
-        check, frame = webcam.read()
-        if not check:
-            continue
-        # print(check)  # prints True while the webcam is running
-        # print(frame)  # prints the matrix values of each frame
-        cv2.imshow("Capturing", frame)
-        key = cv2.waitKey(1)
-        if key == ord('s'):
-            ts = time.time()
-            cv2.imwrite(filename='saved_img-{}.jpg'.format(ts), img=frame)
-            # webcam.release()
-            # img_new = cv2.imread('saved_img.jpg', cv2.IMREAD_GRAYSCALE)
-            # img_new = cv2.imshow("Captured Image", img_new)
-            cv2.waitKey(500)
-            # cv2.destroyAllWindows()
-            # print("Processing image...")
-            # img_ = cv2.imread('saved_img.jpg', cv2.IMREAD_ANYCOLOR)
-            # print("Converting RGB image to grayscale...")
-            # gray = cv2.cvtColor(img_, cv2.COLOR_BGR2GRAY)
-            # print("Converted RGB image to grayscale...")
-            # print("Resizing image to 28x28 scale...")
-            # img_ = cv2.resize(gray, (28, 28))
-            # print("Resized...")
-            # img_resized = cv2.imwrite(filename='saved_img-final.jpg', img=img_)
-            print("Image saved!")
-
-            # break
-        elif key == ord('q'):
-            print("Turning off camera.")
-            webcam.release()
-            print("Camera off.")
-            print("Program ended.")
-            cv2.destroyAllWindows()
-            break
-
-    except KeyboardInterrupt:
-        print("Turning off camera.")
-        webcam.release()
-        print("Camera off.")
-        print("Program ended.")
-        cv2.destroyAllWindows()
-        break
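
For reference, a compact sketch of the same capture loop (assuming webcam 0), reading the keyboard once per frame and checking the read flag before displaying:

import cv2
import time

cap = cv2.VideoCapture(0)
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    cv2.imshow('Capturing', frame)
    key = cv2.waitKey(1) & 0xFF           # one keyboard read per frame
    if key == ord('s'):
        cv2.imwrite('saved_img-{}.jpg'.format(time.time()), frame)
    elif key == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()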

+ 0 - 52
take_picture_snn.py

@@ -1,52 +0,0 @@
-# Establish a connection to the webcam
-import uuid
-import cv2
-import os
-
-# Setup paths
-POS_PATH = os.path.join('data', 'positive')
-NEG_PATH = os.path.join('data', 'negative')
-ANC_PATH = os.path.join('data', 'anchor')
-
-cap = cv2.VideoCapture(0)
-CAMERA_OFFSET_X = 750
-CAMERA_OFFSET_Y = 250
-CAMERA_SIZE = 500
-FEED_SIZE = 250
-
-NAME = "kevin"
-while cap.isOpened():
-    ret, frame = cap.read()
-
-    # Cut the frame down to a 500x500 region (CAMERA_SIZE) at the configured offset
-    frame = frame[CAMERA_OFFSET_Y:CAMERA_OFFSET_Y + CAMERA_SIZE, CAMERA_OFFSET_X:CAMERA_OFFSET_X + CAMERA_SIZE, :]
-
-    # Read the keyboard once per frame; calling cv2.waitKey several times per loop can drop key events
-    key = cv2.waitKey(1) & 0xFF
-
-    # Collect anchors
-    if key == ord('a'):
-        # Create the unique file path
-        imgname = os.path.join(ANC_PATH, '{}-{}.jpg'.format(NAME, uuid.uuid1()))
-        # Resize image
-        resized = cv2.resize(frame, (FEED_SIZE, FEED_SIZE))
-        # Write out anchor image
-        cv2.imwrite(imgname, resized)
-
-    # Collect positives
-    if key == ord('p'):
-        # Create the unique file path
-        imgname = os.path.join(POS_PATH, '{}-{}.jpg'.format(NAME, uuid.uuid1()))
-        # Resize image
-        resized = cv2.resize(frame, (FEED_SIZE, FEED_SIZE))
-        # Write out positive image
-        cv2.imwrite(imgname, resized)
-
-    # Show the image back on screen
-    cv2.imshow('Image Collection', frame)
-
-    # Break gracefully
-    if key == ord('q'):
-        break
-
-# Release the webcam
-cap.release()
-# Close the image show frame
-cv2.destroyAllWindows()
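
A sketch of the crop arithmetic the loop performs, built from the file's constants; the 1920x1080 input size is an assumption about the webcam:

import numpy as np
import cv2

frame = np.zeros((1080, 1920, 3), dtype=np.uint8)   # assumed full-HD webcam frame
crop = frame[250:250 + 500, 750:750 + 500, :]       # CAMERA_OFFSET_Y/X, CAMERA_SIZE
resized = cv2.resize(crop, (250, 250))              # FEED_SIZE
print(crop.shape, resized.shape)                    # (500, 500, 3) (250, 250, 3)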

+ 0 - 99
verify_face.py

@@ -1,99 +0,0 @@
-import face_recognition
-import cv2
-import numpy as np
-import joblib
-import time
-
-# This is a super simple (but slow) example of running face recognition on live video from your webcam.
-# There's a second example that's a little more complicated but runs faster.
-
-# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.
-# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this
-# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.
-
-# Get a reference to webcam #0 (the default one)
-video_capture = cv2.VideoCapture(0)
-
-clf = joblib.load('saved_model_2.pkl')
-classes = clf.classes_
-
-threshold = 0.9
-
-dummy_data = [
-    {
-        "name": "Bayu",
-        "address": "299 St Louis Road Oak Forest, IL 60452",
-        "nik": "1000076456784631"
-    },
-    {
-        "name": "Dio",
-        "address": "22 Whitemarsh St. Mansfield, MA 02048",
-        "nik": "1000024792887549"
-    },
-    {
-        "name": "Hadi",
-        "address": "643 Honey Creek Dr. Milledgeville, GA 31061",
-        "nik": "1000038502830420"
-    },
-    {
-        "name": "Kevin",
-        "address": "881 Cooper Ave. Hummelstown, PA 17036",
-        "nik": "1000045356476664"
-    },
-    {
-        "name": "Matrix",
-        "address": "580 Glenwood Dr. Garner, NC 27529",
-        "nik": "1000023452134598"
-    },
-    {
-        "name": "Surya",
-        "address": "909 South St Paul Street Hopewell, VA 23860",
-        "nik": "1000075656784734"
-    },
-]
-
-while True:
-    # Grab a single frame of video
-    ret, frame = video_capture.read()
-
-    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses);
-    # cv2.cvtColor keeps the array contiguous, which dlib requires
-    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-
-    # Find all the faces and face encodings in the frame of video
-    face_locations = face_recognition.face_locations(rgb_frame)
-    print(face_locations)
-    no = len(face_locations)
-    print("Number of faces detected: ", no)
-    face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)
-
-    # Loop through each face in this frame of video
-    for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
-        start_time = time.perf_counter_ns()
-        proba_list = clf.predict_proba([face_encoding])
-        end_time = time.perf_counter_ns()
-        process_time = end_time - start_time
-        i = np.argmax(proba_list)
-        proba = proba_list[0][i]
-        name = dummy_data[i]["name"]
-        print(name, "{:.2f}".format(proba), proba_list, process_time)
-
-        # Draw a box around the face
-        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
-
-        # Draw a label with a name below the face
-        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
-        if proba > threshold:
-            font = cv2.FONT_HERSHEY_DUPLEX
-            cv2.putText(frame, "{} {:.2f}".format(name, proba), (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
-
-
-    # Display the resulting image
-    cv2.imshow('Video', frame)
-
-    # Hit 'q' on the keyboard to quit!
-    if cv2.waitKey(1) & 0xFF == ord('q'):
-        break
-
-# Release handle to the webcam
-video_capture.release()
-cv2.destroyAllWindows()
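
One fragility worth noting: predict_proba columns follow clf.classes_ (sorted labels), so indexing dummy_data with the same argmax works only because the records happen to be sorted by name and match the training labels. A sketch of a lookup keyed by label instead, with stand-in values:

import numpy as np

proba_list = np.array([[0.05, 0.90, 0.05]])      # stand-in for clf.predict_proba(...)
classes = np.array(['Bayu', 'Dio', 'Hadi'])      # stand-in for clf.classes_
records = {'Dio': {'nik': '1000024792887549'}}   # keyed by label, not by position

i = int(np.argmax(proba_list))
name = classes[i]
print(name, proba_list[0][i], records.get(name))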

+ 0 - 54
verify_face_2.py

@@ -1,54 +0,0 @@
-import face_recognition
-import cv2
-import numpy as np
-import joblib
-import time
-import os
-
-clf = joblib.load('saved_model.pkl')
-classes = clf.classes_
-
-threshold = 0.65
-
-test_image = face_recognition.load_image_file(os.path.join('application_data', 'input_image', 'input_image2.jpg'))
-
-while True:
-
-    # face_recognition.load_image_file already returns RGB, so no channel flip is needed
-    rgb_frame = test_image
-
-    # Find all the faces and face encodings in the image
-    face_locations = face_recognition.face_locations(rgb_frame)
-    no = len(face_locations)
-    print("Number of faces detected: ", no)
-    face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)
-
-    # Loop through each face in this frame of video
-    for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
-        start_time = time.perf_counter_ns()
-        proba_list = clf.predict_proba([face_encoding])
-        end_time = time.perf_counter_ns()
-        process_time = end_time - start_time
-        i = np.argmax(proba_list)
-        proba = proba_list[0][i]
-        name = classes[i]
-        print(name, "{:.2f}".format(proba), proba_list, process_time)
-
-        # Draw a box around the face
-        cv2.rectangle(test_image, (left, top), (right, bottom), (0, 0, 255), 2)
-
-        # Draw a label with a name below the face
-        cv2.rectangle(test_image, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
-        if proba > threshold:
-            font = cv2.FONT_HERSHEY_DUPLEX
-            cv2.putText(test_image, "{} {:.2f}".format(name, proba), (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
-
-
-    # Display the resulting image (convert RGB back to BGR for OpenCV's display)
-    cv2.imshow('Video', cv2.cvtColor(test_image, cv2.COLOR_RGB2BGR))
-
-    # Hit 'q' on the keyboard to quit!
-    if cv2.waitKey(1) & 0xFF == ord('q'):
-        break
-
-# Close the display window
-cv2.destroyAllWindows()
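
The channel-order pitfall fixed above, shown in isolation: face_recognition.load_image_file returns RGB while OpenCV works in BGR, so conversion belongs only at the cv2 boundary. A sketch assuming a local face.jpg exists:

import cv2
import face_recognition

rgb = face_recognition.load_image_file('face.jpg')  # already RGB; pass as-is to face_recognition
bgr = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)          # convert only for cv2.imshow/imwrite
cv2.imwrite('face_copy.jpg', bgr)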