removes second model learning, removes second 255 division
parent 2d8cbca1bc · commit e5058cc8cc

LICENSE (2 changed lines)
@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2018 aspamers
+Copyright (c) 2018 Raphael Maenle
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

(File diff suppressed because one or more lines are too long.)

(Binary files changed, not shown.)

mymodel.png (binary image, not shown; before: 146 KiB)

notes.md (6 changed lines)
@@ -19,6 +19,8 @@ the steps taken so far, which lead to a successfull detection of an image
 
 - after you've successfully trained the model, it's now saved to 'model_checkpoint' or 'siamese_checkpoint'
 
+- note that the current model design has removed the second training layer, it now only creates 'siamese_checkpoint'
+
 - The following steps can be used to classify two images:
 Note, that it was so far only tested using images in a 'pdb' shell from the mnist_siamese_example script
 
@@ -26,8 +28,8 @@ the steps taken so far, which lead to a successfull detection of an image
 import tensorflow.keras as keras
 from PIL import image
 model = keras.models.load_model('./siamese_checkpoint')
-image1 = np.asarray(Image.open('../towards/data/fruits-360/Training/Avocado/r_254_100.jpg').convert('RGB').resize((28, 28))) / 255 / 255
-image2 = np.asarray(Image.open('../towards/data/fruits-360/Training/Avocado/r_250_100.jpg').convert('RGB').resize((28, 28))) / 255 / 255
+image1 = np.asarray(Image.open('../towards/data/fruits-360/Training/Avocado/r_254_100.jpg').convert('RGB').resize((28, 28))) / 255
+image2 = np.asarray(Image.open('../towards/data/fruits-360/Training/Avocado/r_250_100.jpg').convert('RGB').resize((28, 28))) / 255
 # note that the double division through 255 is only because the model bas taught with this double division, depends on
 # the input numbers of course
 
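
Note: the notes.md snippet above can be assembled into a small end-to-end sketch. This is an illustration only; it assumes the saved './siamese_checkpoint' model accepts a pair of (1, 28, 28, 3) image batches and returns a similarity score, and the load_image helper is not part of the repository.

# Hedged sketch based on the notes.md snippet (single /255 scaling, as of this commit).
import numpy as np
import tensorflow.keras as keras
from PIL import Image

def load_image(path):
    # resize to the 28x28 RGB input used during training and scale to [0, 1]
    img = np.asarray(Image.open(path).convert('RGB').resize((28, 28))) / 255
    return np.expand_dims(img.astype('float32'), axis=0)  # add a batch dimension

model = keras.models.load_model('./siamese_checkpoint')
image1 = load_image('../towards/data/fruits-360/Training/Avocado/r_254_100.jpg')
image2 = load_image('../towards/data/fruits-360/Training/Avocado/r_250_100.jpg')
score = model.predict([image1, image2])  # similarity of the two images
print('similarity score:', score)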

setup.py (15 changed lines, file deleted)
@@ -1,15 +0,0 @@
-from setuptools import setup
-
-setup(
-    name='siamese',
-    version='0.1',
-    packages=[''],
-    url='https://github.com/aspamers/siamese',
-    license='MIT',
-    author='Abram Spamers',
-    author_email='aspamers@gmail.com',
-    install_requires=[
-        'keras', 'numpy',
-    ],
-    description='An easy to use Keras Siamese Neural Network implementation'
-)

siamese.py (10 changed lines)
@@ -8,8 +8,6 @@ import numpy as np
 from tensorflow.keras.layers import Input
 from tensorflow.keras.models import Model
 
-import pdb
-
 
 class SiameseNetwork:
     """
@@ -73,8 +71,6 @@ class SiameseNetwork:
         test_generator = self.__pair_generator(x_test, y_test, batch_size)
         test_steps = math.floor(max(len(x_test) / batch_size, 1))
 
-        pdb.set_trace()
-
         self.siamese_model.fit(train_generator,
                                steps_per_epoch=train_steps,
                                validation_data=test_generator,
@@ -138,7 +134,7 @@ class SiameseNetwork:
         """
         generator = self.__pair_generator(x, y, batch_size=batch_size)
         steps = len(x) / batch_size
-        return self.siamese_model.evaluate_generator(generator, steps=steps, *args, **kwargs)
+        return self.siamese_model.evaluate(generator, steps=steps, *args, **kwargs)
 
     def __initialize_siamese_model(self):
         """
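
Note: Model.evaluate_generator is deprecated in TensorFlow 2.x; Model.evaluate accepts generators and Sequence objects directly, which is what the hunk above switches to. A self-contained, hedged illustration of the API (toy model and data, not from this repository):

# Toy illustration of evaluating a Keras model on a Python generator in TF 2.x.
import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer='sgd', loss='mse')

def toy_generator(batch_size=8):
    while True:
        x = np.random.rand(batch_size, 4).astype('float32')
        y = np.random.rand(batch_size, 1).astype('float32')
        yield x, y

# previously: model.evaluate_generator(toy_generator(), steps=5)
loss = model.evaluate(toy_generator(), steps=5)
print('toy loss:', loss)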
@@ -225,12 +221,8 @@ class SiameseNetwork:
         for _ in range(int(num_negative_pairs)):
             cls_1, cls_2 = self.__randint_unequal(0, num_classes - 1)
 
-            try:
             index_1 = random.randint(0, len(class_indices[cls_1]) - 1)
             index_2 = random.randint(0, len(class_indices[cls_2]) - 1)
-            except Exception as e:
-                print(e)
-                pdb.set_trace()
 
 
             element_index_1, element_index_2 = class_indices[cls_1][index_1], class_indices[cls_2][index_2]
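
For context, the sampling in the hunk above picks two different class labels and then a random element index from each to form a negative pair. A hedged, stand-alone illustration (randint_unequal here is a plausible stand-in for the private SiameseNetwork.__randint_unequal, not the actual implementation):

# Toy illustration of negative-pair index sampling.
import random

def randint_unequal(lower, upper):
    # draw two *different* integers in [lower, upper]
    first = random.randint(lower, upper)
    second = random.randint(lower, upper)
    while second == first:
        second = random.randint(lower, upper)
    return first, second

class_indices = {0: [3, 7, 9], 1: [1, 4], 2: [0, 2, 5, 6]}  # toy class -> element indices
num_classes = len(class_indices)

cls_1, cls_2 = randint_unequal(0, num_classes - 1)
index_1 = random.randint(0, len(class_indices[cls_1]) - 1)
index_2 = random.randint(0, len(class_indices[cls_2]) - 1)
element_index_1, element_index_2 = class_indices[cls_1][index_1], class_indices[cls_2][index_2]
print(cls_1, cls_2, element_index_1, element_index_2)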

(Binary files changed, not shown.)

@@ -26,17 +26,15 @@ from siamese import SiameseNetwork
 import os, math, numpy as np
 from PIL import Image
 
-import pdb
-
 batch_size = 128
 num_classes = 131
 epochs = 999999
 
 # input image dimensions
 img_rows, img_cols = 28, 28
 
 def createTrainingData():
-    base_dir = '../towards/data/fruits-360/Training/'
+    base_dir = 'data/fruits-360/Training/'
     train_test_split = 0.7
     no_of_files_in_each_class = 80
 
@@ -133,10 +131,6 @@ else:
 
 x_train = x_train.astype('float32')
 x_test = x_test.astype('float32')
-x_train /= 255
-x_test /= 255
 
-pdb.set_trace()
-
 def create_own_base_model(input_shape):
     model_input = Input(shape=input_shape)
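
Note on the hunk above: per the commit message ("removes second 255 division") and the notes.md change, the image data is presumably already scaled once elsewhere, so these lines divided the pixels by 255 a second time. A toy numeric illustration (values not from the repository) of why the extra division is dropped:

# Dividing uint8 pixel values by 255 twice squashes them far below the intended [0, 1] range.
import numpy as np

pixels = np.array([0, 128, 255], dtype='float32')
print(pixels / 255)        # [0.      0.50196 1.     ]  -> intended scaling
print(pixels / 255 / 255)  # [0.      0.00197 0.00392]  -> accidental double scaling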
@@ -220,7 +214,7 @@ def get_batch(x_train, y_train, x_test, y_test, cat_train, batch_size=64):
 
 
 num_classes = 131
-epochs = 2000
+epochs = 20
 
 base_model = create_base_model(input_shape)
 head_model = create_head_model(base_model.output_shape)
@@ -254,6 +248,7 @@ siamese_network.fit(x_train, y_train,
 # print("!!!!!!")
 # siamese_network.load_weights(siamese_checkpoint_path)
 
+'''
 embedding = base_model.outputs[-1]
 
 y_train = keras.utils.to_categorical(y_train)
@@ -293,7 +288,8 @@ model.fit(x_train, y_train,
 # print("!!!!!!")
 
 # model.load_weights(model_checkpoint_path)
+'''
 
-score = model.evaluate(x_test, y_test, verbose=0)
+score = siamese_network.evaluate(x_test, y_test, batch_size=60, verbose=0)
 print('Test loss:', score[0])
 print('Test accuracy:', score[1])

@@ -6,6 +6,10 @@ Instead of using a fixed number of epochs this version continues to train
 until the stop criteria is reached.
 
 Model performance should be around 99.4% after training.
+
+
+This scripts shows how to correctly handle mnist data
+and how to use it for the model.fit() function
 """
 
 from __future__ import print_function