diff --git a/coco_gen.py b/coco_gen.py
new file mode 100644
index 0000000..bb473e1
--- /dev/null
+++ b/coco_gen.py
@@ -0,0 +1,72 @@
+import json, os
+import matplotlib.pyplot as plt
+import numpy as np
+
+from PIL import Image
+
+
+no_label = 0
+small = 0
+passed = 0
+
+count = 0
+
+
+print("loading coco annotations...")
+coco = json.load(open('./coco/annotations/instances_val2014.json'))
+print("done")
+
+def findAnnotationName(annotationId):
+    for c in coco['categories']:
+        if c['id'] == annotationId:
+            return c['name']
+
+def findAnnotationToId(ident):
+    for annotation in coco['annotations']:
+        img_an = annotation['image_id']
+        if img_an == ident:
+            return annotation
+
+def show(pil, pause=0.2):
+    ImageNumpyFormat = np.asarray(pil)
+    plt.imshow(ImageNumpyFormat)
+    plt.draw()
+    plt.pause(pause)  # pause how many seconds
+    plt.close()
+
+
+def parseImage(coImg):
+    # counters live at module level; declare global before augmented assignment
+    global no_label, small, passed
+
+    an = findAnnotationToId(coImg['id'])
+    if an is None:
+        no_label += 1
+        return
+
+    # open image file (only once we know it has an annotation)
+    path = "coco/val2014/" + coImg['file_name']
+    img = Image.open(path)
+    c = an['bbox']
+    crop = img.crop((c[0], c[1], c[0]+c[2], c[1]+c[3]))
+
+    if crop.width < 64 or crop.height < 64:
+        small += 1
+        return
+
+    imagePath = f"classified/{findAnnotationName(an['category_id'])}/{an['id']}.png"
+    os.makedirs(os.path.dirname(imagePath), exist_ok=True)
+    crop.save(imagePath)
+    passed += 1
+
+if __name__ == "__main__":
+    for coImg in coco['images']:
+        parseImage(coImg)
+        count += 1
+        if count % 100 == 0:
+            print("status:")
+            print(f"no labels: {no_label}")
+            print(f"too small: {small}")
+            print(f"passed: {passed}")
+            print("-----")
+
diff --git a/evaluate.py b/evaluate.py
index ae55361..ec452cb 100644
--- a/evaluate.py
+++ b/evaluate.py
@@ -2,12 +2,20 @@ import tensorflow.keras as keras
 from PIL import Image
 import numpy as np
 import pdb
-model = keras.models.load_model('./siamese_checkpoint')
-image1 = np.asarray(Image.open('../towards/data/fruits-360/Training/Avocado/r_254_100.jpg').convert('RGB').resize((100,
-                                                                                                                   100))) / 255
-image2 = np.asarray(Image.open('../towards/data/fruits-360/Training/Avocado/r_250_100.jpg').convert('RGB').resize((100,
-                                                                                                                   100))) / 255
-output = model.predict([np.array([image2]), np.array([image1])])
+def getI(path):
+    return np.asarray(Image.open(path).convert('RGB').resize((100, 100))) / 255
+
+def predict(image1, image2):
+    return model.predict([np.array([image2]), np.array([image1])])
+
+model = keras.models.load_model('./siamese_checkpoint')
+image1 = getI('../towards/data/fruits-360/Training/Avocado/r_254_100.jpg')
+image2 = getI('../towards/data/fruits-360/Training/Avocado/r_250_100.jpg')
+
+print(predict(image1, image2))
+
 pdb.set_trace()
+
+
 
 