While saving my model based on the Siamese network, I get warnings and an error

After running

Code: Select all

siamese_model.save("my_model.keras")
I get a warning and an error:

Code: Select all

WARNING:tensorflow:Compiled the loaded model, but the compiled metrics have yet to be built.
`model.compile_metrics` will be empty until you train or evaluate the model.

ImportError: cannot import name '__version__' from 'tensorflow.python.keras'
(C:\Users\91939\PycharmProjects\PythonProject\.venv\Lib\site-packages\tensorflow\python\keras\__init__.py)
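
Is the warning telling me that I need to compile the model before saving, so that the metrics get built? This is what I was planning to try next (untested):

Code: Select all

siamese_model.compile(optimizer=opt, loss=binary_cross_loss, metrics=['accuracy'])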

And for the ImportError, I added

Code: Select all

from keras import __version__
tf.keras.__version__ = __version__

But it didn't work.
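
Could the underlying problem be that my imports go through the private tensorflow.python.keras path that the traceback points at? I assume the public-API equivalents would look like the lines below, but I haven't confirmed that switching them fixes the save:

Code: Select all

from tensorflow.keras.models import Model
from tensorflow.keras.layers import Layer, Conv2D, Dense, MaxPooling2D, Input, Flatten
from tensorflow.keras.metrics import Precision, Recall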
Here is the full code:

Code: Select all

import cv2
import os
import random
import numpy as np
from matplotlib import pyplot as plt
#%%
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.layers import Layer, Conv2D, Dense, MaxPooling2D, Input, Flatten
import tensorflow as tf
#%%
from keras import __version__
tf.keras.__version__ = __version__
#%%
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
#%%
POS_PATH_1 = os.path.join('data', 'person1_positive')
POS_PATH_2 = os.path.join('data', 'person2_positive')
NEG_PATH = os.path.join('data', 'negative')
ANC_PATH_1 = os.path.join('data', 'person1_anchor')
ANC_PATH_2 = os.path.join('data', 'person2_anchor')
#%%
anchor_1 = tf.data.Dataset.list_files(ANC_PATH_1+'/*.jpg').take(300)
anchor_2 = tf.data.Dataset.list_files(ANC_PATH_2+'/*.jpg').take(300)
positive_1 = tf.data.Dataset.list_files(POS_PATH_1+'/*.jpg').take(300)
positive_2 = tf.data.Dataset.list_files(POS_PATH_2+'/*.jpg').take(300)
negative = tf.data.Dataset.list_files(NEG_PATH+'/*.jpg').take(600)
#%%
# read a JPEG from disk, resize to 105x105 and scale pixel values to [0, 1]
def preprocess(file_path):
    byte_img = tf.io.read_file(file_path)
    img = tf.io.decode_jpeg(byte_img)
    img = tf.image.resize(img, (105, 105))
    img = img / 255.0
    return img
#%%
positives_1 = tf.data.Dataset.zip((anchor_1, positive_1, tf.data.Dataset.from_tensor_slices(tf.ones(len(anchor_1)))))
positives_2 = tf.data.Dataset.zip((anchor_2, positive_2, tf.data.Dataset.from_tensor_slices(tf.ones(len(anchor_2)))))

negatives_1 = tf.data.Dataset.zip((anchor_1, negative.take(300), tf.data.Dataset.from_tensor_slices(tf.zeros(300))))
negatives_2 = tf.data.Dataset.zip((anchor_2, negative.take(300), tf.data.Dataset.from_tensor_slices(tf.zeros(300))))

data = positives_1.concatenate(positives_2).concatenate(negatives_1).concatenate(negatives_2)
#%%
def preprocess_twin(input_img, validation_img, label):
    return preprocess(input_img), preprocess(validation_img), label
#%%
data = data.map(preprocess_twin)
data = data.cache()
data = data.shuffle(buffer_size=1024)
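#%%
# sanity-check cell: a sample should be (anchor image, comparison image, label)
# with both images of shape (105, 105, 3)
sample = next(data.as_numpy_iterator())
print(sample[0].shape, sample[1].shape, sample[2])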
#%%
train_data = data.take(round(len(data)*.7))
train_data = train_data.batch(16)
train_data = train_data.prefetch(8)
#%%
test_data = data.skip(round(len(data)*.7))
test_data = test_data.take(round(len(data)*.3))
test_data = test_data.batch(16)
test_data = test_data.prefetch(8)
#%%
def make_embedding():
    inp = Input(shape=(105, 105, 3), name='input_image')

    c1 = Conv2D(64, (10, 10), activation='relu')(inp)
    m1 = MaxPooling2D(64, (2, 2), padding='same')(c1)

    c2 = Conv2D(128, (7, 7), activation='relu')(m1)
    m2 = MaxPooling2D(64, (2, 2), padding='same')(c2)

    c3 = Conv2D(128, (4, 4), activation='relu')(m2)
    m3 = MaxPooling2D(64, (2, 2), padding='same')(c3)

    c4 = Conv2D(256, (4, 4), activation='relu')(m3)
    f1 = Flatten()(c4)
    d1 = Dense(4096, activation='sigmoid')(f1)

    return Model(inputs=[inp], outputs=[d1], name='embedding')
#%%
embedding = make_embedding()
#%%
# custom layer computing the L1 distance between the two embeddings
class L1Dist(Layer):

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def call(self, input_embedding, validation_embedding):
        return tf.math.abs(input_embedding - validation_embedding)
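#%%
# untested idea: register the custom layer under its name so the saver can
# find it again when the model is reloaded; I'm not sure this is required
tf.keras.utils.get_custom_objects()['L1Dist'] = L1Dist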
#%%
def make_siamese_model():

    input_image = Input(name='input_img', shape=(105, 105, 3))

    validation_image = Input(name='validation_img', shape=(105, 105, 3))

    siamese_layer = L1Dist()
    siamese_layer._name = 'distance'
    distances = siamese_layer(embedding(input_image), embedding(validation_image))

    classifier = Dense(1, activation='sigmoid')(distances)

    return Model(inputs=[input_image, validation_image], outputs=classifier, name='SiameseNetwork')
#%%
siamese_model = make_siamese_model()
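#%%
# quick look at the combined architecture
siamese_model.summary()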
#%%
binary_cross_loss = tf.losses.BinaryCrossentropy()
#%%
opt = tf.keras.optimizers.Adam(1e-4)
#%%
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, 'ckpt')
checkpoint = tf.train.Checkpoint(opt=opt, siamese_model=siamese_model)
#%%
@tf.function
def train_step(batch):

    with tf.GradientTape() as tape:
        X = batch[:2]
        y = batch[2]

        yhat = siamese_model(X, training=True)
        loss = binary_cross_loss(y, yhat)
    print(loss)

    grad = tape.gradient(loss, siamese_model.trainable_variables)

    opt.apply_gradients(zip(grad, siamese_model.trainable_variables))

    return loss
#%%
from tensorflow.python.keras.metrics import Precision, Recall
#%%
def train(data, EPOCHS):
    for epoch in range(1, EPOCHS + 1):
        print('\n Epoch {}/{}'.format(epoch, EPOCHS))
        progbar = tf.keras.utils.Progbar(len(data))

        r = Recall()
        p = Precision()

        for idx, batch in enumerate(data):
            loss = train_step(batch)
            yhat = siamese_model(batch[:2], training=False)
            r.update_state(batch[2], yhat)
            p.update_state(batch[2], yhat)
            progbar.update(idx + 1)
        print(loss.numpy(), r.result().numpy(), p.result().numpy())

        if epoch % 1 == 0:
            checkpoint.save(file_prefix=checkpoint_prefix)
#%%
EPOCHS = 2
#%%
train(train_data, EPOCHS)
#%%
siamese_model.save('siamesemodelv2.h5')
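
If it matters, this is how I intended to load the model back afterwards; I'm assuming the custom layer has to be passed through custom_objects, but I couldn't verify it because of the error above:

Code: Select all

from tensorflow.keras.models import load_model

# hypothetical reload; passing L1Dist via custom_objects is my assumption
model = load_model('siamesemodelv2.h5', custom_objects={'L1Dist': L1Dist})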
