I am trying to build a softmax regression model for CIFAR classification. When I first tried to pass my images and labels into the feed dictionary, I got an error saying that feed dictionaries do not accept Tensors. I then converted them to NumPy arrays with .eval(), but the program hangs on the .eval() line and never continues. How can I pass this data to the feed_dict?
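For reference, this is the feeding pattern I am aiming for: as far as I understand, feed_dict is supposed to map each placeholder to a plain NumPy array rather than a Tensor. The zero arrays below are just stand-ins for a real CIFAR batch, not my actual data.

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 1728])
y_ = tf.placeholder(tf.float32, shape=[None, 10])

# Dummy NumPy arrays standing in for a batch of 128 flattened images and one-hot labels
batch_x = np.zeros((128, 1728), dtype=np.float32)
batch_y = np.zeros((128, 10), dtype=np.float32)

with tf.Session() as sess:
    # Placeholders are bound to NumPy arrays at run time via feed_dict
    print(sess.run(tf.shape(x), feed_dict={x: batch_x, y_: batch_y}))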
CIFARIMAGELOADING.PY

import tensorflow as tf
import os
import tensorflow.models.image.cifar10 as cf

IMAGE_SIZE = 24
BATCH_SIZE = 128

def loadimagesandlabels(size):
    # Load the images from the CIFAR data directory
    FLAGS = tf.app.flags.FLAGS
    data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
    filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i) for i in xrange(1, 6)]
    filename_queue = tf.train.string_input_producer(filenames)
    read_input = cf.cifar10_input.read_cifar10(filename_queue)

    # Generate a batch of images and labels by building up a queue of examples
    print('Filling queue with CIFAR images')
    num_preprocess_threads = 16
    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(BATCH_SIZE * min_fraction_of_examples_in_queue)

    images, label_batch = tf.train.batch([cropped_image, read_input.label],
                                         batch_size=BATCH_SIZE,
                                         num_threads=num_preprocess_threads,
                                         capacity=min_queue_examples + 3 * BATCH_SIZE)
    print(images)
    print(label_batch)
    return images, tf.reshape(label_batch, [BATCH_SIZE])

CIFAR.PY
#Set up placeholder vectors for image and labels
x = tf.placeholder(tf.float32, shape = [None, 1728])
y_ = tf.placeholder(tf.float32, shape = [None,10])
W = tf.Variable(tf.zeros([1728,10]))
b = tf.Variable(tf.zeros([10]))
#Implement regression model. Multiply input images x by weight matrix W, add the bias b
#Compute the softmax probabilities that are assigned to each class
y = tf.nn.softmax(tf.matmul(x,W) + b)
#Define cross entropy
#tf.reduce sum sums across all classes and tf.reduce_mean takes the average over these sums
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_*tf.log(y), reduction_indices = [1]))
#Train the model
#Each training iteration we load 128 training examples. We then run the train_step operation
#using feed_dict to replace the placeholder tensors x and y_ with the training examples
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
#Open up a Session
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
for i in range(1000):
    images, labels = CIFARImageLoading.loadimagesandlabels(size=BATCH_SIZE)
    unrolled_images = tf.reshape(images, (1728, BATCH_SIZE))
    # convert labels to their one_hot representations
    # should produce [[1,0,0,...],[0,1,0...],...]
    one_hot_labels = tf.one_hot(indices=labels, depth=NUM_CLASSES, on_value=1.0, off_value=0.0, axis=-1)
    print(unrolled_images)
    print(one_hot_labels)
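    # This is where the program hangs: the two .eval() calls below never return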
    images_numpy, labels_numpy = unrolled_images.eval(session=sess), one_hot_labels.eval(session=sess)
    sess.run(train_step, feed_dict={x: images_numpy, y_: labels_numpy})
#Evaluate the model
#.equal returns a tensor of booleans, we want to cast these as floats and then take their mean
#to get percent correctness (accuracy)
print("evaluating")
test_images, test_labels = CIFARImageLoading.loadimagesandlabels(TEST_SIZE)
test_images_unrolled = tf.reshape(test_images, (1728, TEST_SIZE))
test_images_one_hot = tf.one_hot(indices= test_labels, depth=NUM_CLASSES, on_value=1.0, off_value= 0.0, axis=-1)
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(accuracy.eval(feed_dict = {x: unrolled_images.eval(), y_ : test_images_one_hot.eval()}))
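A side note on the one_hot comment in the training loop above: this is the kind of labels array I am ultimately trying to end up with on the NumPy side. A minimal NumPy-only sketch of that target shape (np.eye is used purely for illustration here, it is not part of my code):

import numpy as np

NUM_CLASSES = 10
labels = np.array([3, 0, 7, 1])                          # example integer class labels
one_hot = np.eye(NUM_CLASSES, dtype=np.float32)[labels]  # row i of the identity is the one-hot vector for class i
print(one_hot.shape)   # (4, 10)
print(one_hot[0])      # [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]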