https://youtu.be/-9H_eVZdtN8
Here is the TensorFlow neural network that solves it:
tf_nn.py
Code:
# Building a neural network with TensorFlow

import tensorflow as tf

def multilayer_perceptron( x, weights, biases ):
    # Hidden layer with RELU activation
    layer_1 = tf.add( tf.matmul( x, weights[ 'h1' ] ), biases[ 'b1' ] )
    layer_1 = tf.nn.relu( layer_1 )
    # Output layer with linear activation
    out_layer = tf.matmul( layer_1, weights[ 'out' ] ) + biases[ 'out' ]
    return out_layer

nInputs = 7             # Number of inputs to the neural network (one per segment)
nHiddenPerceptrons = 12
nTypes = 10             # Number of different types in the output (digits 0-9)
nLearningRate = 0.002
nTrainingEpochs = 500

# Input data: one row per digit, one value per segment of the display
aInputs = [ [ 1, 1, 1, 0, 1, 1, 1 ],   # zero        2
            [ 1, 0, 0, 0, 0, 0, 1 ],   # one      -------
            [ 1, 1, 0, 1, 1, 1, 0 ],   # two    3 |     | 1
            [ 1, 1, 0, 1, 0, 1, 1 ],   # three    |  4  |
            [ 1, 0, 1, 1, 0, 0, 1 ],   # four     -------
            [ 0, 1, 1, 1, 0, 1, 1 ],   # five     |     |
            [ 0, 1, 1, 1, 1, 1, 1 ],   # six    5 |     | 7
            [ 1, 1, 0, 0, 0, 0, 1 ],   # seven    -------
            [ 1, 1, 1, 1, 1, 1, 1 ],   # eight       6
            [ 1, 1, 1, 1, 0, 1, 1 ] ]  # nine

# Expected outputs: one-hot vectors, one per digit
aOutputs = [ [ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 ],
             [ 0, 1, 0, 0, 0, 0, 0, 0, 0, 0 ],
             [ 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 ],
             [ 0, 0, 0, 1, 0, 0, 0, 0, 0, 0 ],
             [ 0, 0, 0, 0, 1, 0, 0, 0, 0, 0 ],
             [ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0 ],
             [ 0, 0, 0, 0, 0, 0, 1, 0, 0, 0 ],
             [ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0 ],
             [ 0, 0, 0, 0, 0, 0, 0, 0, 1, 0 ],
             [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 ] ]

input = tf.placeholder( "float", shape=( None, nInputs ) )
output = tf.placeholder( "float", shape=( None, nTypes ) )

# Store layers weight & bias
weights = { 'h1': tf.Variable( tf.random_normal( [ nInputs, nHiddenPerceptrons ] ) ),
            'out': tf.Variable( tf.random_normal( [ nHiddenPerceptrons, nTypes ] ) ) }
biases = { 'b1': tf.Variable( tf.random_normal( [ nHiddenPerceptrons ] ) ),
           'out': tf.Variable( tf.random_normal( [ nTypes ] ) ) }

# Create model
network = multilayer_perceptron( input, weights, biases )
loss = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits( logits=network, labels=output ) )
optimizer = tf.train.AdamOptimizer( learning_rate=nLearningRate ).minimize( loss )
init = tf.global_variables_initializer()

with tf.Session() as session:
    session.run( init )
    # Training cycle
    for epoch in range( nTrainingEpochs ):
        avg_error = 0
        for n in range( len( aInputs ) ):
            cost = session.run( [ optimizer, loss ],
                                { input: [ aInputs[ n ] ], output: [ aOutputs[ n ] ] } )
            # Compute average error over the training samples
            avg_error += cost[ 1 ] / len( aInputs )
        print( "Epoch:", '%04d' % ( epoch + 1 ), "error=", "{:.9f}".format( avg_error ) )
    print( "Optimization Finished" )
    # Test model on train data
    print( "Testing:" )
    for n in range( len( aInputs ) ):
        print( tf.argmax( network, 1 ).eval( { input: [ aInputs[ n ] ] } )[ 0 ] )
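Note that this script uses the TensorFlow 1.x API (tf.Session, tf.placeholder, etc.), which is no longer available in the default namespace of TensorFlow 2.x. A minimal sketch, assuming you have TensorFlow 2.x installed, is to swap the import for the official v1 compatibility module so the script runs unchanged:

Code:
# Assumption: TensorFlow 2.x installed; route the 1.x-style calls
# through the official compatibility module.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()   # restores tf.placeholder, tf.Session, tf.random_normal, ...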
From a cmd window, type:
python tf_nn.py
and you will see how the neural network learns (the error, the average softmax cross-entropy over the ten training samples, keeps decreasing) and finally, when we test it on the same data, the right digits appear!
Epoch: 0491 error= 0.010902708
Epoch: 0492 error= 0.010839775
Epoch: 0493 error= 0.010745070
Epoch: 0494 error= 0.010681662
Epoch: 0495 error= 0.010590052
Epoch: 0496 error= 0.010513857
Epoch: 0497 error= 0.010433172
Epoch: 0498 error= 0.010375975
Epoch: 0499 error= 0.010283007
Epoch: 0500 error= 0.010227598
Optimization Finished
Testing:
0
1
2
3
4
5
6
7
8
9
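For reference, here is a minimal sketch of the same model written against the TensorFlow 2.x Keras API. This is not the code from the video, just an equivalent layout (7 inputs, 12 hidden RELU units, 10 linear outputs, Adam at 0.002, softmax cross-entropy) that reuses the aInputs / aOutputs lists from tf_nn.py above:

Code:
# A sketch of the same network on the TensorFlow 2.x Keras API.
# Assumes aInputs / aOutputs are the lists defined in tf_nn.py above.
import numpy as np
import tensorflow as tf

x = np.array( aInputs, dtype="float32" )   # 10 x 7 segment patterns
y = np.array( aOutputs, dtype="float32" )  # 10 x 10 one-hot labels

model = tf.keras.Sequential( [
    tf.keras.Input( shape=( 7, ) ),
    tf.keras.layers.Dense( 12, activation="relu" ),   # hidden layer
    tf.keras.layers.Dense( 10 )                       # linear logits
] )
model.compile( optimizer=tf.keras.optimizers.Adam( learning_rate=0.002 ),
               loss=tf.keras.losses.CategoricalCrossentropy( from_logits=True ) )
model.fit( x, y, epochs=500, verbose=0 )

# Should print [0 1 2 3 4 5 6 7 8 9] once trained
print( np.argmax( model.predict( x, verbose=0 ), axis=1 ) )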