The network model we use to train on our time series looks as follows:
# Build and initialize a two-hidden-layer feed-forward softmax classifier
# using the TensorFlow 1.x graph API.
sess = tf.Session()

num_predictors = len(train_features.columns)
num_classes = len(train_labels.columns)

# Graph inputs: a batch of feature rows and their one-hot class labels.
feature_data = tf.placeholder("float", [None, num_predictors])
# FIX: size the label placeholder from the data instead of hard-coding 2.
actual_classes = tf.placeholder("float", [None, num_classes])

# Layer parameters.
# FIX: the first weight matrix must match the placeholder's feature width
# (num_predictors); the original `len(codes) * 3` only works if that product
# happens to equal num_predictors.
weights1 = tf.Variable(tf.truncated_normal([num_predictors, 50], stddev=0.0001))
biases1 = tf.Variable(tf.ones([50]))
weights2 = tf.Variable(tf.truncated_normal([50, 25], stddev=0.0001))
biases2 = tf.Variable(tf.ones([25]))
# FIX: output layer sized from num_classes rather than a hard-coded 2.
weights3 = tf.Variable(tf.truncated_normal([25, num_classes], stddev=0.0001))
biases3 = tf.Variable(tf.ones([num_classes]))

# Forward pass: two ReLU hidden layers, then a softmax over the classes.
hidden_layer_1 = tf.nn.relu(tf.matmul(feature_data, weights1) + biases1)
hidden_layer_2 = tf.nn.relu(tf.matmul(hidden_layer_1, weights2) + biases2)
logits = tf.matmul(hidden_layer_2, weights3) + biases3
model = tf.nn.softmax(logits)

# FIX: compute cross-entropy from the logits for numerical stability;
# -sum(y * log(softmax(x))) yields NaN once any probability underflows to 0.
cost = tf.reduce_sum(
    tf.nn.softmax_cross_entropy_with_logits(labels=actual_classes,
                                            logits=logits))
train_op1 = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cost)

# FIX: initialize_all_variables() is deprecated (removed after TF 1.x
# deprecation cycle); global_variables_initializer() is the replacement.
init = tf.global_variables_initializer()
sess.run(init)

# Evaluation: fraction of samples whose argmax prediction matches the label.
correct_prediction = tf.equal(tf.argmax(model, 1), tf.argmax(actual_classes, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
This is just a simple network with two hidden layers.