Multi-output Neural Network for function approximation

I am trying to extend the example here to be capable of handling multiple outputs for function approximation:

import numpy as np # helps with the math
import random as r
import plotly.graph_objects as go

# full data set
x = np.linspace(0, np.pi, 100)
y = np.sin(x)

# training input data
p = 1/2                             # fraction of data to use in training
N = int(len(x)*p)                   # number of data points corresponding to that fraction
idx = r.sample(range(len(x)), N)    # indices of randomly chosen training points
inputs = x[idx]

# training output data
outputs = y[idx]

# validation data
idx_inv = [i for i in range(len(x)) if i not in idx] # validation set indices
x_val = x[idx_inv]

# create NeuralNetwork class
class NeuralNetwork:

    # initialize variables in class
    def __init__(self, inputs, outputs):
        self.inputs = inputs
        self.outputs = outputs

        # initialize weights stochastically
        self.weights = np.random.normal(0, 1, len(self.inputs))
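        # note: this gives one weight per training sample, not per input feature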
        self.error_history = []
        self.epoch_list = []
        self.weights_list = []

    # activation function: S(x) = 1/(1 + e^(-x))
    def sigmoid(self, x, deriv=False):
        # the derivative form below assumes x is already a sigmoid output
        if deriv:
            return x * (1 - x)
        return 1 / (1 + np.exp(-x))

    # pass the data forward through the network
    def feed_forward(self):
        self.hidden = self.sigmoid(np.dot(self.inputs, self.weights))

    # going backwards through the network to update weights
    def backpropagation(self, rate):
        self.error = self.outputs - self.hidden
        delta = self.error * self.sigmoid(self.hidden, deriv=True)
        self.weights += rate * np.dot(self.inputs.T, delta)
        weights = self.weights.copy()
        self.weights_list.append(weights)

    def train(self, epochs=25000, rate=1.0):
        for epoch in range(epochs):
            # flow forward and produce an output
            self.feed_forward()
            # go back through the network to make corrections based on the output
            self.backpropagation(rate)
            # keep track of the error history over each epoch
            self.error_history.append(np.average(np.abs(self.error)))
            self.epoch_list.append(epoch)

    # function to predict output on new and unseen input data
    def predict(self, new_input):
        prediction = self.sigmoid(new_input * self.weights)
        return prediction

# create neural network
NN = NeuralNetwork(inputs, outputs)
# train neural network
NN.train(epochs=20000, rate=.1)

# predict the validation set
y_val = NN.predict(x_val)

plots = []
plots.append(go.Scatter(x=x, y=y))
plots.append(go.Scatter(x=inputs, y=outputs, mode='markers'))
plots.append(go.Scatter(x=x_val, y=y_val))
fig0 = go.Figure(plots)
fig0.show()

# plot the error over the entire training duration
fig = go.Figure(go.Scatter(x=NN.epoch_list, y=NN.error_history))
# fig.show()

But clearly my results are far off:

[plot: the true sin(x) curve, the sampled training points, and the network's prediction on the validation set]

I'm nearly positive it has to do with how I am handling the feed-forward and backpropagation steps. What am I doing wrong? By the way, I cannot use external ML libraries.
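For reference, here is how I understand the shapes ought to work: weight matrices sized by feature and unit counts rather than by the number of training samples. This is only a minimal sketch of my understanding, not code from the original example; the hidden-layer size (10), the linear output layer, and the learning rate are my own guesses:

import numpy as np

rng = np.random.default_rng(0)
n_samples, n_features, n_hidden, n_outputs = 50, 1, 10, 1

X = np.linspace(0, np.pi, n_samples).reshape(n_samples, n_features)
Y = np.sin(X)

W1 = rng.normal(0, 1, (n_features, n_hidden))  # input -> hidden weights
W2 = rng.normal(0, 1, (n_hidden, n_outputs))   # hidden -> output weights

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

for epoch in range(5000):
    # forward pass
    H = sigmoid(X @ W1)   # hidden activations, shape (n_samples, n_hidden)
    Y_hat = H @ W2        # linear output for regression, shape (n_samples, n_outputs)

    # backward pass: gradient of the mean squared error
    err = Y_hat - Y
    W2 -= 0.1 * (H.T @ err) / n_samples
    W1 -= 0.1 * (X.T @ ((err @ W2.T) * H * (1 - H))) / n_samples

If that picture is right, then my class above is learning one weight per training sample and squashing a regression target through a sigmoid, but I don't see how to map this back onto the single-weight-vector structure of the example I started from.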
