ANN returns different results for the same input

I built an application that uses a neural network to classify a user as either legitimate or intrusive; the data is stored in a database. When I click the classify button it returns results that vary a bit between runs (70%, 75%, 76%, ...). I use the same data for every classification, yet I get different results each time. Is this normal?
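
Here is a stripped-down sketch of the kind of variation I mean, using the same synaptic library as in my real code but with made-up toy data (the values, layer sizes, and the helper name trainFresh are placeholders for this sketch only):

    const synaptic = require('synaptic');

    // toy two-feature data (placeholder values, not my real features)
    const toySet = [
        {input: [0, 0], output: [0]},
        {input: [0, 1], output: [1]},
        {input: [1, 0], output: [1]},
        {input: [1, 1], output: [0]}
    ];

    function trainFresh() {
        // every new Perceptron starts from its own randomly initialized weights
        const net = new synaptic.Architect.Perceptron(2, 4, 1);
        new synaptic.Trainer(net).train(toySet, {
            rate: 0.1,
            iterations: 10000,
            error: 0.005,
            shuffle: true
        });
        return net;
    }

    // same data, same input, two separately trained networks
    console.log(trainFresh().activate([1, 0]), trainFresh().activate([1, 0]));

I would expect the two printed numbers to be close but not identical, which matches what I see with my real data.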

I used the synaptic library. This is the classification code:

     const synaptic = require('synaptic'); 



var network = new synaptic.Architect.Perceptron(2, 4, 4, 1);

var trainingSet = [
    {
        input: [-1.758, 0.74, -0.921, -0.281, -0.838, -0.445, -0.976, -0.274, -0.815, -0.257, -1.205, -0.174],
        output: [1]
    },
    {input: [0.166, 0.53, -1.043, -0.844, -0.61, -0.145, -0.349, -0.359, -0.918, 0.002, -0.463, 0.211], output: [1]},
    {input: [-0.54, 0.31, -1.024, -0.674, -0.686, 0.034, -1.05, 0.33, -1.022, -0.278, -0.704, 0.075], output: [1]},
    {input: [-0.45, 0.891, -1.027, -0.284, -0.593, -0.264, -1.044, 0.425, -1.009, -0.245, -0.699, 0.238], output: [1]},
    {
        input: [-0.502, 0.503, -0.898, -0.122, -0.133, -0.718, -1.075, 1.295, -1.032, -0.468, -0.731, -0.194],
        output: [1]
    },
    {input: [-0.49, 0.681, -0.801, -0.476, -0.554, -0.052, -1.064, 1.48, -0.956, -0.101, -0.621, 0.075], output: [1]},
    {input: [-0.453, 0.52, -0.893, -0.353, -0.552, -0.689, -1.014, 1.135, -1.066, -0.208, -0.779, -0.281], output: [1]},
    {input: [-1.578, 1.767, 1.88, 2.677, 3.206, -0.575, 1.454, -0.009, 1.217, 0.665, 2.367, -0.089], output: [0]},
    {input: [1.089, -1.369, 1.161, 1.51, 0.977, 3.213, 0.875, 1.36, 0.915, 3.182, 0.135, 3.295], output: [0]},
    {input: [0.597, -0.62, 1.142, -0.79, 0.108, -0.493, 0.811, -1.51, 1.391, -0.562, 1.783, -0.27], output: [0]},
    {input: [0.346, -1.053, 0.444, -1.043, -0.277, -0.888, 1.612, -0.924, 1.141, -1.354, -0.368, -1.005], output: [0]},
    {input: [0.863, -1.678, 0.648, 0.586, 0.207, 0.923, 0.38, -1.109, 1.072, -0.235, -0.015, -0.299], output: [0]},
    {input: [2.1, 0.173, 0.741, 0.676, -0.309, 0.337, 0.508, -0.454, 0.456, 0.537, 0.762, -0.317], output: [0]},
    {input: [0.61, -1.396, 0.59, -0.58, 0.056, -0.235, 0.933, -1.384, 0.626, -0.675, 0.541, -1.262], output: [0]}
];

let intent =
    [[[-1.885, 1.368, -0.794, -0.945, -0.841, -0.394],[ -1.015, 1.12, -0.823, -0.031, -1.356, -0.248]],
    [[-1.903, 0.114, -0.851, -1.075, -0.84, -0.851],[ -0.975, -0.099, -0.803, -0.15, NaN, NaN]],
    [[-1.886, 0.472, -0.891, -0.819, -0.838, -0.487],[ -0.912, -0.204, -0.753, -0.767, -1.238, -0.424]],
    [[-1.865, 1.145, -0.882, -0.588, -0.86, -0.304],[ -0.954, 0.125, -0.763, -1.179, -1.353, -0.5]],
    [[-1.856, 1.124, -0.913, -0.335, -0.86, 0.252],[ -1.022, 0.5, -1.15, 0.723, -1.315, -1.262]],
    [[-1.705, 0.729, -0.092, 0.206, -0.858, -0.519],[-0.958, 0.225, -0.801, -0.229, -1.449, -0.386]],
    [[-1.855, 1.344, -0.894, -0.309, 0.366, -0.134],[ -0.661, 0.965, -0.505, -1.127, -1.383, -0.161]],
    [[-1.861, 0.317, -0.797, -0.295, -0.836, -0.291],[ -0.976, 0.79, -0.819, -0.168, -1.334, -0.058]],
    [[-1.857, 1.083, -0.931, -0.653, -0.831, -0.264],[ -1.034, 0.225, -0.759, 0.1, -1.342, 0.075]],
    [[-1.882, 0.369, -0.926, -0.562, -0.883, 0.31],[ -0.965, 0.465, -0.759, -0.48, NaN, NaN]],
    [[-1.873, 0.86, -0.962, -0.483, -0.867, 0.095],[ -0.982, -0.039, -0.862, -0.529, -1.223, -1.025]],
    [[-1.895, 0.468, -0.964, -0.906, -0.861, 0.353],[ -0.971, -0.219, -0.796, -0.516, -1.395, -0.301]],
    [[-1.822, 1.159, -0.96, -0.707, -0.838, -0.124],[ -0.979, 0.025, -0.854, -0.101, -1.352, -0.558]],
    [[-1.836, 0.43, -0.95, -0.364, -0.874, 0.082],[ -1.037, 0.885, -0.863, -0.712, -1.325, -0.563]],
    [[-1.858, 1.324, -0.892, 0.109, -0.889, 0.079],[ -0.997, 0.1, -0.949, 0.369, -1.246, -0.652]],
    [[-1.836, 0.925, -0.889, -0.338, -0.833, -0.241],[ -1.072, 0.19, -0.908, -0.138, -1.26, -0.447]],
    [[-1.834, 1.41, -0.881, -0.259, -0.839, 0.361],[ 0.863, 0.515, 0.8, -0.306, -1.344, 0.477]],
    [[-1.836, 0.956, -0.892, -0.385, -0.894, 0.109],[ -1.015, 0.03, -0.828, -0.565, -1.343, -0.523]],
    [[-1.892, 0.736, -0.914, -0.324, -0.827, -0.18],[ -1.04, 0.26, -0.848, -0.425, -1.379, -0.317]],
    [[-1.89, 0.554, -0.797, -0.057, -0.235, 1.358],[ -0.716, 1, -0.53, -0.413, -1.269, -0.518]]]

// trains the network defined above on trainingSet
function ads() {
    var trainer = new synaptic.Trainer(network);

    let errors = [];
    trainer.train(trainingSet, {
        rate: 0.005,//0.003
        iterations: 20000,
        error: 0.0001,//0.005
        shuffle: true,
        log: 10000,
        cost: synaptic.Trainer.cost.CROSS_ENTROPY,
        schedule: {
            every: 100,
            do: function (data) {
                errors.push(data.error);
            }
        }
    });

}
let predict = [];
let learningRate = 0.003;

for (let i = 0; i < intent.length; i++) {
    let parcial = [];
    for (let j = 0; j < intent[i].length; j++) {
        ads(); // trains the network again before every single prediction
        const predictedLabel = network.activate(intent[i][j]);
        parcial.push(predictedLabel);
    }
    predict.push(parcial);
}
console.log('Answer', predict);

Is this caused by the behavior of the activation function? By backpropagation?
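
For reference, by "activation function" I mean something like the logistic sigmoid (which, as far as I know, is what synaptic's Perceptron layers use by default). On its own it is deterministic, so the same weights and the same input always give the same output:

    // logistic sigmoid, written out by hand for illustration (not taken from synaptic's source)
    const sigmoid = (x) => 1 / (1 + Math.exp(-x));

    // deterministic: the same argument always produces the same value
    console.log(sigmoid(0.42) === sigmoid(0.42)); // true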

Topic: ann, neural-network

Category: Data Science
