Tensorflow.js - CNN/or autoencoder denoiser architecture?
I am new to machine learning. I have 10,000 examples of 128x256 array of values 0.0-1.0. Each example consists of a pair of a clean example and the other with noise added. I am aiming to train a CNN / (or an autoencoder?) with these examples. I am currently able to train one dense layer without errors.
My first problem is my prediction is returning a 128x256 int array rather than floats.
My larger question is about finding a starting point for the architecture. One that would reduce dimensionality (with conv and pooling layers?) and then how to return back up to the 128x256 shape?
Here are the core aspects of the code:
// The model
const model = tf.sequential();
const IMAGE_WIDTH = 256;
const IMAGE_HEIGHT = 128;
const IMAGE_CHANNELS = 1;
const encoder = tf.layers.dense({
units: 1,
batchInputShape:[null,IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS],
activation: 'relu',
kernelInitializer:randomUniform,
biasInitializer:ones});
model.add(encoder)
model.compile({
optimizer: 'sgd',
loss: 'meanSquaredError'
});
return model;
// Training:
/**
 * Trains the model on noisy→clean example pairs, with live tfjs-vis plots.
 *
 * @param {tf.LayersModel} model - compiled model (MSE loss).
 * @param {{nextTrainBatch: Function, nextTestBatch: Function}} data - batch provider;
 *   assumed to return {xs, labels} tensors — TODO confirm against data loader.
 * @returns {Promise<tf.History>} resolves when fitting completes.
 */
async function train(model, data) {
  // 'acc'/'val_acc' are classification metrics; with an MSE regression loss only
  // 'loss'/'val_loss' will carry meaningful values in the charts.
  const metrics = ['loss', 'val_loss', 'acc', 'val_acc'];
  const container = {
    name: 'Model Training', tab: 'Model', styles: { height: '1000px' },
  };
  const fitCallbacks = tfvis.show.fitCallbacks(container, metrics);

  // NOTE(review): BATCH_SIZE (400) exceeds TRAIN_DATA_SIZE (200); every epoch
  // collapses to a single partial batch. Consider BATCH_SIZE <= TRAIN_DATA_SIZE.
  const BATCH_SIZE = 400;
  const TRAIN_DATA_SIZE = 200;
  const TEST_DATA_SIZE = 100;

  // FIX: `() = {` was a syntax error — tf.tidy takes an arrow function `() => {`.
  // tidy disposes intermediate tensors; only the returned pair survives.
  const [trainXs, trainYs] = tf.tidy(() => {
    const d = data.nextTrainBatch(TRAIN_DATA_SIZE);
    return [
      d.xs.reshape([TRAIN_DATA_SIZE, 256, 128, 1]),
      d.labels.reshape([TRAIN_DATA_SIZE, 256, 128, 1]),
    ];
  });
  const [testXs, testYs] = tf.tidy(() => {
    const d = data.nextTestBatch(TEST_DATA_SIZE);
    return [
      d.xs.reshape([TEST_DATA_SIZE, 256, 128, 1]),
      d.labels.reshape([TEST_DATA_SIZE, 256, 128, 1]),
    ];
  });

  return model.fit(trainXs, trainYs, {
    batchSize: BATCH_SIZE,
    validationData: [testXs, testYs],
    epochs: 20,
    shuffle: true,
    callbacks: fitCallbacks,
  });
}
// The prediction
const IMAGE_WIDTH = 256;
const IMAGE_HEIGHT = 128;
const testData = data.nextTestBatch(testDataSize);
const testxs = testData.xs.reshape([testDataSize, IMAGE_WIDTH, IMAGE_HEIGHT, 1]);
// FIX: argMax() returns integer *indices* of the max along an axis — that is why
// dataSync() yielded an Int32Array. argMax is a classification idiom; for a
// denoising regression the prediction tensor itself is the float output.
const labels = testData.labels;
const preds = model.predict(testxs);
console.log(preds.dataSync()); // Float32Array of denoised pixel values in [0, 1]
```