Package com.github.neuralnetworks.architecture.types

Examples of com.github.neuralnetworks.architecture.types.Autoencoder


    /**
     * Autoencoder backpropagation
     */
    @Test
    public void testAEBackpropagation() {
  // autoencoder with 6 input/output and 2 hidden units
  Autoencoder ae = NNFactory.autoencoderSigmoid(6, 2, true);

  // We'll use a simple dataset of flu symptoms. There are 6 input
  // features; the first three are symptoms of the illness - for example,
  // 1 0 0 0 0 0 means that a patient has a high temperature, 0 1 0 0 0 0
  // means coughing, 1 1 0 0 0 0 means coughing and high temperature, and
  // so on. The second three features are "counter" symptoms - when a
  // patient has one of those, it is less likely that he's sick. For
  // example, 0 0 0 1 0 0 means that he has a flu vaccine. Combinations of
  // both are possible - for example, 0 1 0 1 0 0 means that the patient
  // is vaccinated, but he's also coughing. We consider a patient sick
  // when he has at least two of the first three symptoms and healthy
  // when he has at least two of the second three.
  TrainingInputProvider trainInputProvider = new SimpleInputProvider(new float[][] { { 1, 1, 1, 0, 0, 0 }, { 1, 0, 1, 0, 0, 0 }, { 1, 1, 0, 0, 0, 0 }, { 0, 1, 1, 0, 0, 0 }, { 0, 1, 1, 1, 0, 0 }, { 0, 0, 0, 1, 1, 1 }, { 0, 0, 1, 1, 1, 0 }, { 0, 0, 0, 1, 0, 1 }, { 0, 0, 0, 0, 1, 1 }, { 0, 0, 0, 1, 1, 0 } }, null, 1000, 1);
  TrainingInputProvider testInputProvider = new SimpleInputProvider(new float[][] { { 1, 1, 1, 0, 0, 0 }, { 1, 0, 1, 0, 0, 0 }, { 1, 1, 0, 0, 0, 0 }, { 0, 1, 1, 0, 0, 0 }, { 0, 1, 1, 1, 0, 0 }, { 0, 0, 0, 1, 1, 1 }, { 0, 0, 1, 1, 1, 0 }, { 0, 0, 0, 1, 0, 1 }, { 0, 0, 0, 0, 1, 1 }, { 0, 0, 0, 1, 1, 0 } }, new float[][] { { 1, 0 }, { 1, 0 }, { 1, 0 }, { 1, 0 }, { 1, 0 }, { 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 1 } }, 10, 1);
  MultipleNeuronsOutputError error = new MultipleNeuronsOutputError();

  // backpropagation for autoencoders
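  // numeric arguments - a hedged reading of the TrainerFactory signature:
  // learning rate 0.1, momentum 0.5, with the remaining weight-decay /
  // corruption terms left at 0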
  BackPropagationAutoencoder t = TrainerFactory.backPropagationAutoencoder(ae, trainInputProvider, testInputProvider, error, new NNRandomInitializer(new MersenneTwisterRandomInitializer(-0.01f, 0.01f)), 0.1f, 0.5f, 0f, 0f, 0f);

  // log data
  t.addEventListener(new LogTrainingListener(Thread.currentThread().getStackTrace()[1].getMethodName(), true, false));

  // sequential execution for debugging
  Environment.getInstance().setExecutionMode(EXECUTION_MODE.SEQ);

  // training
  t.train();

  // the output layer is removed, thus making the hidden layer the new output
  ae.removeLayer(ae.getOutputLayer());

  // testing
  t.test();

  assertEquals(0, t.getOutputError().getTotalNetworkError(), 0);
    }
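With the output layer removed, the trained network maps its 6 inputs straight to the 2-unit hidden code, so it can act as a feature extractor. Below is a hedged sketch of that usage - it is not part of the original test, and it assumes the Matrix/ValuesProvider/LayerCalculator forward-pass API that appears in the testSAECalculation snippets further down this page:

  // forward-pass sketch: encode one "coughing + high temperature" patient
  Set<Layer> calculated = new HashSet<>();
  calculated.add(ae.getInputLayer());
  ValuesProvider vp = new ValuesProvider();
  vp.addValues(ae.getInputLayer(), new Matrix(new float[] { 1, 1, 0, 0, 0, 0 }, 1));
  // after removeLayer, the hidden layer is the network's output layer
  ae.getLayerCalculator().calculate(ae, ae.getOutputLayer(), calculated, vp);
  Matrix code = vp.getValues(ae.getOutputLayer()); // the 2-dimensional encoding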

    @Test
    public void testAE() {
  // create an autoencoder with a 4-neuron visible layer and a 3-neuron hidden layer
      Autoencoder ae = NNFactory.autoencoderSigmoid(4, 3, true);

      // training, testing and error
      TrainingInputProvider trainInputProvider = new IrisInputProvider(1, 15000, new IrisTargetMultiNeuronOutputConverter(), false, true, false);
      TrainingInputProvider testInputProvider = new IrisInputProvider(1, 150, new IrisTargetMultiNeuronOutputConverter(), false, true, false);
      MultipleNeuronsOutputError error = new MultipleNeuronsOutputError();

      // backpropagation autoencoder training
      BackPropagationAutoencoder bae = TrainerFactory.backPropagationAutoencoder(ae, trainInputProvider, testInputProvider, error, new NNRandomInitializer(new MersenneTwisterRandomInitializer(-0.01f, 0.01f)), 0.25f, 0.5f, 0f, 0f, 0f);

      // log data to console
      bae.addEventListener(new LogTrainingListener(Thread.currentThread().getStackTrace()[1].getMethodName()));

      // execution mode
      Environment.getInstance().setExecutionMode(EXECUTION_MODE.SEQ);

      bae.train();

      // the output layer is needed only during the training phase...
      ae.removeLayer(ae.getOutputLayer());

      bae.test();

      // 2 of the 3 iris classes are not linearly separable - an error tolerance of 1/3 reflects that
      assertEquals(0, bae.getOutputError().getTotalNetworkError(), 1/3f);

    @Test
    public void testSAE() {
  // create stacked autoencoder with input layer of size 4, hidden layer of the first AE with size 4 and hidden layer of the second AE with size 3
  StackedAutoencoder sae = NNFactory.saeSigmoid(new int[] { 4, 4, 3 }, true);

  // stacked networks
  Autoencoder firstNN = sae.getFirstNeuralNetwork();
  firstNN.setLayerCalculator(NNFactory.lcSigmoid(firstNN, null));

  Autoencoder lastNN = sae.getLastNeuralNetwork();
  lastNN.setLayerCalculator(NNFactory.lcSigmoid(lastNN, null));

  // trainers for each of the stacked networks
  BackPropagationAutoencoder firstTrainer = TrainerFactory.backPropagationAutoencoder(firstNN, null, null, null, new NNRandomInitializer(new MersenneTwisterRandomInitializer(-0.01f, 0.01f)), 0.001f, 0.5f, 0f, 0f, 0f);
  BackPropagationAutoencoder secondTrainer = TrainerFactory.backPropagationAutoencoder(lastNN, null, null, null, new NNRandomInitializer(new MersenneTwisterRandomInitializer(-0.01f, 0.01f)), 0.001f, 0.5f, 0f, 0f, 0f);
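The per-layer trainers above are created with null input providers; in a stacked setup the autoencoders are pretrained greedily, the first on the raw input and the second on the first one's hidden activations. A hedged sketch of that idea, assuming each trainer has been wired to its own TrainingInputProvider instead of null:

  firstTrainer.train();  // learn the 4 -> 4 encoding of the raw features
  secondTrainer.train(); // learn the 4 -> 3 encoding of the first AE's hidden codes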

  Environment.getInstance().setExecutionMode(EXECUTION_MODE.CPU);

  Environment.getInstance().setUseWeightsSharedMemory(true);
  Environment.getInstance().setUseDataSharedMemory(true);

  // create an autoencoder with a 4-neuron visible layer and a 3-neuron hidden layer
  Autoencoder ae = NNFactory.autoencoderSigmoid(4, 3, true);

      // training, testing and error
      IrisInputProvider trainInputProvider = new IrisInputProvider(new IrisTargetMultiNeuronOutputConverter(), false);
  trainInputProvider.addInputModifier(new ScalingInputFunction(trainInputProvider));

  IrisInputProvider testInputProvider = new IrisInputProvider(new IrisTargetMultiNeuronOutputConverter(), false);
  testInputProvider.addInputModifier(new ScalingInputFunction(testInputProvider));

      MultipleNeuronsOutputError error = new MultipleNeuronsOutputError();

      // backpropagation autoencoder training
      BackPropagationAutoencoder bae = TrainerFactory.backPropagationAutoencoder(ae, trainInputProvider, testInputProvider, error, new NNRandomInitializer(new MersenneTwisterRandomInitializer(-0.01f, 0.01f)), 0.02f, 0.7f, 0f, 0f, 0f, 1, 1, 100);

      // log data to console
      bae.addEventListener(new LogTrainingListener(Thread.currentThread().getStackTrace()[1].getMethodName()));

      bae.train();

      // the output layer is needed only during the training phase...
      ae.removeLayer(ae.getOutputLayer());

      bae.test();

      // 2 of the 3 iris classes are not linearly separable, hence the loose error tolerance
      assertEquals(0, bae.getOutputError().getTotalNetworkError(), 2/3f);

  IrisInputProvider testInputProvider = new IrisInputProvider(new IrisTargetMultiNeuronOutputConverter(), false);
  testInputProvider.addInputModifier(new ScalingInputFunction(testInputProvider));

  // stacked networks
  Autoencoder firstNN = sae.getFirstNeuralNetwork();
  firstNN.setLayerCalculator(NNFactory.lcSigmoid(firstNN, null));

  Autoencoder lastNN = sae.getLastNeuralNetwork();
  lastNN.setLayerCalculator(NNFactory.lcSigmoid(lastNN, null));

  // trainers for each of the stacked networks
  BackPropagationAutoencoder firstTrainer = TrainerFactory.backPropagationAutoencoder(firstNN, null, null, null, new NNRandomInitializer(new MersenneTwisterRandomInitializer(-0.01f, 0.01f), 0.5f), 0.02f, 0.7f, 0f, 0f, 0f, 150, 1, 2000);
  BackPropagationAutoencoder secondTrainer = TrainerFactory.backPropagationAutoencoder(lastNN, null, null, null, new NNRandomInitializer(new MersenneTwisterRandomInitializer(-0.01f, 0.01f), 0.5f), 0.02f, 0.7f, 0f, 0f, 0f, 150, 1, 2000);

    }


    @Test
    public void testAE() {
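  // 784 = 28 x 28 MNIST pixels, compressed into a 10-unit hidden code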
  Autoencoder nn = NNFactory.autoencoderSigmoid(784, 10, true);

  MnistInputProvider trainInputProvider = new MnistInputProvider("train-images.idx3-ubyte", "train-labels.idx1-ubyte", 1, 1, new MnistTargetMultiNeuronOutputConverter());
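  // scale the raw 0-255 pixel intensities into a sigmoid-friendly range
  // (presumably dividing by 255)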
  trainInputProvider.addInputModifier(new ScalingInputFunction(255));
  MnistInputProvider testInputProvider = new MnistInputProvider("t10k-images.idx3-ubyte", "t10k-labels.idx1-ubyte", 1000, 1, new MnistTargetMultiNeuronOutputConverter());
  testInputProvider.addInputModifier(new ScalingInputFunction(255));

  Trainer<?> t = TrainerFactory.backPropagationAutoencoder(nn, trainInputProvider, testInputProvider,  new MultipleNeuronsOutputError(), new NNRandomInitializer(new MersenneTwisterRandomInitializer(-0.01f, 0.01f)), 0.01f, 0.5f, 0f, 0f, 0f);

  t.addEventListener(new LogTrainingListener(Thread.currentThread().getStackTrace()[1].getMethodName(), false, true));
  Environment.getInstance().setExecutionMode(EXECUTION_MODE.CPU);
  t.train();
  nn.removeLayer(nn.getOutputLayer());
  t.test();

  assertEquals(0, t.getOutputError().getTotalNetworkError(), 0.1);
    }

  Environment.getInstance().setExecutionMode(EXECUTION_MODE.CPU);
  Environment.getInstance().setUseDataSharedMemory(true);
  Environment.getInstance().setUseWeightsSharedMemory(true);

  // autoencoder with 6 input/output and 2 hidden units
  Autoencoder ae = NNFactory.autoencoderSigmoid(6, 2, true);

  // We'll use a simple dataset of flu symptoms. There are 6 input
  // features; the first three are symptoms of the illness - for example,
  // 1 0 0 0 0 0 means that a patient has a high temperature, 0 1 0 0 0 0
  // means coughing, 1 1 0 0 0 0 means coughing and high temperature, and
  // so on. The second three features are "counter" symptoms - when a
  // patient has one of those, it is less likely that he's sick. For
  // example, 0 0 0 1 0 0 means that he has a flu vaccine. Combinations of
  // both are possible - for example, 0 1 0 1 0 0 means that the patient
  // is vaccinated, but he's also coughing. We consider a patient sick
  // when he has at least two of the first three symptoms and healthy
  // when he has at least two of the second three.
  TrainingInputProvider trainInputProvider = new SimpleInputProvider(new float[][] { { 1, 1, 1, 0, 0, 0 }, { 1, 0, 1, 0, 0, 0 }, { 1, 1, 0, 0, 0, 0 }, { 0, 1, 1, 0, 0, 0 }, { 0, 1, 1, 1, 0, 0 }, { 0, 0, 0, 1, 1, 1 }, { 0, 0, 1, 1, 1, 0 }, { 0, 0, 0, 1, 0, 1 }, { 0, 0, 0, 0, 1, 1 }, { 0, 0, 0, 1, 1, 0 } }, null);
  TrainingInputProvider testInputProvider = new SimpleInputProvider(new float[][] { { 1, 1, 1, 0, 0, 0 }, { 1, 0, 1, 0, 0, 0 }, { 1, 1, 0, 0, 0, 0 }, { 0, 1, 1, 0, 0, 0 }, { 0, 1, 1, 1, 0, 0 }, { 0, 0, 0, 1, 1, 1 }, { 0, 0, 1, 1, 1, 0 }, { 0, 0, 0, 1, 0, 1 }, { 0, 0, 0, 0, 1, 1 }, { 0, 0, 0, 1, 1, 0 } }, new float[][] { { 1, 0 }, { 1, 0 }, { 1, 0 }, { 1, 0 }, { 1, 0 }, { 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 1 } });
  MultipleNeuronsOutputError error = new MultipleNeuronsOutputError();

  // backpropagation for autoencoders
  BackPropagationAutoencoder t = TrainerFactory.backPropagationAutoencoder(ae, trainInputProvider, testInputProvider, error, new NNRandomInitializer(new MersenneTwisterRandomInitializer(-0.01f, 0.01f)), 0.02f, 0.7f, 0f, 0f, 0f, 1, 1, 100);

  // log data
  t.addEventListener(new LogTrainingListener(Thread.currentThread().getStackTrace()[1].getMethodName(), true, false));

  // early stopping
  //t.addEventListener(new EarlyStoppingListener(t.getTrainingInputProvider(), 1000, 0.1f));

  // training
  t.train();

  // the output layer is removed, thus making the hidden layer the new output
  ae.removeLayer(ae.getOutputLayer());

  // testing
  t.test();

  assertEquals(0, t.getOutputError().getTotalNetworkError(), 0);

    @Test
    public void testSAECalculation() {
  StackedAutoencoder sae = NNFactory.sae(new int [] {3, 2, 2}, true);
  sae.setLayerCalculator(NNFactory.lcWeightedSum(sae, null));

  Autoencoder firstAE = sae.getFirstNeuralNetwork();
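  // fill every weight and bias with a fixed constant so the stacked
  // forward pass can be verified by hand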
  Util.fillArray(((GraphConnections) firstAE.getConnection(firstAE.getInputLayer(), firstAE.getHiddenLayer())).getConnectionGraph().getElements(), 0.2f);
  Util.fillArray(((GraphConnections) firstAE.getConnection(firstAE.getHiddenBiasLayer(), firstAE.getHiddenLayer())).getConnectionGraph().getElements(), 0.3f);
  Util.fillArray(((GraphConnections) firstAE.getConnection(firstAE.getHiddenLayer(), firstAE.getOutputLayer())).getConnectionGraph().getElements(), 0.8f);
  Util.fillArray(((GraphConnections) firstAE.getConnection(firstAE.getOutputBiasLayer(), firstAE.getOutputLayer())).getConnectionGraph().getElements(), 0.9f);

  Autoencoder secondAE = sae.getLastNeuralNetwork();
  Util.fillArray(((GraphConnections) secondAE.getConnection(secondAE.getInputLayer(), secondAE.getHiddenLayer())).getConnectionGraph().getElements(), 0.4f);
  Util.fillArray(((GraphConnections) secondAE.getConnection(secondAE.getHiddenBiasLayer(), secondAE.getHiddenLayer())).getConnectionGraph().getElements(), 0.5f);
  Util.fillArray(((GraphConnections) secondAE.getConnection(secondAE.getHiddenLayer(), secondAE.getOutputLayer())).getConnectionGraph().getElements(), 0.7f);
  Util.fillArray(((GraphConnections) secondAE.getConnection(secondAE.getOutputBiasLayer(), secondAE.getOutputLayer())).getConnectionGraph().getElements(), 0.9f);

  Set<Layer> calculatedLayers = new HashSet<>();
  calculatedLayers.add(sae.getInputLayer());

  ValuesProvider results = new ValuesProvider();
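The snippet cuts off here, but with these constant weights the expected output can be computed by hand: the layer calculator is a plain weighted sum (lcWeightedSum, no sigmoid), so for the input { 1, 1, 1 } each first-AE hidden unit produces 3 * 0.2 + 0.3 = 0.9, and each stack output unit produces 2 * (0.4 * 0.9) + 0.5 = 1.22. A hedged sketch of the continuation, assuming the Matrix/ValuesProvider API used elsewhere on this page:

  results.addValues(sae.getInputLayer(), new Matrix(new float[] { 1, 1, 1 }, 1));
  sae.getLayerCalculator().calculate(sae, sae.getOutputLayer(), calculatedLayers, results);
  // both output units should hold exactly 1.22
  assertEquals(1.22f, results.getValues(sae.getOutputLayer()).get(0, 0), 0f);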

    @Test
    public void testSAECalculation() {
  Environment.getInstance().setUseWeightsSharedMemory(true);
  StackedAutoencoder sae = NNFactory.sae(new int [] {3, 2, 2}, true);
  sae.setLayerCalculator(NNFactory.lcWeightedSum(sae, null));

  Autoencoder firstAE = sae.getFirstNeuralNetwork();
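  // deterministic weights again, this time via the newer Tensor API:
  // fetch each connection's weight tensor and overwrite every element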
  Tensor t = ((FullyConnected) firstAE.getConnection(firstAE.getInputLayer(), firstAE.getHiddenLayer())).getWeights();
  float[] e1 = t.getElements();
  t.forEach(i -> e1[i] = 0.2f);

  t = ((FullyConnected) firstAE.getConnection(firstAE.getHiddenBiasLayer(), firstAE.getHiddenLayer())).getWeights();
  float[] e2 = t.getElements();
  t.forEach(i -> e2[i] = 0.3f);

  t = ((FullyConnected) firstAE.getConnection(firstAE.getHiddenLayer(), firstAE.getOutputLayer())).getWeights();
  float[] e3 = t.getElements();
  t.forEach(i -> e3[i] = 0.8f);

  t = ((FullyConnected) firstAE.getConnection(firstAE.getOutputBiasLayer(), firstAE.getOutputLayer())).getWeights();
  float[] e4 = t.getElements();
  t.forEach(i -> e4[i] = 0.9f);

  Autoencoder secondAE = sae.getLastNeuralNetwork();

  t = ((FullyConnected) secondAE.getConnection(secondAE.getInputLayer(), secondAE.getHiddenLayer())).getWeights();
  float[] e5 = t.getElements();
  t.forEach(i -> e5[i] = 0.4f);
 
  t = ((FullyConnected) secondAE.getConnection(secondAE.getHiddenBiasLayer(), secondAE.getHiddenLayer())).getWeights();
  float[] e6 = t.getElements();
  t.forEach(i -> e6[i] = 0.5f);

  t = ((FullyConnected) secondAE.getConnection(secondAE.getHiddenLayer(), secondAE.getOutputLayer())).getWeights();
  float[] e7 = t.getElements();
  t.forEach(i -> e7[i] = 0.7f);

  t = ((FullyConnected) secondAE.getConnection(secondAE.getOutputBiasLayer(), secondAE.getOutputLayer())).getWeights();
  float[] e8 = t.getElements();
  t.forEach(i -> e8[i] = 0.9f);

  Set<Layer> calculatedLayers = new HashSet<>();
  calculatedLayers.add(sae.getInputLayer());
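The eight near-identical fill blocks above are a natural candidate for a small helper. A hedged refactoring sketch - not in the original test, reusing only the FullyConnected/Tensor calls already shown; treating getConnection's return type as Connections is an assumption:

  // fill every weight element of a fully connected link with one value
  static void fillWeights(Connections c, float value) {
      Tensor w = ((FullyConnected) c).getWeights();
      float[] e = w.getElements();
      w.forEach(i -> e[i] = value);
  }

  // usage, replacing the first block above:
  // fillWeights(firstAE.getConnection(firstAE.getInputLayer(), firstAE.getHiddenLayer()), 0.2f);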
