Examples of NeuralNetworkImpl


Examples of com.github.neuralnetworks.architecture.NeuralNetworkImpl

    /**
     * Simple XOR backpropagation test
     */
    @Test
    public void testMLPSigmoidBP() {
  // create a multilayer perceptron with one hidden layer and bias units
  NeuralNetworkImpl mlp = NNFactory.mlpSigmoid(new int[] { 2, 8, 1 }, true);

  // create training and testing input providers
  XorInputProvider trainingInput = new XorInputProvider(10000);
  XorInputProvider testingInput = new XorInputProvider(4);


Examples of com.github.neuralnetworks.architecture.NeuralNetworkImpl

    @Test
    public void testSigmoidBP() {
  Environment.getInstance().setUseDataSharedMemory(false);
  Environment.getInstance().setUseWeightsSharedMemory(false);
  NeuralNetworkImpl mlp = NNFactory.mlpSigmoid(new int[] { 3072, 10 }, true);

  CIFAR10TrainingInputProvider trainInputProvider = new CIFAR10TrainingInputProvider("cifar-10-batches-bin"); // specify your own path
  trainInputProvider.getProperties().setGroupByChannel(true);
  trainInputProvider.getProperties().setScaleColors(true);
  trainInputProvider.addInputModifier(new ScalingInputFunction(255));
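
A plausible continuation mirroring the training provider for the test set; the CIFAR10TestingInputProvider class name is an assumption patterned on the training provider:

  CIFAR10TestingInputProvider testInputProvider = new CIFAR10TestingInputProvider("cifar-10-batches-bin"); // hypothetical test-set counterpart
  testInputProvider.getProperties().setGroupByChannel(true);
  testInputProvider.getProperties().setScaleColors(true);
  testInputProvider.addInputModifier(new ScalingInputFunction(255));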

Examples of com.github.neuralnetworks.architecture.NeuralNetworkImpl

    @Test
    public void testSigmoidBP() {
  Environment.getInstance().setUseDataSharedMemory(false);
  Environment.getInstance().setUseWeightsSharedMemory(false);
  NeuralNetworkImpl mlp = NNFactory.mlpSigmoid(new int[] { 784, 10 }, true);

  MnistInputProvider trainInputProvider = new MnistInputProvider("train-images.idx3-ubyte", "train-labels.idx1-ubyte");
  trainInputProvider.addInputModifier(new ScalingInputFunction(255));
  MnistInputProvider testInputProvider = new MnistInputProvider("t10k-images.idx3-ubyte", "t10k-labels.idx1-ubyte");
  testInputProvider.addInputModifier(new ScalingInputFunction(255));
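
With both providers in place, a hedged sketch of the remaining wiring, following the backPropagation signature used above; MultipleNeuronsOutputError (an error function that scores the most active of the ten output neurons against the label) is assumed from the library's MNIST samples, and the learning rate, momentum, and batch sizes are illustrative:

  BackPropagationTrainer<?> bpt = TrainerFactory.backPropagation(mlp, trainInputProvider, testInputProvider, new MultipleNeuronsOutputError(), null, 0.02f, 0.5f, 0f, 0f, 0f, 1, 1000, 1);
  bpt.train(); // one pass over the 60000 training images
  bpt.test();  // classification error over the 10000 test images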

Examples of com.github.neuralnetworks.architecture.NeuralNetworkImpl

    @Ignore
    @Test
    public void testSigmoidHiddenBP() {
  NeuralNetworkImpl mlp = NNFactory.mlpSigmoid(new int[] { 784, 300, 100, 10 }, true);

  MnistInputProvider trainInputProvider = new MnistInputProvider("train-images.idx3-ubyte", "train-labels.idx1-ubyte");
  trainInputProvider.addInputModifier(new ScalingInputFunction(255));
  MnistInputProvider testInputProvider = new MnistInputProvider("t10k-images.idx3-ubyte", "t10k-labels.idx1-ubyte");
  testInputProvider.addInputModifier(new ScalingInputFunction(255));

Examples of com.github.neuralnetworks.architecture.NeuralNetworkImpl

    public void testLeNetSmall() {
  // CPU execution mode
  Environment.getInstance().setExecutionMode(EXECUTION_MODE.CPU);

  // Convolutional network
  NeuralNetworkImpl nn = NNFactory.convNN(new int[][] { { 28, 28, 1 }, { 5, 5, 20, 1 }, { 2, 2 }, { 5, 5, 50, 1 }, { 2, 2 }, {512}, {10} }, true);
  nn.setLayerCalculator(NNFactory.lcSigmoid(nn, null));
  NNFactory.lcMaxPooling(nn);

  // MNIST dataset provider
  MnistInputProvider trainInputProvider = new MnistInputProvider("train-images.idx3-ubyte", "train-labels.idx1-ubyte");
  trainInputProvider.addInputModifier(new ScalingInputFunction(255));
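
The nested int[] spec encodes the network layer by layer. Reading it in light of the comments in the two examples below ({ 5, 5, 6, 1 } described as six 5x5 filters, { 2, 2 } as 2x2 max pooling), and assuming the trailing 1 in the convolutional entries is the stride:

  // { 28, 28, 1 }    input: 28x28 pixels, 1 channel
  // { 5, 5, 20, 1 }  convolution: 20 5x5 filters, presumably stride 1
  // { 2, 2 }         2x2 subsampling (max pooling after lcMaxPooling)
  // { 5, 5, 50, 1 }  convolution: 50 5x5 filters, presumably stride 1
  // { 2, 2 }         2x2 subsampling
  // { 512 }          fully connected layer, 512 units
  // { 10 }           fully connected output, one unit per MNIST digit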

Examples of com.github.neuralnetworks.architecture.NeuralNetworkImpl

    public void testLeNetTiny() {
  Environment.getInstance().setUseDataSharedMemory(false);
  Environment.getInstance().setUseWeightsSharedMemory(false);

  // very simple convolutional network with a single 2x2 max pooling layer
  NeuralNetworkImpl nn = NNFactory.convNN(new int[][] { { 28, 28, 1 }, { 2, 2 }, {10} }, true);
  nn.setLayerCalculator(NNFactory.lcSigmoid(nn, null));
  NNFactory.lcMaxPooling(nn);

  // MNIST dataset
  MnistInputProvider trainInputProvider = new MnistInputProvider("train-images.idx3-ubyte", "train-labels.idx1-ubyte");
  trainInputProvider.addInputModifier(new ScalingInputFunction(255));

Examples of com.github.neuralnetworks.architecture.NeuralNetworkImpl

    public void testLeNetTiny2() {
  Environment.getInstance().setUseDataSharedMemory(false);
  Environment.getInstance().setUseWeightsSharedMemory(false);

  // very simple convolutional network: one convolutional layer with six 5x5 filters, followed by a single 2x2 max pooling layer
  NeuralNetworkImpl nn = NNFactory.convNN(new int[][] { { 28, 28, 1 }, { 5, 5, 6, 1 }, {2, 2}, {10} }, true);
  nn.setLayerCalculator(NNFactory.lcSigmoid(nn, null));
  NNFactory.lcMaxPooling(nn);

  // MNIST dataset
  MnistInputProvider trainInputProvider = new MnistInputProvider("train-images.idx3-ubyte", "train-labels.idx1-ubyte");
  trainInputProvider.addInputModifier(new ScalingInputFunction(255));

Examples of com.github.neuralnetworks.architecture.NeuralNetworkImpl

  bcg.set(0.2f, 1, 0);

  List<Connections> connections = new ArrayList<>();
  connections.add(c1);

  NeuralNetworkImpl nn = new NeuralNetworkImpl();
  nn.addConnections(connections.toArray(new Connections[connections.size()]));

  ValuesProvider vp = TensorFactory.tensorProvider(nn, 2, true);
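  // tensors sized for a mini-batch of two examples, hence the two input
  // columns filled below; the boolean flag presumably toggles shared memory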

  Matrix i1 = vp.get(nn.getInputLayer());
  i1.set(1, 0, 0);
  i1.set(2, 1, 0);
  i1.set(3, 2, 0);
  i1.set(4, 0, 1);
  i1.set(5, 1, 1);
  i1.set(6, 2, 1);

  ConnectionCalculatorFullyConnected aws = new AparapiWeightedSumConnectionCalculator();
  aws.calculate(connections, vp, ol);

  // simplest case: a single fully connected layer, no bias
  Matrix o = vp.get(nn.getOutputLayer());
  assertEquals(14, o.get(0, 0), 0);
  assertEquals(32, o.get(0, 1), 0);
  assertEquals(32, o.get(1, 0), 0);
  assertEquals(77, o.get(1, 1), 0);
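  // these asserted values are plain weighted sums: the weights of c1 (set
  // outside this snippet, consistent with the asserts) form rows {1, 2, 3}
  // and {4, 5, 6}, and the inputs form columns {1, 2, 3} and {4, 5, 6}, so
  // o(0,0) = 1*1 + 2*2 + 3*3 = 14, o(0,1) = 1*4 + 2*5 + 3*6 = 32,
  // o(1,0) = 4*1 + 5*2 + 6*3 = 32, o(1,1) = 4*4 + 5*5 + 6*6 = 77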

  // with bias
  connections = new ArrayList<>();
  connections.add(c1);
  connections.add(bc);

  nn = new NeuralNetworkImpl();
  nn.addConnections(connections.toArray(new Connections[connections.size()]));
  vp = TensorFactory.tensorProvider(nn, 2, true);

  i1 = vp.get(nn.getInputLayer());
  i1.set(1, 0, 0);
  i1.set(2, 1, 0);
  i1.set(3, 2, 0);
  i1.set(4, 0, 1);
  i1.set(5, 1, 1);
  i1.set(6, 2, 1);

  aws = new AparapiWeightedSumConnectionCalculator();
  aws.calculate(connections, vp, ol);

  o = vp.get(nn.getOutputLayer());
  assertEquals(14.1, o.get(0, 0), 0.01);
  assertEquals(32.1, o.get(0, 1), 0.01);
  assertEquals(32.2, o.get(1, 0), 0.01);
  assertEquals(77.2, o.get(1, 1), 0.01);
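  // the bias connection adds its weight to each output row: 0.1 to row 0 and
  // 0.2 to row 1 (the bcg.set(0.2f, 1, 0) at the top), shifting 14 -> 14.1,
  // 32 -> 32.1 and 32.2, 77 -> 77.2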

  // combined layers
  connections = new ArrayList<>();
  connections.add(c1);
  connections.add(c2);
  connections.add(bc);
  nn = new NeuralNetworkImpl();
  nn.addConnections(connections.toArray(new Connections[connections.size()]));
  vp = TensorFactory.tensorProvider(nn, 2, true);

  i1 = vp.get(il1);
  i1.set(1, 0, 0);
  i1.set(2, 1, 0);
  i1.set(3, 2, 0);
  i1.set(4, 0, 1);
  i1.set(5, 1, 1);
  i1.set(6, 2, 1);

  Matrix i2 = vp.get(il2);
  i2.set(1, 0, 0);
  i2.set(2, 1, 0);
  i2.set(3, 2, 0);
  i2.set(4, 0, 1);
  i2.set(5, 1, 1);
  i2.set(6, 2, 1);

  aws = new AparapiWeightedSumConnectionCalculator();
  aws.calculate(connections, vp, ol);

  o = vp.get(nn.getOutputLayer());
  assertEquals(28.1, o.get(0, 0), 0.01);
  assertEquals(64.1, o.get(0, 1), 0.01);
  assertEquals(64.2, o.get(1, 0), 0.01);
  assertEquals(154.2, o.get(1, 1), 0.01);
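  // with c1 and c2 both feeding the output layer and identical values on
  // both input layers, each weighted sum doubles before the bias is added:
  // 2*14 + 0.1 = 28.1, 2*32 + 0.1 = 64.1, 2*32 + 0.2 = 64.2, 2*77 + 0.2 = 154.2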
    }

Examples of com.github.neuralnetworks.architecture.NeuralNetworkImpl

  ConnectionCalculatorFullyConnected aws = new AparapiWeightedSumConnectionCalculator();

  List<Connections> connections = new ArrayList<>();
  connections.add(c1);
  NeuralNetworkImpl nn = new NeuralNetworkImpl();
  nn.addConnections(connections.toArray(new Connections[connections.size()]));
  ValuesProvider vp = TensorFactory.tensorProvider(nn, 2, true);

  Matrix i1 = vp.get(il1);
  i1.set(1, 0, 0);
  i1.set(2, 1, 0);
  i1.set(3, 2, 0);
  i1.set(4, 0, 1);
  i1.set(5, 1, 1);
  i1.set(6, 2, 1);

  aws.calculate(connections, vp, ol);

  // simplest case: a single fully connected layer, no bias
  Matrix o = vp.get(ol);
  assertEquals(14, o.get(0, 0), 0);
  assertEquals(32, o.get(0, 1), 0);
  assertEquals(32, o.get(1, 0), 0);
  assertEquals(77, o.get(1, 1), 0);

  // with bias
  connections = new ArrayList<>();
  connections.add(c1);
  connections.add(bc);
  nn = new NeuralNetworkImpl();
  nn.addConnections(connections.toArray(new Connections[connections.size()]));
  vp = TensorFactory.tensorProvider(nn, 2, true);
  i1 = vp.get(il1);
  i1.set(1, 0, 0);
  i1.set(2, 1, 0);
  i1.set(3, 2, 0);
  i1.set(4, 0, 1);
  i1.set(5, 1, 1);
  i1.set(6, 2, 1);

  aws = new AparapiWeightedSumConnectionCalculator();
  aws.calculate(connections, vp, ol);

  o = vp.get(ol);
  assertEquals(14.1, o.get(0, 0), 0.01);
  assertEquals(32.1, o.get(0, 1), 0.01);
  assertEquals(32.2, o.get(1, 0), 0.01);
  assertEquals(77.2, o.get(1, 1), 0.01);

  // combined layers
  connections = new ArrayList<>();
  connections.add(c1);
  connections.add(c2);
  connections.add(bc);
  nn = new NeuralNetworkImpl();
  nn.addConnections(connections.toArray(new Connections[connections.size()]));
  vp = TensorFactory.tensorProvider(nn, 2, true);

  i1 = vp.get(il1);
  i1.set(1, 0, 0);
  i1.set(2, 1, 0);

Examples of com.github.neuralnetworks.architecture.NeuralNetworkImpl

    @Test
    public void testSigmoidBP() {
  Environment.getInstance().setExecutionMode(EXECUTION_MODE.SEQ);
  Environment.getInstance().setUseWeightsSharedMemory(true);
  NeuralNetworkImpl mlp = NNFactory.mlpSigmoid(new int[] { 2, 2, 1 }, false);

  FullyConnected c1 = (FullyConnected) mlp.getInputLayer().getConnections().iterator().next();
  Matrix cg1 = c1.getWeights();
  cg1.set(0.1f, 0, 0);
  cg1.set(0.8f, 0, 1);
  cg1.set(0.4f, 1, 0);
  cg1.set(0.6f, 1, 1);

  FullyConnected c2 = (FullyConnected) mlp.getOutputLayer().getConnections().iterator().next();
  Matrix cg2 = c2.getWeights();
  cg2.set(0.3f, 0, 0);
  cg2.set(0.9f, 0, 1);

  BackPropagationTrainer<?> bpt = TrainerFactory.backPropagation(mlp, new SimpleInputProvider(new float[][] { { 0.35f, 0.9f } }, new float[][] { { 0.5f } }), new SimpleInputProvider(new float[][] { { 0.35f, 0.9f } }, new float[][] { { 0.5f } }), null, null, 1f, 0f, 0f, 0f, 0f, 1, 1, 1);
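
With these hand-set weights the first forward pass can be checked by hand: the hidden activations are sigmoid(0.1*0.35 + 0.8*0.9) = sigmoid(0.755) ≈ 0.680 and sigmoid(0.4*0.35 + 0.6*0.9) = sigmoid(0.68) ≈ 0.664, so the output is sigmoid(0.3*0.680 + 0.9*0.664) = sigmoid(0.801) ≈ 0.690 against the 0.5 target. The trainer is configured for a single backpropagation step over this one sample, with learning rate 1 and no momentum, weight decay, or dropout.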