Examples of NeuralNetworkImpl


Examples of com.github.neuralnetworks.architecture.NeuralNetworkImpl

    @Test
    public void testSigmoidBP2() {
        // Environment.getInstance().setExecutionMode(EXECUTION_MODE.SEQ);
        Environment.getInstance().setUseWeightsSharedMemory(true);

        // 3-2-1 sigmoid MLP with bias layers
        NeuralNetworkImpl mlp = NNFactory.mlpSigmoid(new int[] { 3, 2, 1 }, true);

        // seed the first (input -> hidden) weight matrix with fixed values
        List<Connections> c = mlp.getConnections();
        FullyConnected c1 = (FullyConnected) c.get(0);
        Matrix cg1 = c1.getWeights();
        cg1.set(0.2f, 0, 0);
        cg1.set(0.4f, 0, 1);
        cg1.set(-0.5f, 0, 2);
        // ... remainder of the test omitted
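The snippet above stops before the remaining weight and bias setup. As a minimal sketch (not from the original test, and assuming mlpSigmoid wires up a layer calculator just like the other NNFactory methods shown on this page), the same ValuesProvider/LayerCalculator pattern used in the maxout example below could run a forward pass through this 3-2-1 network once its weights are set:

        // hypothetical forward pass for a single input sample (one column)
        ValuesProvider results = TensorFactory.tensorProvider(mlp, 1, true);
        Matrix in = results.get(mlp.getInputLayer());
        in.set(1f, 0, 0);
        in.set(0f, 1, 0);
        in.set(1f, 2, 0);

        Set<Layer> calculated = new HashSet<>();
        calculated.add(mlp.getInputLayer());
        mlp.getLayerCalculator().calculate(mlp, mlp.getOutputLayer(), calculated, results);
        Matrix out = results.get(mlp.getOutputLayer());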

Examples of com.github.neuralnetworks.architecture.NeuralNetworkImpl

    @Test
    public void testSigmoidBPDropout() {
        // Environment.getInstance().setExecutionMode(EXECUTION_MODE.SEQ);
        Environment.getInstance().setUseWeightsSharedMemory(true);
        NeuralNetworkImpl mlp = NNFactory.mlpSigmoid(new int[] { 3, 2, 1 }, true);

        List<Connections> c = mlp.getConnections();
        FullyConnected c1 = (FullyConnected) c.get(0);
        Matrix cg1 = c1.getWeights();
        cg1.set(0.2f, 0, 0);
        cg1.set(0.4f, 0, 1);
        cg1.set(-0.5f, 0, 2);
        // ... remainder of the test omitted

Examples of com.github.neuralnetworks.architecture.NeuralNetworkImpl

    @Test
    public void testMaxoutFF() {
        // Environment.getInstance().setExecutionMode(EXECUTION_MODE.SEQ);
        Environment.getInstance().setUseWeightsSharedMemory(true);

        // 2-2 maxout network with bias
        NeuralNetworkImpl nn = NNFactory.maxout(new int[] { 2, 2 }, true, null);

        // weights of the input -> output connection
        List<Connections> c = nn.getConnections();
        FullyConnected c1 = (FullyConnected) c.get(0);
        Matrix cg1 = c1.getWeights();
        cg1.set(0.1f, 0, 0);
        cg1.set(0.5f, 0, 1);
        cg1.set(0.1f, 1, 0);
        cg1.set(0.5f, 1, 1);

        // bias weights
        FullyConnected cb1 = (FullyConnected) c.get(1);
        Matrix cgb1 = cb1.getWeights();
        cgb1.set(0.1f, 0, 0);
        cgb1.set(0.2f, 1, 0);

        // two input samples, one per column
        ValuesProvider results = TensorFactory.tensorProvider(nn, 2, true);
        Matrix in = results.get(nn.getInputLayer());
        in.set(8, 0, 0);
        in.set(2, 1, 0);
        in.set(1, 0, 1);
        in.set(7, 1, 1);

        // forward pass
        Set<Layer> calculated = new HashSet<>();
        calculated.add(nn.getInputLayer());
        nn.getLayerCalculator().calculate(nn, nn.getOutputLayer(), calculated, results);

        Matrix out = results.get(nn.getOutputLayer());
        assertEquals(1.1f, out.get(0, 0), 0f);
        assertEquals(1.2f, out.get(1, 0), 0f);
        assertEquals(3.6f, out.get(0, 1), 0f);
        assertEquals(3.7f, out.get(1, 1), 0f);
    }
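The expected values can be checked by hand: reading each maxout unit as taking the maximum over its weighted inputs and then adding its bias reproduces the assertions above (illustrative arithmetic only, not code from the original test):

        float out00 = Math.max(0.1f * 8, 0.5f * 2) + 0.1f; // max(0.8, 1.0) + 0.1 = 1.1
        float out10 = Math.max(0.1f * 8, 0.5f * 2) + 0.2f; // max(0.8, 1.0) + 0.2 = 1.2
        float out01 = Math.max(0.1f * 1, 0.5f * 7) + 0.1f; // max(0.1, 3.5) + 0.1 = 3.6
        float out11 = Math.max(0.1f * 1, 0.5f * 7) + 0.2f; // max(0.1, 3.5) + 0.2 = 3.7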

Examples of com.github.neuralnetworks.architecture.NeuralNetworkImpl

    @Test
    public void testMaxoutBP() {
        Environment.getInstance().setExecutionMode(EXECUTION_MODE.SEQ);
        Environment.getInstance().setUseWeightsSharedMemory(true);
        NeuralNetworkImpl nn = NNFactory.maxout(new int[] { 2, 2 }, true, null);

        List<Connections> c = nn.getConnections();
        FullyConnected c1 = (FullyConnected) c.get(0);
        Matrix cg1 = c1.getWeights();
        cg1.set(0.1f, 0, 0);
        cg1.set(0.5f, 0, 1);
        cg1.set(0.1f, 1, 0);
        cg1.set(0.5f, 1, 1);

        FullyConnected cb1 = (FullyConnected) c.get(1);
        Matrix cgb1 = cb1.getWeights();
        cgb1.set(0.1f, 0, 0);
        cgb1.set(0.2f, 1, 0);

        ValuesProvider results = TensorFactory.tensorProvider(nn, 2, true);
        Matrix in = results.get(nn.getInputLayer());
        in.set(8, 0, 0);
        in.set(2, 1, 0);
        in.set(1, 0, 1);
        in.set(7, 1, 1);
        // ... remainder of the test omitted

Examples of com.github.neuralnetworks.architecture.NeuralNetworkImpl

    @Test
    public void testSigmoidBP3() {
        Environment.getInstance().setUseDataSharedMemory(true);
        Environment.getInstance().setUseWeightsSharedMemory(true);

        NeuralNetworkImpl mlp = NNFactory.mlpSigmoid(new int[] { 3, 2, 1 }, true);

        List<Connections> c = mlp.getConnections();
        FullyConnected c1 = (FullyConnected) c.get(0);
        Matrix cg1 = c1.getWeights();
        cg1.set(0.2f, 0, 0);
        cg1.set(0.4f, 0, 1);
        cg1.set(-0.5f, 0, 2);
        // ... remainder of the test omitted

Examples of com.github.neuralnetworks.architecture.NeuralNetworkImpl

    public void testParallelNetworks() {
        Environment.getInstance().setExecutionMode(EXECUTION_MODE.SEQ);
        Environment.getInstance().setUseWeightsSharedMemory(true);

        // network with two parallel branches: input -> leaf1 -> output and input -> leaf2 -> output
        ConnectionFactory cf = new ConnectionFactory();
        NeuralNetworkImpl mlp = new NeuralNetworkImpl();
        Layer input = new Layer();
        Layer leaf1 = new Layer();
        Layer leaf2 = new Layer();
        Layer output = new Layer();

        mlp.addLayer(input);

        // input (2 units) -> leaf1 (3 units), all weights 0.1
        FullyConnected fc1 = cf.fullyConnected(input, leaf1, 2, 3);
        fc1.getWeights().forEach(i -> fc1.getWeights().getElements()[i] = 0.1f);
        mlp.addConnections(fc1);

        // input (2 units) -> leaf2 (3 units), all weights 0.2
        FullyConnected fc2 = cf.fullyConnected(input, leaf2, 2, 3);
        fc2.getWeights().forEach(i -> fc2.getWeights().getElements()[i] = 0.2f);
        mlp.addConnections(fc2);

        // leaf1 (3 units) -> output (1 unit), all weights 0.3
        FullyConnected fc3 = cf.fullyConnected(leaf1, output, 3, 1);
        fc3.getWeights().forEach(i -> fc3.getWeights().getElements()[i] = 0.3f);
        mlp.addConnections(fc3);

        // leaf2 (3 units) -> output (1 unit), all weights 0.4
        FullyConnected fc4 = cf.fullyConnected(leaf2, output, 3, 1);
        fc4.getWeights().forEach(i -> fc4.getWeights().getElements()[i] = 0.4f);
        mlp.addConnections(fc4);

        // plain weighted-sum layer calculator (no activation function)
        mlp.setLayerCalculator(NNFactory.lcWeightedSum(mlp, null));

        Set<Layer> calculated = new HashSet<>();
        calculated.add(mlp.getInputLayer());

        // single sample with both inputs set to 2
        ValuesProvider results = TensorFactory.tensorProvider(mlp, 1, true);
        results.get(mlp.getInputLayer()).set(2, 0, 0);
        results.get(mlp.getInputLayer()).set(2, 1, 0);

        mlp.getLayerCalculator().calculate(mlp, output, calculated, results);

        assertEquals(1.32, results.get(output).get(0, 0), 0.000001);
    }
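The asserted value 1.32 can be reproduced by hand: with pure weighted sums, both inputs at 2, and uniform weights per connection, the two branches contribute 0.36 and 0.96 (illustrative arithmetic only, not code from the original test):

        float leaf1Unit = 0.1f * 2 + 0.1f * 2;                           // 0.4 for each of the 3 leaf1 units
        float leaf2Unit = 0.2f * 2 + 0.2f * 2;                           // 0.8 for each of the 3 leaf2 units
        float outputValue = 3 * 0.3f * leaf1Unit + 3 * 0.4f * leaf2Unit; // 0.36 + 0.96 = 1.32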

Examples of com.github.neuralnetworks.architecture.NeuralNetworkImpl

    @Test
    public void testRemoveLayer() {
        Environment.getInstance().setUseWeightsSharedMemory(true);

        // 3-4-5 MLP with bias: 3 neuron layers + 2 bias layers = 5 layers
        NeuralNetworkImpl mlp = NNFactory.mlp(new int[] { 3, 4, 5 }, true);
        assertEquals(5, mlp.getLayers().size(), 0);

        // removing the output layer also drops its bias layer, leaving 3 layers
        Layer currentOutput = mlp.getOutputLayer();
        mlp.removeLayer(mlp.getOutputLayer());
        assertEquals(3, mlp.getLayers().size(), 0);
        assertEquals(true, currentOutput != mlp.getOutputLayer());
    }

Examples of com.github.neuralnetworks.architecture.NeuralNetworkImpl

    @Test
    public void testLayerOrderStrategy() {
        Environment.getInstance().setUseWeightsSharedMemory(true);

        // MLP: 3-4-5 with bias -> 4 connections to order (2 weight + 2 bias)
        NeuralNetworkImpl mlp = NNFactory.mlp(new int[] { 3, 4, 5 }, true);

        Set<Layer> calculated = new HashSet<Layer>();
        calculated.add(mlp.getInputLayer());
        List<ConnectionCandidate> ccc = new TargetLayerOrderStrategy(mlp, mlp.getOutputLayer(), calculated).order();
        assertEquals(4, ccc.size(), 0);
        Layer l = mlp.getInputLayer();
        assertTrue(ccc.get(0).connection == l.getConnections().get(0));
        l = l.getConnections().get(0).getOutputLayer();
        assertTrue(ccc.get(1).connection == l.getConnections().get(1));
        assertTrue(ccc.get(2).connection == l.getConnections().get(2));
        l = l.getConnections().get(2).getOutputLayer();
        assertTrue(ccc.get(3).connection == l.getConnections().get(1));

        ccc = new BreadthFirstOrderStrategy(mlp, mlp.getOutputLayer()).order();
        assertEquals(4, ccc.size(), 0);
        l = mlp.getOutputLayer();
        assertTrue(ccc.get(0).connection == l.getConnections().get(0));
        assertTrue(ccc.get(1).connection == l.getConnections().get(1));

        l = l.getConnections().get(0).getInputLayer();
        assertTrue(ccc.get(2).connection == l.getConnections().get(0));
        assertTrue(ccc.get(3).connection == l.getConnections().get(1));

        // Simple MLP: 3-4 with bias -> 2 connections to order
        mlp = NNFactory.mlp(new int[] { 3, 4 }, true);

        calculated = new HashSet<Layer>();
        calculated.add(mlp.getInputLayer());
        ccc = new TargetLayerOrderStrategy(mlp, mlp.getOutputLayer(), calculated).order();
        assertEquals(2, ccc.size(), 0);
        l = mlp.getOutputLayer();
        assertTrue(ccc.get(0).connection == l.getConnections().get(0));
        assertTrue(ccc.get(1).connection == l.getConnections().get(1));

        ccc = new BreadthFirstOrderStrategy(mlp, mlp.getOutputLayer()).order();
        assertEquals(2, ccc.size(), 0);
        l = mlp.getOutputLayer();
        assertTrue(ccc.get(0).connection == l.getConnections().get(0));
        assertTrue(ccc.get(1).connection == l.getConnections().get(1));

        // CNN: again 2 connections feeding the output layer
        NeuralNetworkImpl cnn = NNFactory.convNN(new int[][] { { 3, 3, 2 }, { 2, 2, 1, 1 } }, true);

        calculated = new HashSet<Layer>();
        calculated.add(cnn.getInputLayer());
        ccc = new TargetLayerOrderStrategy(cnn, cnn.getOutputLayer(), calculated).order();
        l = cnn.getOutputLayer();
        assertEquals(2, ccc.size(), 0);
        assertTrue(ccc.get(0).connection == l.getConnections().get(0));
        assertTrue(ccc.get(1).connection == l.getConnections().get(1));

        ccc = new BreadthFirstOrderStrategy(cnn, cnn.getOutputLayer()).order();
        l = cnn.getOutputLayer();
        assertEquals(2, ccc.size(), 0);
        assertTrue(ccc.get(0).connection == l.getConnections().get(0));
        assertTrue(ccc.get(1).connection == l.getConnections().get(1));
    }

Examples of com.github.neuralnetworks.architecture.NeuralNetworkImpl

        Environment.getInstance().setExecutionMode(EXECUTION_MODE.SEQ);

        // create multi layer perceptron with one hidden layer and bias
        Environment.getInstance().setUseWeightsSharedMemory(false);
        Environment.getInstance().setUseDataSharedMemory(false);
        NeuralNetworkImpl mlp = NNFactory.mlpSigmoid(new int[] { 2, 2, 1 }, true);

        // create training and testing input providers (XOR truth table)
        SimpleInputProvider input = new SimpleInputProvider(new float[][] { { 0, 0 }, { 0, 1 }, { 1, 0 }, { 1, 1 } }, new float[][] { { 0 }, { 1 }, { 1 }, { 0 } });

        // create backpropagation trainer for the network
        // ... remainder omitted

Examples of com.github.neuralnetworks.architecture.NeuralNetworkImpl

        Environment.getInstance().setExecutionMode(EXECUTION_MODE.SEQ);

        // create multi layer perceptron with one hidden layer and bias
        Environment.getInstance().setUseWeightsSharedMemory(false);
        Environment.getInstance().setUseDataSharedMemory(false);
        NeuralNetworkImpl mlp = NNFactory.mlpSigmoid(new int[] { 2, 2, 1 }, true);

        // [-5.744886, -5.7570715, -7.329507, -7.33055] - l1-l2
        // [8.59142, 3.1430812] - bias l2
        // [12.749131, -12.848652] - l2-l3
        // [-6.1552725] - bias l3

        // weights
        FullyConnected fc1 = (FullyConnected) mlp.getInputLayer().getConnections().get(0);
        fc1.getWeights().set(-5.744886f, 0, 0);
        fc1.getWeights().set(-5.7570715f, 0, 1);
        fc1.getWeights().set(-7.329507f, 1, 0);
        fc1.getWeights().set(-7.33055f, 1, 1);

        FullyConnected b1 = (FullyConnected) fc1.getOutputLayer().getConnections().get(1);
        b1.getWeights().set(8.59142f, 0, 0);
        b1.getWeights().set(3.1430812f, 1, 0);

        FullyConnected fc2 = (FullyConnected) mlp.getOutputLayer().getConnections().get(0);
        fc2.getWeights().set(12.749131f, 0, 0);
        fc2.getWeights().set(-12.848652f, 0, 1);

        FullyConnected b2 = (FullyConnected) fc2.getOutputLayer().getConnections().get(1);
        b2.getWeights().set(-6.1552725f, 0, 0);
        // ... remainder omitted
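As a hand check (not part of the original snippet), these hard-coded weights appear to implement XOR: pushing an input through two sigmoid hidden units and the sigmoid output with the layer/bias ordering assumed from the snippet above reproduces the expected targets, e.g. for input (1, 0):

        // manual forward pass for input (1, 0)
        double h1 = 1d / (1d + Math.exp(-(-5.744886 * 1 + -5.7570715 * 0 + 8.59142)));     // ~0.945
        double h2 = 1d / (1d + Math.exp(-(-7.329507 * 1 + -7.33055 * 0 + 3.1430812)));     // ~0.015
        double out = 1d / (1d + Math.exp(-(12.749131 * h1 - 12.848652 * h2 - 6.1552725))); // ~0.997, i.e. XOR(1, 0) = 1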