Examples of NeuralNetworkImpl


Examples of com.github.neuralnetworks.architecture.NeuralNetworkImpl

    @Ignore
    @Test
    public void testSigmoidHiddenBP() {
        NeuralNetworkImpl mlp = NNFactory.mlpSigmoid(new int[] { 784, 300, 100, 10 }, true);

        // MNIST training and test data, with pixel values scaled from [0, 255] to [0, 1]
        MnistInputProvider trainInputProvider = new MnistInputProvider("train-images.idx3-ubyte", "train-labels.idx1-ubyte", 1, 2, new MnistTargetMultiNeuronOutputConverter());
        trainInputProvider.addInputModifier(new ScalingInputFunction(255));
        MnistInputProvider testInputProvider = new MnistInputProvider("t10k-images.idx3-ubyte", "t10k-labels.idx1-ubyte", 1000, 1, new MnistTargetMultiNeuronOutputConverter());
        testInputProvider.addInputModifier(new ScalingInputFunction(255));
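Judging from the factory methods reproduced further down this page, NNFactory.mlpSigmoid is a thin convenience wrapper, so the network above could equally be built in two steps; a minimal sketch:

    NeuralNetworkImpl mlp = NNFactory.mlp(new int[] { 784, 300, 100, 10 }, true);
    mlp.setLayerCalculator(NNFactory.lcSigmoid(mlp, null));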

Examples of com.github.neuralnetworks.architecture.NeuralNetworkImpl

    @Test
    public void testLeNetSmall() {
        // LeNet-style convolutional network
        NeuralNetworkImpl nn = NNFactory.convNN(new int[][] { { 28, 28, 1 }, { 5, 5, 20, 1 }, { 2, 2 }, { 5, 5, 50, 1 }, { 2, 2 }, { 512 }, { 10 } }, true);
        nn.setLayerCalculator(NNFactory.lcSigmoid(nn, null));
        NNFactory.lcMaxPooling(nn);

        // MNIST dataset provider, with pixel values scaled from [0, 255] to [0, 1]
        MnistInputProvider trainInputProvider = new MnistInputProvider("train-images.idx3-ubyte", "train-labels.idx1-ubyte", 1, 1, new MnistTargetMultiNeuronOutputConverter());
        trainInputProvider.addInputModifier(new ScalingInputFunction(255));
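Assuming stride-1, unpadded convolutions and non-overlapping pooling regions (the usual LeNet conventions; reading the last element of each convolutional row as the stride is an inference from the factory code below), the spec above yields the following geometry:

    // { 28, 28, 1 }    input:           28 x 28 x 1
    // { 5, 5, 20, 1 }  convolution:     24 x 24 x 20  (28 - 5 + 1 = 24)
    // { 2, 2 }         max pooling:     12 x 12 x 20
    // { 5, 5, 50, 1 }  convolution:      8 x  8 x 50  (12 - 5 + 1 = 8)
    // { 2, 2 }         max pooling:      4 x  4 x 50  (= 800 units)
    // { 512 }          fully connected: 512 units
    // { 10 }           fully connected:  10 outputs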

Examples of com.github.neuralnetworks.architecture.NeuralNetworkImpl

    @Test
    public void testLeNetTiny() {
        // very simple convolutional network with a single 2x2 max pooling layer
        NeuralNetworkImpl nn = NNFactory.convNN(new int[][] { { 28, 28, 1 }, { 2, 2 }, { 10 } }, true);
        nn.setLayerCalculator(NNFactory.lcSigmoid(nn, null));
        NNFactory.lcMaxPooling(nn);

        // MNIST dataset, with pixel values scaled from [0, 255] to [0, 1]
        MnistInputProvider trainInputProvider = new MnistInputProvider("train-images.idx3-ubyte", "train-labels.idx1-ubyte", 1, 1, new MnistTargetMultiNeuronOutputConverter());
        trainInputProvider.addInputModifier(new ScalingInputFunction(255));
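With no convolutional layer in front, the 2x2 max pooling simply downsamples the 28x28 input to 14x14 (196 units), which feeds the 10-unit output layer directly, assuming non-overlapping pooling regions.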

Examples of com.github.neuralnetworks.architecture.NeuralNetworkImpl

    /**
     * MNIST small LeNet network
     */
    @Test
    public void testLeNetTiny2() {
        // simple convolutional network: one convolutional layer with six 5x5 filters, followed by a single 2x2 max pooling layer
        NeuralNetworkImpl nn = NNFactory.convNN(new int[][] { { 28, 28, 1 }, { 5, 5, 6, 1 }, { 2, 2 }, { 10 } }, true);
        nn.setLayerCalculator(NNFactory.lcSigmoid(nn, null));
        NNFactory.lcMaxPooling(nn);

        // MNIST dataset, with pixel values scaled from [0, 255] to [0, 1]
        MnistInputProvider trainInputProvider = new MnistInputProvider("train-images.idx3-ubyte", "train-labels.idx1-ubyte", 1, 1, new MnistTargetMultiNeuronOutputConverter());
        trainInputProvider.addInputModifier(new ScalingInputFunction(255));
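Under the same assumptions as above (stride 1, no padding), the 5x5 convolution maps the 28x28 input to 24x24 across six feature maps, and the 2x2 pooling reduces that to 12 x 12 x 6 = 864 units feeding the 10-unit output.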

Examples of com.github.neuralnetworks.architecture.NeuralNetworkImpl

    public static NeuralNetworkImpl convNN(int[][] layers, boolean addBias) {
        if (layers[0].length != 3) {
            throw new IllegalArgumentException("first layer must be convolutional");
        }

        NeuralNetworkImpl result = new NeuralNetworkImpl();

        Layer prev = null;
        int prevUnitCount = layers[0][0] * layers[0][1] * layers[0][2];
        result.addLayer(prev = new Layer());
        for (int i = 1; i < layers.length; i++) {
            int[] l = layers[i];
            Layer newLayer = null;
            Layer biasLayer = null;
            if (l.length == 1) {
                // {units} -> fully connected layer
                new FullyConnected(prev, newLayer = new Layer(), prevUnitCount, l[0]);
                if (addBias) {
                    new FullyConnected(biasLayer = new Layer(), newLayer, 1, l[0]);
                }

                prevUnitCount = l[0];
            } else if (l.length == 4 || l.length == 2) {
                // recover the feature-map geometry produced by the previous connection
                Integer inputFMRows = null;
                Integer inputFMCols = null;
                Integer filters = null;
                if (i == 1) {
                    inputFMRows = layers[0][0];
                    inputFMCols = layers[0][1];
                    filters = layers[0][2];
                } else {
                    for (Connections c : prev.getConnections()) {
                        if (c.getOutputLayer() == prev) {
                            if (c instanceof Conv2DConnection) {
                                Conv2DConnection cc = (Conv2DConnection) c;
                                inputFMRows = cc.getOutputFeatureMapRows();
                                inputFMCols = cc.getOutputFeatureMapColumns();
                                filters = cc.getOutputFilters();
                                break;
                            } else if (c instanceof Subsampling2DConnection) {
                                Subsampling2DConnection sc = (Subsampling2DConnection) c;
                                inputFMRows = sc.getOutputFeatureMapRows();
                                inputFMCols = sc.getOutputFeatureMapColumns();
                                filters = sc.getFilters();
                                break;
                            }
                        }
                    }
                }

                if (l.length == 4) {
                    // {kernelRows, kernelCols, filters, stride} -> convolutional connection
                    Conv2DConnection c = new Conv2DConnection(prev, newLayer = new Layer(), inputFMRows, inputFMCols, filters, l[0], l[1], l[2], l[3]);
                    if (addBias) {
                        new Conv2DConnection(biasLayer = new Layer(), newLayer, c.getOutputFeatureMapRows(), c.getOutputFeatureMapColumns(), 1, 1, 1, l[2], l[3]);
                    }

                    prevUnitCount = c.getOutputUnitCount();
                } else if (l.length == 2) {
                    // {regionRows, regionCols} -> subsampling (pooling) connection
                    Subsampling2DConnection c = new Subsampling2DConnection(prev, newLayer = new Layer(), inputFMRows, inputFMCols, l[0], l[1], filters);
                    prevUnitCount = c.getOutputUnitCount();
                }
            }

            result.addLayer(newLayer);
            if (biasLayer != null) {
                result.addLayer(biasLayer);
            }

            prev = newLayer;
        }

        return result;
    }

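Reading the branches above, each row of the spec array dispatches on its length. As a quick reference (the stride reading of the fourth element is an inference from the constructor call, not something documented here):

    // layers[0]: {rows, cols, filters}                       input feature maps (must have length 3)
    // length 4:  {kernelRows, kernelCols, filters, stride}   Conv2DConnection
    // length 2:  {regionRows, regionCols}                    Subsampling2DConnection (max pooling after lcMaxPooling)
    // length 1:  {units}                                     FullyConnected
    // A hypothetical 32x32 RGB example under this reading:
    NeuralNetworkImpl nn = NNFactory.convNN(new int[][] { { 32, 32, 3 }, { 5, 5, 16, 1 }, { 2, 2 }, { 10 } }, true);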

Examples of com.github.neuralnetworks.architecture.NeuralNetworkImpl

    public static NeuralNetworkImpl mlp(int[] layers, boolean addBias) {
        if (layers.length <= 1) {
            throw new IllegalArgumentException("more than one layer is required");
        }

        NeuralNetworkImpl result = new NeuralNetworkImpl();
        // input layer first, then one fully connected (plus optional bias) layer per remaining entry
        addFullyConnectedLayer(result, new Layer(), layers[0], layers[0], addBias);
        for (int i = 1; i < layers.length; i++) {
            addFullyConnectedLayer(result, new Layer(), layers[i - 1], layers[i], addBias);
        }

        return result;
    }
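Note that mlp only wires the layers together; no activation is attached until a layer calculator is set on the network, which is precisely what the mlpSigmoid wrapper below adds. A minimal sketch:

    NeuralNetworkImpl net = NNFactory.mlp(new int[] { 784, 10 }, false);
    net.setLayerCalculator(NNFactory.lcSigmoid(net, null)); // activation attached separately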

Examples of com.github.neuralnetworks.architecture.NeuralNetworkImpl

    public static NeuralNetworkImpl mlpSigmoid(int[] layers, boolean addBias) {
        NeuralNetworkImpl result = mlp(layers, addBias);
        result.setLayerCalculator(lcSigmoid(result, null));
        return result;
    }

Examples of com.github.neuralnetworks.architecture.NeuralNetworkImpl

    public static NeuralNetworkImpl mlpSoftRelu(int[] layers, boolean addBias, ConnectionCalculator outputCC) {
        NeuralNetworkImpl result = mlp(layers, addBias);
        result.setLayerCalculator(lcSoftRelu(result, outputCC));
        return result;
    }

Examples of com.github.neuralnetworks.architecture.NeuralNetworkImpl

    public static NeuralNetworkImpl mlpRelu(int[] layers, boolean addBias, ConnectionCalculator outputCC) {
        NeuralNetworkImpl result = mlp(layers, addBias);
        result.setLayerCalculator(lcRelu(result, outputCC));
        return result;
    }

Examples of com.github.neuralnetworks.architecture.NeuralNetworkImpl

    public static NeuralNetworkImpl mlpTanh(int[] layers, boolean addBias, ConnectionCalculator outputCC) {
        NeuralNetworkImpl result = mlp(layers, addBias);
        result.setLayerCalculator(lcTanh(result, outputCC));
        return result;
    }
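The four activation-specific wrappers above differ only in the layer calculator they install. The outputCC argument apparently lets the caller substitute a different ConnectionCalculator for the output layer; passing null keeps the default, as the sigmoid tests earlier on this page do. A sketch under that assumption:

    NeuralNetworkImpl relu = NNFactory.mlpRelu(new int[] { 784, 300, 10 }, true, null); // null: default output calculator (assumed)
    NeuralNetworkImpl tanh = NNFactory.mlpTanh(new int[] { 784, 300, 10 }, true, null);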