Package org.fnlp.ml.types.alphabet

Examples of org.fnlp.ml.types.alphabet.LabelAlphabet
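
LabelAlphabet maintains a two-way mapping between label strings and consecutive integer indices. The pattern shared by the snippets below: obtain the alphabet from an AlphabetFactory, grow it with lookupIndex(String) while reading training data, freeze it with setStopIncrement(true), and map predicted indices back to strings with lookupString. A minimal sketch of that pattern, assuming indices are assigned from 0 in insertion order (as the dictionary comments in the examples state); the class name and label strings here are hypothetical:

    import org.fnlp.ml.types.alphabet.AlphabetFactory;
    import org.fnlp.ml.types.alphabet.LabelAlphabet;

    public class LabelAlphabetSketch {
      public static void main(String[] args) {
        AlphabetFactory factory = AlphabetFactory.buildFactory();
        LabelAlphabet labels = factory.DefaultLabelAlphabet();

        // lookupIndex assigns the next free index to an unseen label
        // and returns the existing index for a known one.
        int b = labels.lookupIndex("B-NP");      // 0
        int i = labels.lookupIndex("I-NP");      // 1
        int again = labels.lookupIndex("B-NP");  // 0 again

        // lookupString inverts the mapping.
        String s = labels.lookupString(i);       // "I-NP"

        // Freeze the alphabet before decoding so unseen labels
        // are no longer added.
        labels.setStopIncrement(true);
        System.out.println(labels.size());       // 2
      }
    }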


      templets = new TempletGroup();
      templets.load(templateFile);
    }

    // label set
    LabelAlphabet labels = factory.DefaultLabelAlphabet();
    // feature set
    IFeatureAlphabet features = factory.DefaultFeatureAlphabet();

    featurePipe = new Sequence2FeatureSequence(templets, features, labels);
View Full Code Here


    BufferedReader rd = new BufferedReader(
        new InputStreamReader(new FileInputStream(from), "gbk"));
   
    ObjectOutputStream out = new ObjectOutputStream(new BufferedOutputStream(
        new GZIPOutputStream(new FileOutputStream(to))));
    AlphabetFactory factory = AlphabetFactory.buildFactory();
    LabelAlphabet labels = factory.DefaultLabelAlphabet();
    IFeatureAlphabet features = factory.DefaultFeatureAlphabet();
    String s;
    rd.readLine();                   // version
    List<String> lst = new ArrayList<String>();      // template
    while(true) {
      s = rd.readLine();
      if(s == null || s.isEmpty()) break;
      lst.add(s);
    }
    out.writeInt(lst.size());
    Iterator<String> it1 = lst.iterator();
    while(it1.hasNext()) {
      out.writeObject(it1.next());
    }
   
    s = rd.readLine();          //#label
    int nLabel = Integer.parseInt(s);
    System.out.println(nLabel);
    for(int i=0; i<nLabel; i++) {
      s = rd.readLine();        //label
      labels.lookupIndex(s);
    }
    out.writeObject(labels);
    rd.readLine();            //blank line
    rd.readLine();            //#column
    rd.readLine();            //blank line
View Full Code Here
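
As the writeObject call above (and the loadObject cast in the KMeansWordCluster constructor further down) implies, a populated LabelAlphabet can be persisted with plain Java object serialization. A minimal round-trip sketch, assuming LabelAlphabet implements java.io.Serializable; the helper class and file path are hypothetical:

    import java.io.*;
    import java.util.zip.GZIPInputStream;
    import java.util.zip.GZIPOutputStream;
    import org.fnlp.ml.types.alphabet.LabelAlphabet;

    public class AlphabetIO {
      // Write the alphabet as a gzipped serialized object.
      static void save(LabelAlphabet labels, String path) throws IOException {
        ObjectOutputStream out = new ObjectOutputStream(new BufferedOutputStream(
            new GZIPOutputStream(new FileOutputStream(path))));
        out.writeObject(labels);
        out.close();
      }

      // Read it back with the matching input stream chain.
      static LabelAlphabet load(String path) throws IOException, ClassNotFoundException {
        ObjectInputStream in = new ObjectInputStream(new BufferedInputStream(
            new GZIPInputStream(new FileInputStream(path))));
        LabelAlphabet labels = (LabelAlphabet) in.readObject();
        in.close();
        return labels;
      }
    }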

    int len = 0;
    Loss loss = new HammingLoss();

    String[][] predictSet = new String[testSet.size()][];
    String[][] goldSet = new String[testSet.size()][];
    LabelAlphabet la = cl.getAlphabetFactory().DefaultLabelAlphabet();
    for (int i = 0; i < testSet.size(); i++) {
      Instance carrier = testSet.get(i);
      int[] pred = (int[]) cl.classify(carrier).getLabel(0);
      if (hasLabel) {
        len += pred.length;
        float e = loss.calc(carrier.getTarget(), pred);
        error += e;
        if(e != 0)
          senError++;

      }
      predictSet[i] = la.lookupString(pred);
      if(hasLabel)
        goldSet[i] = la.lookupString((int[])carrier.getTarget());
    }

    long endtime = System.currentTimeMillis();
    System.out.println("Total time:\t" + (endtime - starttime) / 1000.0);
    System.out.println("Feature extraction time:\t" + (featuretime - starttime) / 1000.0);
View Full Code Here

    factory = AlphabetFactory.buildFactory();

    /**
     * Label dictionary; labels are mapped to 0, 1, 2, ...
     */
    LabelAlphabet labels = factory.DefaultLabelAlphabet();
    /**
     * Feature dictionary
     */
    IFeatureAlphabet features = factory.DefaultFeatureAlphabet();
    // extract features from the samples via the pipe
   
    featurePipe = new Sequence2FeatureSequence(templets, features, labels);

    Pipe pipe = new SeriesPipes(new Pipe[] { new Target2Label(labels), featurePipe });


    System.out.print("Reading training data ...");
    InstanceSet trainSet = new InstanceSet(pipe, factory);

    // training set
    trainSet.loadThruStagePipes(new SequenceReader(train, true, "utf8"));
    System.out.println("Number of training samples: " + trainSet.size());
    System.out.println("Number of labels: " + labels.size());
    System.out.println("Number of features: " + features.size());

    // freeze the feature set
    features.setStopIncrement(true);
    labels.setStopIncrement(true);


    // Viterbi decoding
    HammingLoss loss = new HammingLoss();
    Inferencer inference = new LinearViterbi(templets, labels.size());
    Update update = new LinearViterbiPAUpdate((LinearViterbi) inference, loss);


    OnlineTrainer trainer = new OnlineTrainer(inference, update, loss,
        features.size(), 50,0.1f);

    Linear cl = trainer.train(trainSet);


    // the test data is unlabeled
    Pipe tpipe = featurePipe;
    // test set
    InstanceSet testSet = new InstanceSet(tpipe);

    testSet.loadThruPipes(new SequenceReader(testfile, false, "utf8"));
    System.out.println("Number of test samples: " + testSet.size());
    String[][] labelsSet = new String[testSet.size()][];
    for (int i = 0; i < testSet.size(); i++) {
      Instance carrier = testSet.get(i);
      int[] pred = (int[]) cl.classify(carrier).getLabel(0);
      labelsSet[i] = labels.lookupString(pred);
    }
   
    String s = SimpleFormatter.format(testSet, labelsSet);
    System.out.println(s);
    System.out.println("Done");
View Full Code Here

//    BufferedWriter out = new BufferedWriter(new OutputStreamWriter(
//        new FileOutputStream(outfile), enc2));
    StopWords sw = new StopWords(stopwordfile);
   
    LabelAlphabet dict = new LabelAlphabet();
    // words in documents
    ArrayList<TIntArrayList> documentsList = new ArrayList<TIntArrayList>();
   
   
    String line = null;
    while ((line = in.readLine()) != null) {
      line = line.trim();
      if(line.length()==0)
        continue;
      String[] toks = line.split("\\s+");
      TIntArrayList wordlist = new TIntArrayList();
      for(int j=0;j<toks.length;j++){
        String tok = toks[j];
        if(sw.isStopWord(tok))
          continue;
        int idx = dict.lookupIndex(tok);
        wordlist.add(idx);
      }
      documentsList.add(wordlist);
    }
    in.close();
    int[][] documents;
    documents = new int[documentsList.size()][];
    for(int i=0;i<documents.length;i++){
      documents[i] = documentsList.get(i).toArray();
    }
        // vocabulary
        int V = dict.size();
        int M = documents.length;
        // # topics
        int K = 4;
        // good values alpha = 2, beta = .5
        float alpha = 2f;
        float beta = .5f;

        System.out.println("Latent Dirichlet Allocation using Gibbs Sampling.");

        LdaGibbsSampler lda = new LdaGibbsSampler(documents, V);
        lda.configure(10000, 2000, 100, 10);
        lda.gibbs(K, alpha, beta);

        float[][] theta = lda.getTheta();
        float[][] phi = lda.getPhi();

        System.out.println();
        System.out.println();
        System.out.println("Document--Topic Associations, Theta[d][k] (alpha="
            + alpha + ")");
        System.out.print("d\\k\t");
        for (int m = 0; m < theta[0].length; m++) {
            System.out.print("   " + m % 10 + "    ");
        }
        System.out.println();
        for (int m = 0; m < theta.length; m++) {
            System.out.print(m + "\t");
            for (int k = 0; k < theta[m].length; k++) {
                // System.out.print(theta[m][k] + " ");
                System.out.print(shadefloat(theta[m][k], 1) + " ");
            }
            System.out.println();
        }
        System.out.println();
        System.out.println("Topic--Term Associations, Phi[k][w] (beta=" + beta
            + ")");

        System.out.print("k\\w\t");
        for (int w = 0; w < phi[0].length; w++) {
            System.out.print("   " + dict.lookupString(w) + "    ");
        }
        System.out.println();
        for (int k = 0; k < phi.length; k++) {
            System.out.print(k + "\t");
            for (int w = 0; w < phi[k].length; w++) {
              System.out.print(lnf.format(phi[k][w]) + " ");
//              System.out.print(phi[k][w] + " ");
//                System.out.print(shadefloat(phi[k][w], 1) + " ");
            }
            System.out.println();
        }
        for (int k = 0; k < phi.length; k++) {
          int[] top = MyArrays.sort(phi[k]);
         
            for (int w = 0; w < 10; w++) {
              System.out.print(dict.lookupString(top[w]) + " ");
            }
            System.out.println();
        }
    }
View Full Code Here

    }

    public KMeansWordCluster(String alphabetPath, String classCenterPath, String templatePath, String classPath) throws Exception {
        readTemplete(templatePath);
        readClass(classPath);
        LabelAlphabet alphabetRead = (LabelAlphabet)loadObject(alphabetPath);
        @SuppressWarnings("unchecked")
        ArrayList<HashSparseVector> classCenterRead = (ArrayList<HashSparseVector>)loadObject(classCenterPath);
        setAlphabet(alphabetRead);
        setClassCenter(classCenterRead);
        addClassCount();
View Full Code Here

    // TODO: change the dictionary type
    AlphabetFactory.defaultFeatureType = Type.String;
    /**
     * Labels are mapped to 0, 1, 2, ...
     */
    LabelAlphabet labels = factory.DefaultLabelAlphabet();

    // extract features from the samples via the pipe
    IFeatureAlphabet features = factory.DefaultFeatureAlphabet();
    featurePipe = new Sequence2FeatureSequence(templets, features, labels);

View Full Code Here

    addEnTag(cl,file);
    cl.saveTo(file);
  }

  private void addEnTag(AbstractTagger cl, String file) throws IOException {
    LabelAlphabet label = cl.factory.DefaultLabelAlphabet();
    HashMap<String, String> map = MyCollection.loadStringStringMap(c2ePath);
    cl.factory.remove("label-en");
    LabelAlphabet enLabel = cl.factory.buildLabelAlphabet("label-en");
    enLabel.clear();
    enLabel.setStopIncrement(false);
    for(int i=0;i<label.size();i++){
      String cn = label.lookupString(i);
      String en = map.get(cn);     
      if(en==null){
        System.out.println("POSTag Not Found: "+cn);
        continue;    // skip tags with no English mapping
      }
      int id = enLabel.lookupIndex(en);
    }
    enLabel.setStopIncrement(true);
  }
View Full Code Here
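
The addEnTag snippet above also shows the factory's named-alphabet API: remove(String) drops an existing alphabet, buildLabelAlphabet(String) creates one under that name, and setStopIncrement(false) reopens it for growth. A condensed sketch of that rebuild pattern, using only the calls that appear above; the class name and tag strings are hypothetical:

    import org.fnlp.ml.types.alphabet.AlphabetFactory;
    import org.fnlp.ml.types.alphabet.LabelAlphabet;

    public class RebuildSketch {
      // Rebuild a named label alphabet from scratch, mirroring addEnTag() above.
      static LabelAlphabet rebuild(AlphabetFactory factory, String name, String[] tags) {
        factory.remove(name);
        LabelAlphabet a = factory.buildLabelAlphabet(name);
        a.clear();
        a.setStopIncrement(false);   // allow new entries
        for (String t : tags)
          a.lookupIndex(t);
        a.setStopIncrement(true);    // freeze again
        return a;
      }
    }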

      factory = AlphabetFactory.buildFactory();

    /**
     * Labels are mapped to 0, 1, 2, ...
     */
    LabelAlphabet labels = factory.DefaultLabelAlphabet();

    // extract features from the samples via the pipe
    // features are rebuilt here; the labels do not need rebuilding
    // no feature rebuilding is needed at test time
    IFeatureAlphabet features = null;
View Full Code Here

    long beginTime = System.currentTimeMillis();

    Pipe pipe = createProcessor(false);
    InstanceSet trainSet = new InstanceSet(pipe, factory);

    LabelAlphabet labels = factory.DefaultLabelAlphabet();
    IFeatureAlphabet features = factory.DefaultFeatureAlphabet();

    // training set
    trainSet.loadThruStagePipes(new SequenceReader(train, true, "utf8"));

    long endTime = System.currentTimeMillis();
    System.out.println(" done!");
    System.out.println("Time elapsed: " + (endTime - beginTime) / 1000 + "s");
    System.out.println();

    // output
    System.out.println("Training Number: " + trainSet.size());

    System.out.println("Label Number: " + labels.size()); // number of labels
    System.out.println("Feature Number: " + features.size()); // number of features

    // freeze the feature set
    features.setStopIncrement(true);
    labels.setStopIncrement(true);

    InstanceSet testSet = null;
    // /////////////////
    if (testfile != null) {

      Pipe tpipe;
      if (false) { // if the test data is unlabeled
        tpipe = new SeriesPipes(new Pipe[] { featurePipe });
      } else {
        tpipe = pipe;
      }

      // test set
      testSet = new InstanceSet(tpipe);

      testSet.loadThruStagePipes(new SequenceReader(testfile, true, "utf8"));
      System.out.println("Test Number: " + testSet.size()); // number of samples
    }

    /**
     * Criterion for updating the parameters
     */
    Update update;
    // Viterbi decoding
    Inferencer inference;
    boolean standard = true;
    HammingLoss loss = new HammingLoss();
    if (standard) {
      inference = new LinearViterbi(templets, labels.size());
      update = new LinearViterbiPAUpdate((LinearViterbi) inference, loss);
    } else {
      inference = new HigherOrderViterbi(templets, labels.size());
      update = new HigherOrderViterbiPAUpdate(templets, labels.size(), true);
    }

    OnlineTrainer trainer = new OnlineTrainer(inference, update, loss,
        features.size(), iterNum, c1);
   
View Full Code Here
