Package: uk.ac.cam.ch.wwmm.ptclib.misc

Usage examples of uk.ac.cam.ch.wwmm.ptclib.misc.Stemmer


   */
  public static void main(String[] args) throws Exception {
    LuceneIndexerSearcher lis = new LuceneIndexerSearcher(false);
    IndexSearcher is = lis.getIndexSearcher();

    Stemmer stemmerTools = new Stemmer(new EnglishStemmer());
   
    //QueryParser qp = new Oscar3QueryParser("txt", new Oscar3Analyzer(), lis, false);
    //Query q = qp.parse("NaCl");
   
    String queryTerm = "lipid";
View Full Code Here


      double expected = scaleFactor * docFreq;
      double excess = df.getCount(s) - expected;
      score = excess / clusterSize;       
      if(score > threshold) scores.put(s, score);
    }
    Stemmer st = new Stemmer(new EnglishStemmer());
    Map<String,List<String>> stems = st.wordsToStems(df.getSet());
    for(String stem : stems.keySet()) {
      List<String> words = stems.get(stem);
      if(words.size() > 1) {
        BooleanQuery bq = new BooleanQuery(true);
        for(String word : words) {
View Full Code Here

      }
     

      clusterFiles.add(new File(ir.document(i).getField("filename").stringValue().replaceAll("markedup", "source")));
    }
    Stemmer st = new Stemmer(new EnglishStemmer());
    Map<String,List<String>> stems = st.wordsToStems(dfs.getSet());

    dfs.discardInfrequent(2);
    NGramTfDf ngtd = NGramTfDf.analyseFiles(clusterFiles);
    ngtd.calculateNGrams();
    Bag<String> bs = ngtd.getDfBag(2);
View Full Code Here

  public static void main(String[] args) throws Exception {
    LuceneIndexerSearcher lis = new LuceneIndexerSearcher(false);
    IndexSearcher is = lis.getIndexSearcher();
   
    Stemmer stemmerTools = new Stemmer(new EnglishStemmer());
   
    //QueryParser qp = new Oscar3QueryParser("txt", new Oscar3Analyzer(), lis, false);
    //Query q = qp.parse("NaCl");
   
    String queryTerm = "content";
View Full Code Here

  /**
   * @param args
   */
  public static void main(String[] args) throws Exception {
    Stemmer stemmer = new Stemmer(new PorterStemmer());
   
    List<File> files = FileTools.getFilesFromDirectoryByName(new File("/home/ptc24/newows/reactnewpubmed"), "scrapbook.xml");

    List<Event> events = new ArrayList<Event>();
   
    List<BagEvent> eventBags = new ArrayList<BagEvent>();
   
    for(File f : files) {
      ScrapBook sb = new ScrapBook(f.getParentFile());
      Document doc = (Document)sb.getDoc().copy();
      Nodes nodes = doc.query("//cmlPile");
      for(int i=0;i<nodes.size();i++) nodes.get(i).detach();
      Document sourceDoc = (Document)doc.copy();
      nodes = sourceDoc.query("//ne");
      for(int i=0;i<nodes.size();i++) {
        XOMTools.removeElementPreservingText((Element)nodes.get(i));
      }
      Document safDoc = InlineToSAF.extractSAFs(doc, sourceDoc, "foo");

      ProcessingDocument procDoc = ProcessingDocumentFactory.getInstance().makeTokenisedDocument(sourceDoc, false, false, false);
      //NameRecogniser nr = new NameRecogniser();
      //nr.halfProcess(sourceDoc);
      //nr.makeTokenisers(false);
      Set<String> tokenSet = new HashSet<String>();
      Bag<String> tokenBag = new Bag<String>();
      for(TokenSequence t : procDoc.getTokenSequences()) {
        //System.out.println(t.getSourceString());
        for(Token token : t.getTokens()) {
          //tokenSet.add("stem=" + stemmer.getStem(token.getValue().toLowerCase()));
          //tokenSet.add(token.getValue().toLowerCase());
          tokenBag.add(token.getValue().toLowerCase());
        }
      }
      //for(String t : tokenBag.getList()) {
      //  System.out.println(t + "\t" + tokenBag.getCount(t));
      //}
     
      //File safFile = new File(f.getParentFile(), "saf.xml");
      //Document safDoc = new Builder().build(safFile);
      Nodes n = safDoc.query("/saf/annot[slot[@name='type']['PRW']]");
      Set<String> wpss = new HashSet<String>();
      boolean hasReact = false;
      boolean hasPotentialReact = n.size() > 0;
      for(int i=0;i<n.size();i++) {
        Element annot = (Element)n.get(i);
        String s = SafTools.getSlotValue(annot, "surface").toLowerCase();
        String subtype = SafTools.getSlotValue(annot, "subtype");
        if("REACT".equals(subtype)) hasReact = true;
        String wps = s+"_"+subtype;
        wpss.add(wps);
        //tokenSet.remove(s);
        //tokenSet.remove(stemmer.getStem(s));
        tokenSet.add("PROTECT:" + s);
        tokenSet.add("PROTECT:stem=" + stemmer.getStem(s));
      }
      if(hasPotentialReact) {
        Event e = new Event(hasReact ? "TRUE" : "FALSE", tokenSet.toArray(new String[0]));
        events.add(e);
        BagEvent be = new BagEvent(hasReact ? "TRUE" : "FALSE", tokenBag);
View Full Code Here

    //System.out.println(System.currentTimeMillis() - time);
  }
 
  public void run(List<File> files) throws Exception {
   
    Stemmer st = new Stemmer(new PorterStemmer());
   
    long time = System.currentTimeMillis();
    for(File f : files) {
      ProcessingDocument procDoc = ProcessingDocumentFactory.getInstance().makeTokenisedDocument(new Builder().build(f), false, false, false);
      Set<Integer> tokSet = new HashSet<Integer>();
      for(TokenSequence ts : procDoc.getTokenSequences()) {
        if(ts.size() == 0) continue;
        for(Token t : ts.getTokens()) {
          String s = t.getValue().toLowerCase().intern();
          if(s.matches(".*[a-z].*")) s = st.getStem(s);
          if(s == null || s.length() == 0) continue;
          int tn = -1;
          if(tokenIndex.containsKey(s)) {
            tn = tokenIndex.get(s);
          } else {
View Full Code Here

   
    return results;
  }
 
  public Map<String,List<String>> ngramsByStem() {
    Stemmer st = new Stemmer(new EnglishStemmer());
    Set<String> terms = new HashSet<String>();
    for(SubstringClass sc : classArray) {
      for(String s : sc.getSuffixStrings(2)) {
        if(!checkTerm(s)) continue;
        terms.add(s);
      }
    }
    Map<String,List<String>> stems = new HashMap<String,List<String>>();
    for(String term : terms) {
      String stem = st.getStem(term.replaceAll(" - ", " "));
      if(!stems.containsKey(stem)) stems.put(stem, new ArrayList<String>());
      stems.get(stem).add(term);
    }
    return stems;
  }
View Full Code Here

   * @param args
   */
  public static void main(String[] args) throws Exception {   
    Map<String,ClassificationEvaluator> evals = new HashMap<String,ClassificationEvaluator>();
   
    Stemmer st = new Stemmer(new PorterStemmer());
   
    //String docNo = "b600383d";

    //File acDir = new File("/home/ptc24/annot_challenges/subtypes_for_lrec_crb/");
    //File acDir = new File("/home/ptc24/annot_challenges/reacttypes_crb_28082008_easy/");
View Full Code Here

import uk.ac.cam.ch.wwmm.ptclib.string.StringTools;

public class DocClassifier {

  public static Event docToEvent(IndexReader ir, int doc, String cue) throws Exception {
    Stemmer st = new Stemmer(new EnglishStemmer());
    List<String> words = new ArrayList<String>();
    boolean hasCue = false;
    TermFreqVector tvf = ir.getTermFreqVector(doc, "txt");
    String [] termArray = tvf.getTerms();
    int [] termFreqs = tvf.getTermFrequencies();
    for(int j=0;j<termArray.length;j++) {
      if(TermSets.getClosedClass().contains(termArray[j])) {
        //ignore
      } else if(termArray[j].equals(cue)) {
        hasCue = true;
        //words.add(termArray[j].intern());
      } else {
        //for(int k=0;k<termFreqs[j];k++) words.add(termArray[j].intern());
        words.add(st.getStem(termArray[j]).intern());
        words.add(termArray[j].intern());
      }
    }
    String c = hasCue ? "TRUE" : "FALSE";
    return new Event(c, words.toArray(new String[0]));
View Full Code Here

TOP

Related Classes of uk.ac.cam.ch.wwmm.ptclib.misc.Stemmer

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.