Package org.fao.geonet.kernel.search.index

Examples of org.fao.geonet.kernel.search.index.GeonetworkMultiReader


        TermQuery query = new TermQuery(new Term("fileId", fileId));

        SearchManager searchManager = gc.getBean(SearchManager.class);

        IndexAndTaxonomy indexAndTaxonomy = searchManager.getIndexReader(null, -1);
    GeonetworkMultiReader reader = indexAndTaxonomy.indexReader;

        try {
            IndexSearcher searcher = new IndexSearcher(reader);
            TopDocs tdocs = searcher.search(query, 1);

            if (tdocs.totalHits > 0) {

                Set<String> id = Collections.singleton("_id");
                Document element = reader.document(tdocs.scoreDocs[0].doc, id);
                return element.get("_id");
            }

            return null;
        } finally {
View Full Code Here


     */
    public static Map<String,Map<String,String>> getAllMetadataFromIndexFor(String priorityLang, String field, String value, Set<String> returnFields, boolean checkAllHits) throws Exception {
        final IndexAndTaxonomy indexAndTaxonomy;
        final SearchManager searchmanager;
        ServiceContext context = ServiceContext.get();
        GeonetworkMultiReader reader;
        if (context != null) {
            GeonetContext gc = (GeonetContext) context.getHandlerContext(Geonet.CONTEXT_NAME);
            searchmanager = gc.getBean(SearchManager.class);
            indexAndTaxonomy = searchmanager.getNewIndexReader(priorityLang);
            reader = indexAndTaxonomy.indexReader;
        } else {
            throw new IllegalStateException("There needs to be a ServiceContext in the thread local for this thread");
        }

        Map<String, Map<String, String>> records = new HashMap<String, Map<String, String>>();

        try {
            IndexSearcher searcher = new IndexSearcher(reader);
            TermQuery query = new TermQuery(new Term(field, value));
            SettingInfo settingInfo = searchmanager.getSettingInfo();
            boolean sortRequestedLanguageOnTop = settingInfo.getRequestedLanguageOnTop();
            if(Log.isDebugEnabled(Geonet.LUCENE))
                Log.debug(Geonet.LUCENE, "sortRequestedLanguageOnTop: " + sortRequestedLanguageOnTop);
           
            int numberOfHits = 1;
            int counter = 0;
            if (checkAllHits) {
                numberOfHits = Integer.MAX_VALUE;
            }
            Sort sort = LuceneSearcher.makeSort(Collections.<Pair<String, Boolean>>emptyList(), priorityLang, sortRequestedLanguageOnTop);
            Filter filter = NoFilterFilter.instance();
            TopDocs tdocs = searcher.search(query, filter, numberOfHits, sort);
           
            for( ScoreDoc sdoc : tdocs.scoreDocs ) {
                Map<String, String> values = new HashMap<String, String>();
               
                DocumentStoredFieldVisitor docVisitor = new DocumentStoredFieldVisitor(returnFields);
                reader.document(sdoc.doc, docVisitor);
                Document doc = docVisitor.getDocument();
               
                for( String fieldname : returnFields ) {
                    values.put(fieldname, doc.get(fieldname));
                }
View Full Code Here

     */
  public Set<Integer> getDocsWithXLinks() throws Exception {
        IndexAndTaxonomy indexAndTaxonomy= getNewIndexReader(null);
       
    try {
        GeonetworkMultiReader reader = indexAndTaxonomy.indexReader;

      Set<Integer> docs = new LinkedHashSet<Integer>();
      for (int i = 0; i < reader.maxDoc(); i++) {
        // Commented this out for lucene 4.0 and NRT indexing.  It shouldn't be needed I would guess but leave it here
        // for a bit longer:  Commented out since: Dec 10 2012
        // FIXME: strange lucene hack: sometimes it tries to load a deleted document
        // if (reader.isDeleted(i)) continue;
       
        DocumentStoredFieldVisitor idXLinkSelector = new DocumentStoredFieldVisitor("_id", "_hasxlinks");
        reader.document(i, idXLinkSelector);
        Document doc = idXLinkSelector.getDocument();
        String id = doc.get("_id");
        String hasxlinks = doc.get("_hasxlinks");
                if(Log.isDebugEnabled(Geonet.INDEX_ENGINE))
                    Log.debug(Geonet.INDEX_ENGINE, "Got id "+id+" : '"+hasxlinks+"'");
View Full Code Here

     * @throws Exception
     */
  public Map<String,String> getDocsChangeDate() throws Exception {
        IndexAndTaxonomy indexAndTaxonomy= getNewIndexReader(null);
    try {
        GeonetworkMultiReader reader = indexAndTaxonomy.indexReader;

      int capacity = (int)(reader.maxDoc() / 0.75)+1;
      Map<String,String> docs = new HashMap<String,String>(capacity);
      for (int i = 0; i < reader.maxDoc(); i++) {
        // Commented this out for lucene 4.0 and NRT indexing.  It shouldn't be needed I would guess but leave it here
        // for a bit longer:  Commented out since: Dec 10 2012
        // FIXME: strange lucene hack: sometimes it tries to load a deleted document
        // if (reader.isDeleted(i)) continue;
       
        DocumentStoredFieldVisitor idChangeDateSelector = new DocumentStoredFieldVisitor("_id", "_changeDate");
                reader.document(i, idChangeDateSelector);
                Document doc = idChangeDateSelector.getDocument();
        String id = doc.get("_id");
        if (id == null) {
          Log.error(Geonet.INDEX_ENGINE, "Document with no _id field skipped! Document is "+doc);
          continue;
View Full Code Here

        final PerFieldAnalyzerWrapper analyzer = SearchManager.getAnalyzer(language, true);
        String analyzedSearchValue = LuceneSearcher.analyzeText(fieldName, searchValueWithoutWildcard, analyzer);
        boolean startsWithOnly = !searchValue.startsWith("*") && searchValue.endsWith("*");
       
        try {
            GeonetworkMultiReader multiReader = indexAndTaxonomy.indexReader;
            for (AtomicReaderContext atomicReaderContext : multiReader.getContext().leaves()) {
                final AtomicReader reader = atomicReaderContext.reader();
                Terms terms = reader.terms(fieldName);
                if (terms != null) {
                    TermsEnum termEnum = terms.iterator(null);
                    int i = 1;
View Full Code Here

      SearchManager sm = gc.getBean(SearchManager.class);
     

          IndexAndTaxonomy indexAndTaxonomy= sm.getNewIndexReader(null);
      try {
          GeonetworkMultiReader reader = indexAndTaxonomy.indexReader;
        BooleanQuery groupsQuery = (BooleanQuery) CatalogSearcher.getGroupsQuery(context);
                BooleanQuery query = null;

                // Apply CSW service specific constraint
                if (StringUtils.isNotEmpty(cswServiceSpecificConstraint)) {
                    Query constraintQuery = CatalogSearcher.getCswServiceSpecificConstraintQuery(cswServiceSpecificConstraint, luceneConfig);

                    query = new BooleanQuery();

                    BooleanClause.Occur occur = LuceneUtils
                            .convertRequiredAndProhibitedToOccur(true, false);

                    query.add(groupsQuery, occur);
                    query.add(constraintQuery, occur);

                } else {
                    query = groupsQuery;
                }

        List<Pair<String, Boolean>> sortFields = Collections.singletonList(Pair.read(Geonet.SearchResult.SortBy.RELEVANCE, true));
                Sort   sort = LuceneSearcher.makeSort(sortFields, context.getLanguage(), false);
        CachingWrapperFilter filter = null;

        Pair<TopDocs,Element> searchResults = LuceneSearcher.doSearchAndMakeSummary(
            maxRecords, 0, maxRecords, context.getLanguage(),
            null, luceneConfig.getTaxonomyConfiguration(), reader,
            query, filter, sort, null, false, false,
            false, false  // Scoring is useless for GetDomain operation
        );
        TopDocs hits = searchResults.one();
     
        try {
          // Get mapped lucene field in CSW configuration
          String indexField = catalogConfig.getFieldMapping().get(
              property.toLowerCase());
          if (indexField != null)
            property = indexField;
 
          // check if params asked is in the index using getFieldNames ?
          @SuppressWarnings("resource")
                    FieldInfos fi = SlowCompositeReaderWrapper.wrap(reader).getFieldInfos();
          if (fi.fieldInfo(property) == null)
            continue;
         
          boolean isRange = false;
          if (catalogConfig.getGetRecordsRangeFields().contains(
              property))
            isRange = true;
         
          if (isRange)
            listOfValues = new Element("RangeOfValues", Csw.NAMESPACE_CSW);
          else 
            listOfValues = new Element("ListOfValues", Csw.NAMESPACE_CSW);

          Set<String> fields = new HashSet<String>();
          fields.add(property);
          fields.add("_isTemplate");
         
 
          // parse each document in the index
          String[] fieldValues;
                    Collator stringCollator = Collator.getInstance();
                    stringCollator.setStrength(Collator.PRIMARY);
                    SortedSet<String> sortedValues = new TreeSet<String>(stringCollator);
          ObjectKeyIntOpenHashMap duplicateValues = new ObjectKeyIntOpenHashMap();
          for (int j = 0; j < hits.scoreDocs.length; j++) {
              DocumentStoredFieldVisitor selector = new DocumentStoredFieldVisitor(fields);
            reader.document(hits.scoreDocs[j].doc, selector);
            Document doc = selector.getDocument();

            // Skip templates and subTemplates
            String[] isTemplate = doc.getValues("_isTemplate");
            if (isTemplate[0] != null && !isTemplate[0].equals("n"))
View Full Code Here

            Log.debug(Geonet.CSW_SEARCH, "Found searcher with " + indexAndTaxonomy.version + " comparing with " + _searchToken);
            if (_searchToken != -1L && indexAndTaxonomy.version != _searchToken) {
                throw new SearchExpiredEx("Search has expired/timed out - start a new search");
            }
            _searchToken = indexAndTaxonomy.version;
            GeonetworkMultiReader reader = indexAndTaxonomy.indexReader;
            return performSearch(context, luceneExpr, filterExpr, filterVersion, sort, resultType, startPosition, maxRecords,
                    maxHitsInSummary, cswServiceSpecificContraint, reader, indexAndTaxonomy.taxonomyReader);
        } catch (Exception e) {
      Log.error(Geonet.CSW_SEARCH, "Error while searching metadata ");
      Log.error(Geonet.CSW_SEARCH, "  (C) StackTrace:\n" + Util.getStackTrace(e));
View Full Code Here

        try {
            Log.debug(Geonet.CSW_SEARCH, "Found searcher with " + indexAndTaxonomy.version + " comparing with " + _searchToken);
            if (indexAndTaxonomy.version != _searchToken && !(!luceneConfig.useNRTManagerReopenThread() || Boolean.parseBoolean(System.getProperty(LuceneConfig.USE_NRT_MANAGER_REOPEN_THREAD)))) {
                throw new SearchExpiredEx("Search has expired/timed out - start a new search");
            }
            GeonetworkMultiReader _reader = indexAndTaxonomy.indexReader;
            Pair<TopDocs, Element> searchResults = LuceneSearcher.doSearchAndMakeSummary(maxHits, 0, maxHits, _lang.presentationLanguage,
                    luceneConfig.getTaxonomy().get(ResultType.RESULTS.toString()), luceneConfig.getTaxonomyConfiguration(),
                    _reader, _query, wrapSpatialFilter(), _sort, null, false,
                    luceneConfig.isTrackDocScores(), luceneConfig.isTrackMaxScore(), luceneConfig.isDocsScoredInOrder());
            TopDocs tdocs = searchResults.one();
            Element summary = searchResults.two();

            int numHits = Integer.parseInt(summary.getAttributeValue("count"));

            if (Log.isDebugEnabled(Geonet.CSW_SEARCH))
                Log.debug(Geonet.CSW_SEARCH, "Records matched : " + numHits);

            // --- retrieve results
            List<String> response = new ArrayList<String>();

            for (ScoreDoc sdoc : tdocs.scoreDocs) {
                Document doc = _reader.document(sdoc.doc, _uuidselector);
                String uuid = doc.get("_uuid");
                if (uuid != null)
                    response.add(uuid);
            }
            return response;
View Full Code Here

                SearchManager searchMan = gc.getBean(SearchManager.class);


                IndexAndTaxonomy indexAndTaxonomy= searchMan.getNewIndexReader(null);
                GeonetworkMultiReader reader = indexAndTaxonomy.indexReader;
                try {
                    Query query = new MatchAllDocsQuery();
                    TopDocs hits = new IndexSearcher(reader).search(query, 1);
                    if (hits.totalHits > 1) {
                        return Result.healthy();
View Full Code Here

                GeonetContext gc = (GeonetContext) context.getHandlerContext(Geonet.CONTEXT_NAME);

                SearchManager searchMan = gc.getBean(SearchManager.class);

                IndexAndTaxonomy indexAndTaxonomy= searchMan.getNewIndexReader(null);
                GeonetworkMultiReader reader = indexAndTaxonomy.indexReader;
                try {
                    TermQuery indexError = new TermQuery(new Term("_indexingError", "1"));
                    TopDocs hits = new IndexSearcher(reader).search(indexError, 1);
                    if (hits.totalHits > 0) {
                        return Result.unhealthy("Found "+hits.totalHits+" metadata that had errors during indexing");
View Full Code Here

TOP

Related Classes of org.fao.geonet.kernel.search.index.GeonetworkMultiReader

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and owned by Oracle Inc. Contact: coftware@gmail.com.