Package org.apache.solr.schema

Examples of org.apache.solr.schema.IndexSchema$DynamicReplacement
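The snippets below are collected from various open-source projects and all obtain or construct an org.apache.solr.schema.IndexSchema, either from a live core, a request, a searcher, or directly from solrconfig.xml and schema.xml. As a quick orientation, here is a condensed sketch of the access patterns used in the excerpts; variable names and the solr-home path are illustrative, each line assumes the corresponding context object from the excerpts, and the constructor-based form targets the pre-5.x Solr API these excerpts were written against.

    IndexSchema schema;
    schema = h.getCore().getLatestSchema();       // from a test harness with a live SolrCore
    schema = req.getSchema();                     // inside a request handler
    schema = searcher.getSchema();                // from a SolrIndexSearcher
    // ...or built directly from solrconfig.xml + schema.xml, as SolrLocator does below
    // (ParserConfigurationException / IOException handling elided in this sketch):
    SolrResourceLoader loader = new SolrResourceLoader("/path/to/solr/home");  // illustrative path
    SolrConfig solrConfig = new SolrConfig(loader, "solrconfig.xml", null);
    InputSource is = new InputSource(loader.openSchema("schema.xml"));
    schema = new IndexSchema(solrConfig, "schema.xml", is);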


    // load illegal char string into a metadata field and generate a new document,
    // which will cause the ContentHandler to be invoked.
    metadata.set(fieldName, getFoobarWithNonChars());
    StripNonCharSolrContentHandlerFactory contentHandlerFactory =
      new StripNonCharSolrContentHandlerFactory(DateUtil.DEFAULT_DATE_FORMATS);
    IndexSchema schema = h.getCore().getLatestSchema();
    SolrContentHandler contentHandler =
      contentHandlerFactory.createSolrContentHandler(metadata, new MapSolrParams(new HashMap()), schema);
    SolrInputDocument doc = contentHandler.newDocument();
    String foobar = doc.getFieldValue(fieldName).toString();
    assertTrue("foobar".equals(foobar));
View Full Code Here


    }
  }

  public IndexSchema getIndexSchema() {
    if (context instanceof SolrMorphlineContext) {   
      IndexSchema schema = ((SolrMorphlineContext)context).getIndexSchema();
      if (schema != null) {
        validateSchema(schema);
        return schema;
      }
    }
   
    // If solrHomeDir isn't defined and zkHost and collectionName are defined
    // then download schema.xml and solrconfig.xml, etc from zk and use that as solrHomeDir
    String mySolrHomeDir = solrHomeDir;
    if (solrHomeDir == null || solrHomeDir.length() == 0) {
      if (zkHost == null || zkHost.length() == 0) {
        // TODO: implement download from solrUrl if specified
        throw new MorphlineCompilationException(
            "Downloading a Solr schema requires either parameter 'solrHomeDir' or parameters 'zkHost' and 'collection'",
            config);
      }
      if (collectionName == null || collectionName.length() == 0) {
        throw new MorphlineCompilationException(
            "Parameter 'zkHost' requires that you also pass parameter 'collection'", config);
      }
      ZooKeeperDownloader zki = new ZooKeeperDownloader();
      SolrZkClient zkClient = zki.getZkClient(zkHost);
      try {
        String configName = zki.readConfigName(zkClient, collectionName);
        File downloadedSolrHomeDir = zki.downloadConfigDir(zkClient, configName);
        mySolrHomeDir = downloadedSolrHomeDir.getAbsolutePath();
      } catch (KeeperException e) {
        throw new MorphlineCompilationException("Cannot download schema.xml from ZooKeeper", config, e);
      } catch (InterruptedException e) {
        throw new MorphlineCompilationException("Cannot download schema.xml from ZooKeeper", config, e);
      } catch (IOException e) {
        throw new MorphlineCompilationException("Cannot download schema.xml from ZooKeeper", config, e);
      } finally {
        zkClient.close();
      }
    }
   
    LOG.debug("SolrLocator loading IndexSchema from dir {}", mySolrHomeDir);
    try {
      SolrResourceLoader loader = new SolrResourceLoader(mySolrHomeDir);
      SolrConfig solrConfig = new SolrConfig(loader, "solrconfig.xml", null);
      InputSource is = new InputSource(loader.openSchema("schema.xml"));
      is.setSystemId(SystemIdResolver.createSystemIdFromResourceName("schema.xml"));
     
      IndexSchema schema = new IndexSchema(solrConfig, "schema.xml", is);
      validateSchema(schema);
      return schema;
    } catch (ParserConfigurationException e) {
      throw new MorphlineRuntimeException(e);
    } catch (IOException e) {
View Full Code Here
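The three catch blocks in getIndexSchema() are identical; the single-exception catch style suggests a pre-Java-7 codebase, but on Java 7 or later they could be collapsed with multi-catch. A sketch reusing the names from the excerpt above:

    try {
      String configName = zki.readConfigName(zkClient, collectionName);
      File downloadedSolrHomeDir = zki.downloadConfigDir(zkClient, configName);
      mySolrHomeDir = downloadedSolrHomeDir.getAbsolutePath();
    } catch (KeeperException | InterruptedException | IOException e) {
      // one handler instead of three identical ones
      throw new MorphlineCompilationException("Cannot download schema.xml from ZooKeeper", config, e);
    } finally {
      zkClient.close();
    }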

      this.outputFieldName = getConfigs().getString(config, "outputField");     
      String solrFieldType = getConfigs().getString(config, "solrFieldType");     
      Config solrLocatorConfig = getConfigs().getConfig(config, "solrLocator");
      SolrLocator locator = new SolrLocator(solrLocatorConfig, context);
      LOG.debug("solrLocator: {}", locator);
      IndexSchema schema = locator.getIndexSchema();
      FieldType fieldType = schema.getFieldTypeByName(solrFieldType);
      if (fieldType == null) {
        throw new MorphlineCompilationException("Missing Solr field type in schema.xml for name: " + solrFieldType, config);
      }
      this.analyzer = fieldType.getAnalyzer();
      Preconditions.checkNotNull(analyzer);
View Full Code Here
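The command builder above resolves a Lucene Analyzer from the named Solr field type. Purely as an illustration (not part of the excerpt), the resolved analyzer could be exercised as follows; the input string is a placeholder and the checked IOException is assumed to be declared or handled by the caller:

    // Requires java.io.StringReader, org.apache.lucene.analysis.TokenStream and
    // org.apache.lucene.analysis.tokenattributes.CharTermAttribute.
    TokenStream stream = analyzer.tokenStream(outputFieldName, new StringReader("some sample text"));
    CharTermAttribute termAtt = stream.addAttribute(CharTermAttribute.class);
    stream.reset();                                 // required before incrementToken() in Lucene 4.x
    while (stream.incrementToken()) {
      LOG.debug("token: {}", termAtt.toString());   // one token per analyzer output
    }
    stream.end();
    stream.close();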

      this.preserveExisting = getConfigs().getBoolean(config, "preserveExisting", true);     
     
      Config solrLocatorConfig = getConfigs().getConfig(config, "solrLocator");
      SolrLocator locator = new SolrLocator(solrLocatorConfig, context);
      LOG.debug("solrLocator: {}", locator);
      IndexSchema schema = locator.getIndexSchema();
      SchemaField uniqueKey = schema.getUniqueKeyField();
      uniqueKeyName = uniqueKey == null ? null : uniqueKey.getName();
     
      String tmpIdPrefix = getConfigs().getString(config, "idPrefix", null); // for load testing only
      Random tmpRandomIdPrefx = null;
      if ("random".equals(tmpIdPrefix)) { // for load testing only
View Full Code Here
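The excerpt ends inside the "random" branch. One plausible continuation (a sketch, not the original source) seeds a Random for generating randomized id prefixes during load testing:

    // Sketch only -- plausible continuation, not the original code.
    tmpRandomIdPrefx = new Random(new SecureRandom().nextLong());  // needs java.security.SecureRandom
    tmpIdPrefix = null;                                            // literal prefix is unused in random mode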


           
            ByteBuffer buf = ByteBuffer.wrap(schemaBytes);
           
            InputStream stream = new ByteArrayInputStream(buf.array(), buf.position(), buf.remaining());
            SolrConfig solrConfig = new SolrConfig(solrConfigFile);
            IndexSchema schema = new IndexSchema(solrConfig, schemaName, stream);

            core = new SolrCore(indexName, "/tmp/search/solr-hbase/data", solrConfig, schema, null);
           
            logger.debug("Loaded core from hbase: " + indexName);
View Full Code Here
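As a quick sanity check (illustrative only, not part of the excerpt), the freshly constructed core exposes the schema that was parsed from the HBase-stored bytes; getSchema() is the accessor of the pre-5.x API generation used here:

    IndexSchema loaded = core.getSchema();          // later Solr versions use getLatestSchema()
    logger.debug("Schema '" + loaded.getSchemaName() + "' loaded for core " + indexName);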

  {
    assert searcher != null;
    if (fieldNameSet == null || fieldNameSet.size() == 0)
      return null;

    IndexSchema schema = searcher.getSchema();

    HashMap<String, String> fieldValueMap = new HashMap<String, String>();
    try {
      Document solrDoc = searcher.doc(docId, fieldNameSet);
      for (String fieldName : fieldNameSet) {
        String fieldValue = null;
        FieldType fieldType = schema.getField(fieldName).getType();

        // the following code is to make sure it works for sortable numerical fields
        if (fieldType.isMultiValued()) {
          // concatenate all the values for multi-valued field
          Fieldable[] fields = solrDoc.getFieldables(fieldName);
View Full Code Here
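The excerpt is cut off inside the multi-valued branch. Following the comment in the code, a plausible continuation (a sketch, not the original source) concatenates the stored values and records the result:

    // Sketch of a plausible continuation -- not the original code.
    StringBuilder sb = new StringBuilder();
    for (Fieldable f : fields) {
      if (sb.length() > 0) {
        sb.append(' ');                 // separate the individual stored values
      }
      sb.append(f.stringValue());
    }
    fieldValue = sb.toString();
    fieldValueMap.put(fieldName, fieldValue);   // assumed destination; the map is declared above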

            InputStream stream = new ByteArrayInputStream(ByteBufferUtil.getArray(buf));

            SolrResourceLoader resourceLoader = new SolandraResourceLoader(indexName, null);
            SolrConfig solrConfig = new SolrConfig(resourceLoader, solrConfigFile, null);

            IndexSchema schema = new IndexSchema(solrConfig, indexName, new InputSource(stream));

            core = new SolrCore(indexName, "/tmp", solrConfig, schema, null);

            if (logger.isDebugEnabled())
              logger.debug("Loaded core from cassandra: " + indexName);
View Full Code Here

    */

    // Minimum term docFreq in order to use the filterCache for that term.
    int minDfFilterCache = params.getFieldInt(field, SolrParams.FACET_ENUM_CACHE_MINDF, 0);

    IndexSchema schema = searcher.getSchema();
    IndexReader r = searcher.getReader();
    FieldType ft = schema.getFieldType(field);

    final int maxsize = limit>=0 ? offset+limit : Integer.MAX_VALUE-1;   
    final BoundedTreeSet<CountPair<String,Integer>> queue = sort ? new BoundedTreeSet<CountPair<String,Integer>>(maxsize) : null;
    final NamedList res = new NamedList();

View Full Code Here

  @Override
  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception
  {
    RequestHandlerUtils.addExperimentalFormatWarning( rsp );
   
    IndexSchema schema = req.getSchema();
    SolrIndexSearcher searcher = req.getSearcher();
    IndexReader reader = searcher.getReader();
    SolrParams params = req.getParams();
    int numTerms = params.getInt( NUMTERMS, DEFAULT_COUNT );
       
    // Always show the core lucene info
    rsp.add("index", getIndexInfo(reader, numTerms>0 ) );

    Integer docId = params.getInt( DOC_ID );
    if( docId == null && params.get( ID ) != null ) {
      // Look for something with a given solr ID
      SchemaField uniqueKey = schema.getUniqueKeyField();
      String v = uniqueKey.getType().toInternal( params.get(ID) );
      Term t = new Term( uniqueKey.getName(), v );
      docId = searcher.getFirstMatch( t );
      if( docId < 0 ) {
        throw new SolrException( SolrException.ErrorCode.NOT_FOUND, "Can't find document: "+params.get( ID ) );
View Full Code Here
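Once the internal docId has been resolved from the unique-key term, the handler can fetch the stored document. An illustrative follow-up (not part of the excerpt), written against the same Lucene 3.x Document/Fieldable API and Solr's SimpleOrderedMap:

    // Illustrative only -- not the original handler code.
    Document doc = reader.document(docId);
    SimpleOrderedMap<Object> docInfo = new SimpleOrderedMap<Object>();
    for (Fieldable f : doc.getFields()) {
      docInfo.add(f.name(), f.stringValue());   // raw stored values; numeric/date fields stay in internal form
    }
    rsp.add("doc", docInfo);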
