Examples of ArgumentParserException


Examples of net.sourceforge.argparse4j.inf.ArgumentParserException

    }

    private void verifyIsAbsolute(ArgumentParser parser, Argument arg, File file)
            throws ArgumentParserException {
        if (!file.isAbsolute()) {
            throw new ArgumentParserException(
                    String.format(TextHelper.LOCALE_ROOT,
                            "Not an absolute file: '%s'", file), parser, arg);
        }
    }
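Validators like verifyIsAbsolute() above typically run inside an ArgumentType's convert() method, which is declared to throw ArgumentParserException; the (message, parser, arg) constructor gives handleError() the context to report which argument failed. A minimal, self-contained sketch of the same idea, assuming the argparse4j 0.4.x-style API (the class name, prog name and --output-file option are illustrative, not taken from the code above):

import java.io.File;

import net.sourceforge.argparse4j.ArgumentParsers;
import net.sourceforge.argparse4j.inf.Argument;
import net.sourceforge.argparse4j.inf.ArgumentParser;
import net.sourceforge.argparse4j.inf.ArgumentParserException;
import net.sourceforge.argparse4j.inf.ArgumentType;

public class AbsoluteFileSketch {

    // Custom type that rejects relative paths, mirroring verifyIsAbsolute() above.
    static class AbsoluteFileType implements ArgumentType<File> {
        @Override
        public File convert(ArgumentParser parser, Argument arg, String value)
                throws ArgumentParserException {
            File file = new File(value);
            if (!file.isAbsolute()) {
                // parser and arg let handleError() attribute the failure to this argument
                throw new ArgumentParserException("Not an absolute file: '" + file + "'", parser, arg);
            }
            return file;
        }
    }

    public static void main(String[] args) {
        ArgumentParser parser = ArgumentParsers.newArgumentParser("absolute-file-sketch");
        parser.addArgument("--output-file").type(new AbsoluteFileType()).required(true);
        try {
            System.out.println(parser.parseArgs(args));
        } catch (ArgumentParserException e) {
            parser.handleError(e); // prints usage plus the error message
            System.exit(1);
        }
    }
}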

Examples of net.sourceforge.argparse4j.inf.ArgumentParserException

        } catch (InstantiationException e) {
            handleInstatiationError(e);
        } catch (IllegalAccessException e) {
            handleInstatiationError(e);
        } catch (InvocationTargetException e) {
            throw new ArgumentParserException(String.format(
                    TextHelper.LOCALE_ROOT,
                    "could not convert '%s' to %s (%s)", value,
                    type_.getSimpleName(), e.getCause().getMessage()),
                    e.getCause(), parser, arg);
        } catch (NoSuchMethodException e) {
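The four-argument constructor used above also carries the underlying cause, so a failed type conversion keeps its original stack trace while the user sees a readable message. A shortened sketch of the same pattern for a numeric type (PortType and the message wording are illustrative, not from the code above):

import net.sourceforge.argparse4j.inf.Argument;
import net.sourceforge.argparse4j.inf.ArgumentParser;
import net.sourceforge.argparse4j.inf.ArgumentParserException;
import net.sourceforge.argparse4j.inf.ArgumentType;

// Wraps the NumberFormatException as the cause of the ArgumentParserException.
class PortType implements ArgumentType<Integer> {
    @Override
    public Integer convert(ArgumentParser parser, Argument arg, String value)
            throws ArgumentParserException {
        try {
            return Integer.valueOf(value);
        } catch (NumberFormatException e) {
            throw new ArgumentParserException(
                    String.format("could not convert '%s' to an integer (%s)", value, e.getMessage()),
                    e, parser, arg);
        }
    }
}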

Examples of net.sourceforge.argparse4j.inf.ArgumentParserException

        job = Job.getInstance(getConf());
        job.setJarByClass(getClass());

        if (options.morphlineFile == null) {
            throw new ArgumentParserException("Argument --morphline-file is required", null);
        }
        verifyGoLiveArgs(options, null);
        verifyZKStructure(options, null);

        int mappers = new JobClient(job.getConfiguration()).getClusterStatus().getMaxMapTasks(); // MR1
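Here --morphline-file is only checked after parsing, with a null parser, presumably because the value can also come indirectly from the --hbase-indexer-* options mentioned in the help text further down. When an argument is unconditionally mandatory, argparse4j can enforce it at parse time instead, and the resulting ArgumentParserException then carries the parser for handleError(). A minimal sketch, with an illustrative class and prog name:

import net.sourceforge.argparse4j.ArgumentParsers;
import net.sourceforge.argparse4j.inf.ArgumentParser;
import net.sourceforge.argparse4j.inf.ArgumentParserException;

public class RequiredArgSketch {
    public static void main(String[] args) {
        ArgumentParser parser = ArgumentParsers.newArgumentParser("required-arg-sketch");
        // Omitting --morphline-file now fails during parseArgs() with an
        // ArgumentParserException produced by the parser itself.
        parser.addArgument("--morphline-file").required(true);
        try {
            parser.parseArgs(args);
        } catch (ArgumentParserException e) {
            parser.handleError(e);
            System.exit(1);
        }
    }
}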

Examples of net.sourceforge.argparse4j.inf.ArgumentParserException

        return true;
    }

    public static void verifyGoLiveArgs(Options opts, ArgumentParser parser) throws ArgumentParserException {
        if (opts.zkHost == null && opts.solrHomeDir == null) {
            throw new ArgumentParserException("At least one of --zk-host or --solr-home-dir is required", parser);
        }
        if (opts.goLive && opts.zkHost == null && opts.shardUrls == null) {
            throw new ArgumentParserException("--go-live requires that you also pass --shard-url or --zk-host", parser);
        }

        if (opts.zkHost != null && opts.collection == null) {
            throw new ArgumentParserException("--zk-host requires that you also pass --collection", parser);
        }

        if (opts.zkHost != null) {
            return;
            // verify the structure of the ZK directory later, to avoid run-time checks during argument parsing.
        } else if (opts.shardUrls != null) {
            if (opts.shardUrls.size() == 0) {
                throw new ArgumentParserException("--shard-url requires at least one URL", parser);
            }
        } else if (opts.shards != null) {
            if (opts.shards <= 0) {
                throw new ArgumentParserException("--shards must be a positive number: " + opts.shards, parser);
            }
        } else {
            throw new ArgumentParserException("You must specify one of the following (mutually exclusive) arguments: "
                    + "--zk-host or --shard-url or --shards", parser);
        }

        if (opts.shardUrls != null) {
            opts.shards = opts.shardUrls.size();
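The hand-rolled checks above require one of --zk-host, --shard-url, or --shards (the final error message calls them mutually exclusive) while keeping each message domain-specific. For simpler cases, argparse4j can express the same constraint declaratively with a mutually exclusive group, so parseArgs() itself throws the ArgumentParserException. A rough sketch, assuming the 0.4.x-style API (the option names are reused here for illustration only):

import net.sourceforge.argparse4j.ArgumentParsers;
import net.sourceforge.argparse4j.inf.ArgumentParser;
import net.sourceforge.argparse4j.inf.ArgumentParserException;
import net.sourceforge.argparse4j.inf.MutuallyExclusiveGroup;

public class ExclusiveGroupSketch {
    public static void main(String[] args) throws ArgumentParserException {
        ArgumentParser parser = ArgumentParsers.newArgumentParser("exclusive-group-sketch");
        // required(true): at least one option must be given, and the group
        // allows at most one, so parseArgs() enforces "exactly one" and
        // throws an ArgumentParserException otherwise.
        MutuallyExclusiveGroup group = parser.addMutuallyExclusiveGroup("shard source").required(true);
        group.addArgument("--zk-host");
        group.addArgument("--shard-url").nargs("*");
        group.addArgument("--shards").type(Integer.class);
        parser.parseArgs(args);
    }
}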

Examples of net.sourceforge.argparse4j.inf.ArgumentParserException

            ForkedZooKeeperInspector zki = new ForkedZooKeeperInspector();
            try {
                opts.shardUrls = zki.extractShardUrls(opts.zkHost, opts.collection);
            } catch (Exception e) {
                LOG.debug("Cannot extract SolrCloud shard URLs from ZooKeeper", e);
                throw new ArgumentParserException(e, parser);
            }
            assert opts.shardUrls != null;
            if (opts.shardUrls.size() == 0) {
                throw new ArgumentParserException("--zk-host requires ZooKeeper " + opts.zkHost
                        + " to contain at least one SolrCore for collection: " + opts.collection, parser);
            }
            opts.shards = opts.shardUrls.size();
            LOG.debug("Using SolrCloud shard URLs: {}", opts.shardUrls);
        }

Examples of net.sourceforge.argparse4j.inf.ArgumentParserException

                        @Override
                        public Path convert(ArgumentParser parser, Argument arg, String value) throws ArgumentParserException {
                            Path path = super.convert(parser, arg, value);
                            if ("hdfs".equals(path.toUri().getScheme()) && path.toUri().getAuthority() == null) {
                                // TODO: consider defaulting to hadoop's fs.default.name here or in SolrRecordWriter.createEmbeddedSolrServer()
                                throw new ArgumentParserException("Missing authority in path URI: " + path, parser);
                            }
                            return path;
                        }
                    }.verifyHasScheme().verifyIsAbsolute().verifyCanWriteParent())
                    .required(true)

Examples of net.sourceforge.argparse4j.inf.ArgumentParserException

                        if ("hdfs".equals(path.toUri().getScheme())
                                && path.toUri().getAuthority() == null) {
                            // TODO: consider defaulting to hadoop's
                            // fs.default.name here or in
                            // SolrRecordWriter.createEmbeddedSolrServer()
                            throw new ArgumentParserException("Missing authority in path URI: "
                                    + path, parser);
                        }
                        return path;
                    }
                }.verifyHasScheme().verifyIsAbsolute().verifyCanWriteParent())
                .help("HDFS directory to write Solr indexes to. Inside there one output directory per shard will be generated. "
                    + "Example: hdfs://c2202.mycompany.com/user/$USER/test");
       
        Argument overwriteOutputDirArg = optionalGroup.addArgument("--overwrite-output-dir")
                .action(Arguments.storeTrue())
                .help("Overwrite the directory specified by --output-dir if it already exists. Using this parameter will result in " +
                      "the output directory being recursively deleted at job startup.");

        Argument morphlineFileArg = optionalGroup.addArgument("--morphline-file")
                .metavar("FILE")
                .type(new FileArgumentType().verifyExists().verifyIsFile().verifyCanRead())
                .help("Relative or absolute path to a local config file that contains one or more morphlines. " +
                      "The file must be UTF-8 encoded. The file will be uploaded to each MR task. " +
                      "If supplied, this overrides the value from the --hbase-indexer-* options. " +
                      "Example: /path/to/morphlines.conf");
             
        Argument morphlineIdArg = optionalGroup.addArgument("--morphline-id")
                .metavar("STRING")
                .type(String.class)
                .help("The identifier of the morphline that shall be executed within the morphline config file, " +
                      "e.g. specified by --morphline-file. If the --morphline-id option is ommitted the first (i.e. " +
                      "top-most) morphline within the config file is used. If supplied, this overrides the value " +
                      "from the --hbase-indexer-* options. Example: morphline1 ");
               
        Argument solrHomeDirArg = nonSolrCloud(optionalGroup.addArgument("--solr-home-dir")
                .metavar("DIR")
                .type(new FileArgumentType() {
                    @Override
                    public File convert(ArgumentParser parser, Argument arg, String value)
                            throws ArgumentParserException {
                        File solrHomeDir = super.convert(parser, arg, value);
                        File solrConfigFile = new File(new File(solrHomeDir, "conf"),
                                "solrconfig.xml");
                        new FileArgumentType().verifyExists().verifyIsFile().verifyCanRead()
                                .convert(parser, arg, solrConfigFile.getPath());
                        return solrHomeDir;
                    }
                }.verifyIsDirectory().verifyCanRead())
                .required(false)
                .help("Relative or absolute path to a local dir containing Solr conf/ dir and in particular "
                    + "conf/solrconfig.xml and optionally also lib/ dir. This directory will be uploaded to each MR task. "
                    + "Example: src/test/resources/solr/minimr"));

        Argument updateConflictResolverArg = optionalGroup.addArgument("--update-conflict-resolver")
                .metavar("FQCN")
                .type(String.class)
                .setDefault(RetainMostRecentUpdateConflictResolver.class.getName())
                .help("Fully qualified class name of a Java class that implements the UpdateConflictResolver interface. "
                    + "This enables deduplication and ordering of a series of document updates for the same unique document "
                    + "key. For example, a MapReduce batch job might index multiple files in the same job where some of the "
                    + "files contain old and new versions of the very same document, using the same unique document key.\n"
                    + "Typically, implementations of this interface forbid collisions by throwing an exception, or ignore all but "
                    + "the most recent document version, or, in the general case, order colliding updates ascending from least "
                    + "recent to most recent (partial) update. The caller of this interface (i.e. the Hadoop Reducer) will then "
                    + "apply the updates to Solr in the order returned by the orderUpdates() method.\n"
                    + "The default RetainMostRecentUpdateConflictResolver implementation ignores all but the most recent document "
                    + "version, based on a configurable numeric Solr field, which defaults to the file_last_modified timestamp");

        Argument reducersArg = optionalGroup.addArgument("--reducers")
                .metavar("INTEGER")
                .type(Integer.class)
                .choices(new RangeArgumentChoice(-2, Integer.MAX_VALUE))
                // TODO: also support X% syntax where X is an integer
                .setDefault(-1)
                .help("Tuning knob that indicates the number of reducers to index into. "
                    + "0 indicates that no reducers should be used, and documents should be sent directly from the mapper tasks to live Solr servers. "
                    + "-1 indicates use all reduce slots available on the cluster. "
                    + "-2 indicates use one reducer per output shard, which disables the mtree merge MR algorithm. "
                    + "The mtree merge MR algorithm improves scalability by spreading load "
                    + "(in particular CPU load) among a number of parallel reducers that can be much larger than the number "
                    + "of solr shards expected by the user. It can be seen as an extension of concurrent lucene merges "
                    + "and tiered lucene merges to the clustered case. The subsequent mapper-only phase "
                    + "merges the output of said large number of reducers to the number of shards expected by the user, "
                    + "again by utilizing more available parallelism on the cluster.");

        Argument fanoutArg = optionalGroup.addArgument("--fanout")
                .metavar("INTEGER")
                .type(Integer.class)
                .choices(new RangeArgumentChoice(2, Integer.MAX_VALUE))
                .setDefault(Integer.MAX_VALUE)
                .help(FeatureControl.SUPPRESS);

        Argument maxSegmentsArg = optionalGroup.addArgument("--max-segments")
                .metavar("INTEGER")
                .type(Integer.class)
                .choices(new RangeArgumentChoice(1, Integer.MAX_VALUE))
                .setDefault(1)
                .help("Tuning knob that indicates the maximum number of segments to be contained on output in the index of "
                    + "each reducer shard. After a reducer has built its output index it applies a merge policy to merge segments "
                    + "until there are <= maxSegments lucene segments left in this index. "
                    + "Merging segments involves reading and rewriting all data in all these segment files, "
                    + "potentially multiple times, which is very I/O intensive and time consuming. "
                    + "However, an index with fewer segments can later be merged faster, "
                    + "and it can later be queried faster once deployed to a live Solr serving shard. "
                    + "Set maxSegments to 1 to optimize the index for low query latency. "
                    + "In a nutshell, a small maxSegments value trades indexing latency for subsequently improved query latency. "
                    + "This can be a reasonable trade-off for batch indexing systems.");

        Argument fairSchedulerPoolArg = optionalGroup.addArgument("--fair-scheduler-pool")
                .metavar("STRING")
                .help("Optional tuning knob that indicates the name of the fair scheduler pool to submit jobs to. "
                    + "The Fair Scheduler is a pluggable MapReduce scheduler that provides a way to share large clusters. "
                    + "Fair scheduling is a method of assigning resources to jobs such that all jobs get, on average, an "
                    + "equal share of resources over time. When there is a single job running, that job uses the entire "
                    + "cluster. When other jobs are submitted, tasks slots that free up are assigned to the new jobs, so "
                    + "that each job gets roughly the same amount of CPU time. Unlike the default Hadoop scheduler, which "
                    + "forms a queue of jobs, this lets short jobs finish in reasonable time while not starving long jobs. "
                    + "It is also an easy way to share a cluster between multiple of users. Fair sharing can also work with "
                    + "job priorities - the priorities are used as weights to determine the fraction of total compute time "
                    + "that each job gets.");

        Argument dryRunArg = optionalGroup.addArgument("--dry-run")
                .action(Arguments.storeTrue())
                .help("Run in local mode and print documents to stdout instead of loading them into Solr. This executes "
                    + "the morphline in the client process (without submitting a job to MR) for quicker turnaround during "
                    + "early trial & debug sessions.");

        Argument log4jConfigFileArg = optionalGroup.addArgument("--log4j")
                .metavar("FILE")
                .type(new FileArgumentType().verifyExists().verifyIsFile().verifyCanRead())
                .help("Relative or absolute path to a log4j.properties config file on the local file system. This file "
                    + "will be uploaded to each MR task. Example: /path/to/log4j.properties");

        Argument verboseArg = optionalGroup.addArgument("--verbose", "-v")
                .action(Arguments.storeTrue())
                .help("Turn on verbose output.");

        Argument clearIndexArg = optionalGroup.addArgument("--clear-index")
                .action(Arguments.storeTrue())
                .help("Will attempt to delete all entries in a solr index before starting batch build. This is not " +
                        "transactional so if the build fails the index will be empty.");
       
        optionalGroup.addArgument(SHOW_NON_SOLR_CLOUD)
                .action(Arguments.storeTrue())
                .help("Also show options for Non-SolrCloud mode as part of --help.");
   
        Namespace ns;
        try {
            ns = parser.parseArgs(args);
        } catch (FoundHelpArgument e) {
            return 0;
        } catch (ArgumentParserException e) {
            parser.handleError(e);
            return 1;
        }

        opts.log4jConfigFile = (File) ns.get(log4jConfigFileArg.getDest());
        if (opts.log4jConfigFile != null) {
            PropertyConfigurator.configure(opts.log4jConfigFile.getPath());
        }
        LOG.debug("Parsed command line args: " + ns);

        opts.inputLists = Collections.EMPTY_LIST;
        opts.outputDir = (Path) ns.get(outputDirArg.getDest());
        opts.overwriteOutputDir = ns.getBoolean(overwriteOutputDirArg.getDest());
        opts.reducers = ns.getInt(reducersArg.getDest());
        opts.updateConflictResolver = ns.getString(updateConflictResolverArg.getDest());
        opts.fanout = ns.getInt(fanoutArg.getDest());
        opts.maxSegments = ns.getInt(maxSegmentsArg.getDest());
        opts.morphlineFile = (File) ns.get(morphlineFileArg.getDest());
        opts.morphlineId = ns.getString(morphlineIdArg.getDest());
        opts.solrHomeDir = (File) ns.get(solrHomeDirArg.getDest());
        opts.fairSchedulerPool = ns.getString(fairSchedulerPoolArg.getDest());
        opts.isDryRun = ns.getBoolean(dryRunArg.getDest());
        opts.isVerbose = ns.getBoolean(verboseArg.getDest());
        opts.zkHost = ns.getString(zkHostArg.getDest());
        opts.shards = ns.getInt(shardsArg.getDest());
        opts.shardUrls = ForkedMapReduceIndexerTool.buildShardUrls(ns.getList(shardUrlsArg.getDest()), opts.shards);
        opts.goLive = ns.getBoolean(goLiveArg.getDest());
        opts.goLiveThreads = ns.getInt(goLiveThreadsArg.getDest());
        opts.collection = ns.getString(collectionArg.getDest());
        opts.clearIndex = ns.getBoolean(clearIndexArg.getDest());

        opts.hbaseIndexerComponentFactory = (String) ns.get(hbaseIndexerComponentFactoryArg.getDest());
        opts.hbaseIndexerConfigFile = (File) ns.get(hbaseIndexerConfigArg.getDest());
        opts.hbaseIndexerZkHost = ns.getString(indexerZkHostArg.getDest());
        opts.hbaseIndexerName = ns.getString(indexNameArg.getDest());
        opts.hbaseTableName = ns.getString(hbaseTableNameArg.getDest());
        opts.hbaseStartRow = ns.getString(startRowArg.getDest());
        opts.hbaseEndRow = ns.getString(endRowArg.getDest());
        opts.hbaseStartTimeString = ns.getString(startTimeArg.getDest());
        opts.hbaseEndTimeString = ns.getString(endTimeArg.getDest());
        opts.hbaseTimestampFormat = ns.getString(timestampFormatArg.getDest());

        try {
            try {
                opts.evaluate();
            } catch (IllegalStateException ise) {
                throw new ArgumentParserException(ise.getMessage(), parser);
            }
        } catch (ArgumentParserException e) {
            parser.handleError(e);
            return 1;
        }
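Rewrapping the IllegalStateException from opts.evaluate() as an ArgumentParserException means post-parse, domain-level validation failures are reported through the same parser.handleError() path as ordinary parse errors. A stripped-down sketch of that pattern, with a hypothetical validate() method standing in for opts.evaluate() and an illustrative --mode option:

import net.sourceforge.argparse4j.ArgumentParsers;
import net.sourceforge.argparse4j.inf.ArgumentParser;
import net.sourceforge.argparse4j.inf.ArgumentParserException;
import net.sourceforge.argparse4j.inf.Namespace;

public class PostParseValidationSketch {

    // Hypothetical domain validation; real code would inspect the parsed options.
    static void validate(Namespace ns) {
        if (ns.getString("mode") == null) {
            throw new IllegalStateException("--mode is required for this sketch");
        }
    }

    static int run(String[] args) {
        ArgumentParser parser = ArgumentParsers.newArgumentParser("post-parse-validation-sketch");
        parser.addArgument("--mode");
        try {
            Namespace ns = parser.parseArgs(args);
            try {
                validate(ns);
            } catch (IllegalStateException ise) {
                // Same rewrapping as above: report the domain error like a parse error.
                throw new ArgumentParserException(ise.getMessage(), parser);
            }
            return 0;
        } catch (ArgumentParserException e) {
            parser.handleError(e);
            return 1;
        }
    }

    public static void main(String[] args) {
        System.exit(run(args));
    }
}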

Examples of net.sourceforge.argparse4j.inf.ArgumentParserException

    job = Job.getInstance(getConf());
    job.setJarByClass(getClass());

    if (options.morphlineFile == null) {
      throw new ArgumentParserException("Argument --morphline-file is required", null);
    }
    verifyGoLiveArgs(options, null);
    verifyZKStructure(options, null);

    int mappers = new JobClient(job.getConfiguration()).getClusterStatus().getMaxMapTasks(); // MR1

Examples of net.sourceforge.argparse4j.inf.ArgumentParserException

    return true;
  }

  private static void verifyGoLiveArgs(Options opts, ArgumentParser parser) throws ArgumentParserException {
    if (opts.zkHost == null && opts.solrHomeDir == null) {
      throw new ArgumentParserException("At least one of --zk-host or --solr-home-dir is required", parser);
    }
    if (opts.goLive && opts.zkHost == null && opts.shardUrls == null) {
      throw new ArgumentParserException("--go-live requires that you also pass --shard-url or --zk-host", parser);
    }
   
    if (opts.zkHost != null && opts.collection == null) {
      throw new ArgumentParserException("--zk-host requires that you also pass --collection", parser);
    }
   
    if (opts.zkHost != null) {
      return;
      // verify the structure of the ZK directory later, to avoid run-time checks during argument parsing.
    } else if (opts.shardUrls != null) {
      if (opts.shardUrls.size() == 0) {
        throw new ArgumentParserException("--shard-url requires at least one URL", parser);
      }
    } else if (opts.shards != null) {
      if (opts.shards <= 0) {
        throw new ArgumentParserException("--shards must be a positive number: " + opts.shards, parser);
      }
    } else {
      throw new ArgumentParserException("You must specify one of the following (mutually exclusive) arguments: "
          + "--zk-host or --shard-url or --shards", parser);
    }

    if (opts.shardUrls != null) {
      opts.shards = opts.shardUrls.size();

Examples of net.sourceforge.argparse4j.inf.ArgumentParserException

      ZooKeeperInspector zki = new ZooKeeperInspector();
      try {
        opts.shardUrls = zki.extractShardUrls(opts.zkHost, opts.collection);
      } catch (Exception e) {
        LOG.debug("Cannot extract SolrCloud shard URLs from ZooKeeper", e);
        throw new ArgumentParserException(e, parser);
      }
      assert opts.shardUrls != null;
      if (opts.shardUrls.size() == 0) {
        throw new ArgumentParserException("--zk-host requires ZooKeeper " + opts.zkHost
          + " to contain at least one SolrCore for collection: " + opts.collection, parser);
      }
      opts.shards = opts.shardUrls.size();
      LOG.debug("Using SolrCloud shard URLs: {}", opts.shardUrls);
    }