Package org.apache.hadoop

Examples of org.apache.hadoop.HadoopIllegalArgumentException
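HadoopIllegalArgumentException lives in package org.apache.hadoop, extends java.lang.IllegalArgumentException, and carries only a message, so it is unchecked and safe to throw from validation code. The snippets below all follow the same fail-fast pattern: check an argument up front and throw with a descriptive message. A minimal sketch of that pattern (the setReplication helper is hypothetical):

    import org.apache.hadoop.HadoopIllegalArgumentException;

    public class ReplicationArgs {
      // Hypothetical helper showing the fail-fast validation pattern.
      static void setReplication(int replication) {
        if (replication <= 0) {
          throw new HadoopIllegalArgumentException(
              "replication must be positive, but was " + replication);
        }
        // ... use the validated value ...
      }

      public static void main(String[] args) {
        setReplication(3);    // OK
        try {
          setReplication(0);  // rejected
        } catch (HadoopIllegalArgumentException e) {
          System.err.println(e.getMessage());
        }
      }
    }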


    // "feature" names whichever short-circuit feature is enabled,
    // or is null if none are.
    if (feature == null) {
      LOG.debug("Both short-circuit local reads and UNIX domain socket are disabled.");
    } else {
      if (conf.getDomainSocketPath().isEmpty()) {
        throw new HadoopIllegalArgumentException(feature + " is enabled but "
            + DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY + " is not set.");
      } else if (DomainSocket.getLoadingFailureReason() != null) {
        LOG.warn(feature + " cannot be used because "
            + DomainSocket.getLoadingFailureReason());
      } else {
        // ...
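The check above guards HDFS short-circuit reads: enabling the feature without a domain socket path is a configuration error. A hedged sketch of a client configuration that would trip it, using the standard HDFS key names:

    Configuration conf = new Configuration();
    // Enable short-circuit local reads...
    conf.setBoolean("dfs.client.read.shortcircuit", true);
    // ...but leave "dfs.domain.socket.path" unset: client initialization
    // is then expected to fail with the HadoopIllegalArgumentException above.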


    protected void checkArgs(String[] args) {
      // This command takes no arguments; reject anything extra.
      if (args.length > 0) {
        throw new HadoopIllegalArgumentException(
            "Did not expect argument: " + args[0]);
      }
    }

 
  static class PrintConfKeyCommandHandler extends CommandHandler {
    @Override
    protected void checkArgs(String[] args) {
      // Exactly one argument (the configuration key) is required.
      if (args.length != 1) {
        throw new HadoopIllegalArgumentException(
            "usage: " + Command.CONFKEY.getUsage());
      }
    }
    // ...
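Both handlers above validate CLI argument counts the same way: a zero-argument command rejects anything extra, and PrintConfKeyCommandHandler demands exactly one key. A sketch of the same pattern in a standalone handler (class names and usage string are hypothetical):

    abstract class Handler {
      // Subclasses throw HadoopIllegalArgumentException on a bad argument list.
      protected abstract void checkArgs(String[] args);
    }

    class EchoKeyHandler extends Handler {
      @Override
      protected void checkArgs(String[] args) {
        if (args.length != 1) {
          throw new HadoopIllegalArgumentException("usage: echokey <key>");
        }
      }
    }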

  /** Fetch live and dead datanodes. */
  public void fetchDatanodes(final List<DatanodeDescriptor> live,
      final List<DatanodeDescriptor> dead, final boolean removeDecommissionNode) {
    // At least one output list must be supplied.
    if (live == null && dead == null) {
      throw new HadoopIllegalArgumentException("Both live and dead lists are null");
    }

    // There is no need to take the namesystem reader lock, as
    // getDatanodeListForReport will synchronize on datanodeMap.
    final List<DatanodeDescriptor> results = // ...
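Callers may pass null for one list but not both. A usage sketch, where dm stands in for the DatanodeManager-like object that owns the method:

    List<DatanodeDescriptor> live = new ArrayList<>();
    List<DatanodeDescriptor> dead = new ArrayList<>();
    dm.fetchDatanodes(live, dead, false);  // fills both lists
    dm.fetchDatanodes(live, null, false);  // live nodes only, also fine
    // dm.fetchDatanodes(null, null, false) would throw
    // HadoopIllegalArgumentException("Both live and dead lists are null").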

      // (leading constructor parameters elided)
      List<ZKAuthInfo> authInfo,
      ActiveStandbyElectorCallback app, int maxRetryNum) throws IOException,
      HadoopIllegalArgumentException, KeeperException {
    // Fail fast on any missing or invalid constructor argument.
    if (app == null || acl == null || parentZnodeName == null
        || zookeeperHostPorts == null || zookeeperSessionTimeout <= 0) {
      throw new HadoopIllegalArgumentException("Invalid argument");
    }
    zkHostPort = zookeeperHostPorts;
    zkSessionTimeout = zookeeperSessionTimeout;
    zkAcl = acl;
    zkAuthInfo = authInfo;
    // ...
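The constructor validates every argument before touching ZooKeeper, so a misconfigured caller fails immediately rather than after a connection attempt. A hedged construction sketch; the argument order is reconstructed from the fragment above and the values are placeholders:

    ActiveStandbyElector elector = new ActiveStandbyElector(
        "zk1:2181,zk2:2181,zk3:2181",        // zookeeperHostPorts
        5000,                                // zookeeperSessionTimeout (ms)
        "/my-ha-service",                    // parentZnodeName
        ZooDefs.Ids.OPEN_ACL_UNSAFE,         // acl
        Collections.<ZKAuthInfo>emptyList(), // authInfo
        myCallback,                          // app - must be non-null
        3);                                  // maxRetryNum
    // Passing null for any of these, or a non-positive timeout, throws
    // HadoopIllegalArgumentException("Invalid argument") before any
    // ZooKeeper connection is attempted.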

  public synchronized void joinElection(byte[] data)
      throws HadoopIllegalArgumentException {

    // The data is written to the lock znode, so it must be non-null.
    if (data == null) {
      throw new HadoopIllegalArgumentException("data cannot be null");
    }

    if (wantToBeInElection) {
      LOG.info("Already in election. Not re-connecting.");
      return;
    // ...
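joinElection requires non-null application data and is idempotent while already in an election. A usage sketch (elector and the payload are placeholders):

    byte[] appData = "nn1".getBytes(StandardCharsets.UTF_8);
    elector.joinElection(appData);  // enters the election
    elector.joinElection(appData);  // logs "Already in election" and returns
    // elector.joinElection(null) would throw
    // HadoopIllegalArgumentException("data cannot be null").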

  /**
   * Check that the scheme of the given URI matches the supported scheme.
   * @param uri the URI to check
   * @param supportedScheme the scheme this file system supports
   */
  public void checkScheme(URI uri, String supportedScheme) {
    String scheme = uri.getScheme();
    if (scheme == null) {
      throw new HadoopIllegalArgumentException("Uri without scheme: " + uri);
    }
    if (!scheme.equals(supportedScheme)) {
      throw new HadoopIllegalArgumentException("Uri scheme " + uri
          + " does not match the scheme " + supportedScheme);
    }
  }
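checkScheme rejects URIs with a missing or mismatching scheme. A quick usage sketch, where fs stands in for the file-system object defining the method:

    fs.checkScheme(URI.create("hdfs://nn:8020/data"), "hdfs");  // passes
    // Each of the following would throw HadoopIllegalArgumentException:
    // fs.checkScheme(URI.create("/no/scheme/here"), "hdfs");   // no scheme
    // fs.checkScheme(URI.create("file:///tmp"), "hdfs");       // wrong scheme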

      // (leading parameters elided)
      boolean authorityNeeded, int defaultPort) throws URISyntaxException {
    checkScheme(uri, supportedScheme);
    // A file system implementation that requires an authority must always
    // specify a default port.
    if (defaultPort < 0 && authorityNeeded) {
      throw new HadoopIllegalArgumentException(
          "FileSystem implementation error - default port " + defaultPort
              + " is not valid");
    }
    String authority = uri.getAuthority();
    if (authority == null) {
      if (authorityNeeded) {
        throw new HadoopIllegalArgumentException("Uri without authority: " + uri);
      } else {
        return new URI(supportedScheme + ":///");
      }
    }
    // The authority is non-null; authorityNeeded may be true or false.
    // ...
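To make the authority rules concrete, a couple of cases using java.net.URI directly:

    // "hdfs://nn:8020/a" -> authority "nn:8020": accepted either way.
    // "hdfs:///a"        -> authority null: throws if authorityNeeded,
    //                       otherwise normalized to "hdfs:///".
    System.out.println(URI.create("hdfs://nn:8020/a").getAuthority()); // nn:8020
    System.out.println(URI.create("hdfs:///a").getAuthority());        // null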

    Boolean createParent = null;
    // Each CreateOpts option may appear at most once; duplicates and
    // unrecognized option types are rejected.
    for (CreateOpts iOpt : opts) {
      if (CreateOpts.BlockSize.class.isInstance(iOpt)) {
        if (blockSize != -1) {
          throw new HadoopIllegalArgumentException(
              "BlockSize option is set multiple times");
        }
        blockSize = ((CreateOpts.BlockSize) iOpt).getValue();
      } else if (CreateOpts.BufferSize.class.isInstance(iOpt)) {
        if (bufferSize != -1) {
          throw new HadoopIllegalArgumentException(
              "BufferSize option is set multiple times");
        }
        bufferSize = ((CreateOpts.BufferSize) iOpt).getValue();
      } else if (CreateOpts.ReplicationFactor.class.isInstance(iOpt)) {
        if (replication != -1) {
          throw new HadoopIllegalArgumentException(
              "ReplicationFactor option is set multiple times");
        }
        replication = ((CreateOpts.ReplicationFactor) iOpt).getValue();
      } else if (CreateOpts.BytesPerChecksum.class.isInstance(iOpt)) {
        if (bytesPerChecksum != -1) {
          throw new HadoopIllegalArgumentException(
              "BytesPerChecksum option is set multiple times");
        }
        bytesPerChecksum = ((CreateOpts.BytesPerChecksum) iOpt).getValue();
      } else if (CreateOpts.ChecksumParam.class.isInstance(iOpt)) {
        if (checksumOpt != null) {
          throw new HadoopIllegalArgumentException(
              "CreateChecksumType option is set multiple times");
        }
        checksumOpt = ((CreateOpts.ChecksumParam) iOpt).getValue();
      } else if (CreateOpts.Perms.class.isInstance(iOpt)) {
        if (permission != null) {
          throw new HadoopIllegalArgumentException(
              "Perms option is set multiple times");
        }
        permission = ((CreateOpts.Perms) iOpt).getValue();
      } else if (CreateOpts.Progress.class.isInstance(iOpt)) {
        if (progress != null) {
          throw new HadoopIllegalArgumentException(
              "Progress option is set multiple times");
        }
        progress = ((CreateOpts.Progress) iOpt).getValue();
      } else if (CreateOpts.CreateParent.class.isInstance(iOpt)) {
        if (createParent != null) {
          throw new HadoopIllegalArgumentException(
              "CreateParent option is set multiple times");
        }
        createParent = ((CreateOpts.CreateParent) iOpt).getValue();
      } else {
        throw new HadoopIllegalArgumentException("Unknown CreateOpts of type " +
            iOpt.getClass().getName());
      }
    }
    if (permission == null) {
      throw new HadoopIllegalArgumentException("no permission supplied");
    }


    FsServerDefaults ssDef = getServerDefaults();
    if (ssDef.getBlockSize() % ssDef.getBytesPerChecksum() != 0) {
      throw new IOException("Internal error: default blockSize is" +
          " not a multiple of default bytesPerChecksum ");
    }
   
    if (blockSize == -1) {
      blockSize = ssDef.getBlockSize();
    }

    // Create a checksum option honoring user input as much as possible.
    // If bytesPerChecksum is specified, it will override the one set in
    // checksumOpt. Any missing value will be filled in using the default.
    ChecksumOpt defaultOpt = new ChecksumOpt(
        ssDef.getChecksumType(),
        ssDef.getBytesPerChecksum());
    checksumOpt = ChecksumOpt.processChecksumOpt(defaultOpt,
        checksumOpt, bytesPerChecksum);

    if (bufferSize == -1) {
      bufferSize = ssDef.getFileBufferSize();
    }
    if (replication == -1) {
      replication = ssDef.getReplication();
    }
    if (createParent == null) {
      createParent = false;
    }

    if (blockSize % bytesPerChecksum != 0) {
      throw new HadoopIllegalArgumentException(
          "blockSize should be a multiple of bytesPerChecksum");
    }

    return this.createInternal(f, createFlag, permission, bufferSize,
      replication, blockSize, progress, checksumOpt, createParent);
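The long chain above enforces that every CreateOpts option appears at most once and that a permission is always supplied, then fills unspecified values from the server defaults. A hedged caller-side sketch using the factory methods on org.apache.hadoop.fs.Options.CreateOpts (fc and path are placeholders):

    fc.create(path, EnumSet.of(CreateFlag.CREATE),
        CreateOpts.perms(FsPermission.getFileDefault()),  // required
        CreateOpts.blockSize(128L * 1024 * 1024),
        CreateOpts.repFac((short) 3));
    // Repeating an option, e.g. two CreateOpts.blockSize(...) entries,
    // trips "BlockSize option is set multiple times"; omitting perms
    // trips "no permission supplied".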

  public static DFSZKFailoverController create(Configuration conf) {
    Configuration localNNConf = DFSHAAdmin.addSecurityConfiguration(conf);
    String nsId = DFSUtil.getNamenodeNameServiceId(conf);

    // A failover controller only makes sense for an HA-enabled NameNode.
    if (!HAUtil.isHAEnabled(localNNConf, nsId)) {
      throw new HadoopIllegalArgumentException(
          "HA is not enabled for this namenode.");
    }
    String nnId = HAUtil.getNameNodeId(localNNConf, nsId);
    NameNode.initializeGenericKeys(localNNConf, nsId, nnId);
    DFSUtil.setGenericConf(localNNConf, nsId, nnId, ZKFC_CONF_KEYS);
    // ...
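create refuses to build a failover controller for a non-HA NameNode, so the HA nameservice keys must be in place first. A hedged configuration sketch; the nameservice and NameNode IDs are placeholders:

    Configuration conf = new HdfsConfiguration();
    conf.set("dfs.nameservices", "mycluster");
    conf.set("dfs.ha.namenodes.mycluster", "nn1,nn2");
    // ... plus the per-NameNode RPC address keys ...
    DFSZKFailoverController zkfc = DFSZKFailoverController.create(conf);
    // Without the HA keys, create(conf) throws
    // HadoopIllegalArgumentException("HA is not enabled for this namenode.").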
