Package org.apache.flume

Examples of org.apache.flume.FlumeException
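
FlumeException is Flume's unchecked exception (it extends RuntimeException) and is used throughout the code base to wrap configuration and I/O failures. A minimal sketch of the pattern the excerpts below follow (the helper method is hypothetical):

import java.io.IOException;

import org.apache.flume.FlumeException;

public class FlumeExceptionSketch {

  // Wrap a checked failure in the unchecked FlumeException, as the excerpts below do.
  static void openOrFail(String resource) {
    try {
      open(resource);
    } catch (IOException e) {
      throw new FlumeException("Could not open resource: " + resource, e);
    }
  }

  // Hypothetical helper standing in for the real I/O work.
  private static void open(String resource) throws IOException {
  }
}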


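// Excerpt (context inferred): parsing a ZooKeeper quorum of host:port entries;
// every node in the quorum must declare the same client port.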
          zkBuilder.append(",");
        } else {
          zkQuorum = zkBuilder.toString();
        }
        if (zkHostAndPort.length < 2) {
          throw new FlumeException("Expected client port for the ZK node!");
        }
        if (port == null) {
          port = Integer.parseInt(zkHostAndPort[1].trim());
        } else if (!port.equals(Integer.parseInt(zkHostAndPort[1].trim()))) {
          throw new FlumeException("All Zookeeper nodes in the quorum must " +
            "use the same client port.");
        }
      }
      if (port == null) {
        port = HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT;


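// Excerpt: tearing down the socket channel factory after a failed RPC client
// connection attempt and rethrowing the underlying cause as a FlumeException.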
      }
      if (socketChannelFactory != null) {
        socketChannelFactory.releaseExternalResources();
      }
      if (t instanceof IOException) {
        throw new FlumeException(this + ": RPC connection error", t);
      } else if (t instanceof FlumeException) {
        throw (FlumeException) t;
      } else if (t instanceof Error) {
        throw (Error) t;
      } else {
        throw new FlumeException(this + ": Unexpected exception", t);
      }
    }

    setState(ConnState.READY);
  }
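
For context, a hedged sketch of the caller side: FlumeException is unchecked, so connection errors like the ones wrapped above surface directly from the RPC client factory and append calls (the host name and port are placeholders):

import java.nio.charset.StandardCharsets;

import org.apache.flume.EventDeliveryException;
import org.apache.flume.FlumeException;
import org.apache.flume.api.RpcClient;
import org.apache.flume.api.RpcClientFactory;
import org.apache.flume.event.EventBuilder;

public class RpcClientCallerSketch {
  public static void main(String[] args) {
    RpcClient client = null;
    try {
      client = RpcClientFactory.getDefaultInstance("flume-host", 41414);   // placeholder endpoint
      client.append(EventBuilder.withBody("hello", StandardCharsets.UTF_8));
    } catch (FlumeException e) {
      // connection or configuration failure, e.g. the "RPC connection error" above
    } catch (EventDeliveryException e) {
      // the event could not be delivered
    } finally {
      if (client != null) {
        client.close();   // close() may itself throw FlumeException
      }
    }
  }
}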

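// Excerpt: closing the Avro transceiver during client shutdown; the connection
// state is marked DEAD even if the close itself fails.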
      callTimeoutPool = null;
    }
    try {
      transceiver.close();
    } catch (IOException ex) {
      throw new FlumeException(this + ": Error closing transceiver.", ex);
    } finally {
      setState(ConnState.DEAD);
    }

  }

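// Excerpt: configuring the default (single-host) RPC client from a Properties
// object; a malformed host or port raises FlumeException, while an invalid
// batch size only logs a warning and falls back to the default.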
  public synchronized void configure(Properties properties)
      throws FlumeException {
    stateLock.lock();
    try {
      if (connState == ConnState.READY || connState == ConnState.DEAD) {
        throw new FlumeException("This client was already configured, " +
            "cannot reconfigure.");
      }
    } finally {
      stateLock.unlock();
    }

    // batch size
    String strBatchSize = properties.getProperty(
        RpcClientConfigurationConstants.CONFIG_BATCH_SIZE);
    logger.debug("Batch size string = " + strBatchSize);
    batchSize = RpcClientConfigurationConstants.DEFAULT_BATCH_SIZE;
    if (strBatchSize != null && !strBatchSize.isEmpty()) {
      try {
        int parsedBatch = Integer.parseInt(strBatchSize);
        if (parsedBatch < 1) {
          logger.warn("Invalid value for batchSize: {}; Using default value.", parsedBatch);
        } else {
          batchSize = parsedBatch;
        }
      } catch (NumberFormatException e) {
        logger.warn("Batch size is not valid for RpcClient: " + strBatchSize +
            ". Default value assigned.", e);
      }
    }

    // host and port
    String hostNames = properties.getProperty(
        RpcClientConfigurationConstants.CONFIG_HOSTS);
    String[] hosts = null;
    if (hostNames != null && !hostNames.isEmpty()) {
      hosts = hostNames.split("\\s+");
    } else {
      throw new FlumeException("Hosts list is invalid: " + hostNames);
    }

    if (hosts.length > 1) {
      logger.warn("More than one host was specified for the default client. "
          + "Only the first host will be used and the others ignored. Specified: "
          + hostNames + "; to be used: " + hosts[0]);
    }

    String host = properties.getProperty(
        RpcClientConfigurationConstants.CONFIG_HOSTS_PREFIX + hosts[0]);
    if (host == null || host.isEmpty()) {
      throw new FlumeException("Host not found: " + hosts[0]);
    }
    String[] hostAndPort = host.split(":");
    if (hostAndPort.length != 2) {
      throw new FlumeException("Invalid hostname: " + hosts[0]);
    }
    Integer port = null;
    try {
      port = Integer.parseInt(hostAndPort[1]);
    } catch (NumberFormatException e) {
      throw new FlumeException("Invalid Port: " + hostAndPort[1], e);
    }
    this.address = new InetSocketAddress(hostAndPort[0], port);

    // connect timeout
    connectTimeout =
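
A hedged sketch of the Properties this configure(...) excerpt reads: one logical host name under CONFIG_HOSTS, its address under the CONFIG_HOSTS_PREFIX key, and an optional batch size. The host, port, and batch size values below are placeholders:

import java.util.Properties;

import org.apache.flume.api.RpcClient;
import org.apache.flume.api.RpcClientConfigurationConstants;
import org.apache.flume.api.RpcClientFactory;

public class RpcClientConfigSketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty(RpcClientConfigurationConstants.CONFIG_HOSTS, "h1");
    props.setProperty(RpcClientConfigurationConstants.CONFIG_HOSTS_PREFIX + "h1",
        "flume-host:41414");
    props.setProperty(RpcClientConfigurationConstants.CONFIG_BATCH_SIZE, "100");

    // A missing host entry or an unparseable port in the excerpt above
    // propagates out of this call as a FlumeException.
    RpcClient client = RpcClientFactory.getInstance(props);
    client.close();
  }
}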

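// Excerpt (likely from a test helper): scanning HBase results into an array and
// failing with a FlumeException if more rows than expected are returned.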
      for (Result r = rs.next(); r != null; r = rs.next()) {
        out = r.getValue(columnFamily.getBytes(), plCol.getBytes());

        if (i >= results.length - 1) {
          rs.close();
          throw new FlumeException("More results than expected in the table. " +
              "Expected = " + numEvents + ". Found = " + i);
        }
        results[i++] = out;
        System.out.println(out);
      }

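// Excerpt: blocking on a latch until the asynchronous HBase callbacks complete;
// a missing table or column family aborts sink startup with a FlumeException.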
            new SuccessCallback<Object, Object>(latch) ,
            new ErrBack<Object, Object>(latch, fail));
    try {
      latch.await();
    } catch (InterruptedException e) {
      throw new FlumeException(
          "Interrupted while waiting for HBase callbacks", e);
    }
    if (fail.get()) {
      throw new FlumeException(
          "Could not start sink. " +
          "Table or column family does not exist in HBase.");
    } else {
      open = true;
    }

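// Excerpt: loading the HBase table at sink start-up and verifying that the
// configured column family exists, wrapping any IOException in a FlumeException.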
      //their criteria for flushing does not change how we flush.
      table.setAutoFlush(false);
    } catch (IOException e) {
      logger.error("Could not load table, " + tableName +
          " from HBase", e);
      throw new FlumeException("Could not load table, " + tableName +
          " from HBase", e);
    }

    try {
      if (!table.getTableDescriptor().hasFamily(columnFamily)) {
        throw new IOException("Table " + tableName +
            " has no such column family " + columnFamily);
      }
    } catch (IOException e) {
      // getTableDescriptor() also throws IOException, so this catches both the
      // IOException thrown above and any thrown by the getTableDescriptor() call.
      throw new FlumeException("Error getting column family from HBase. " +
          "Please verify that the table " + tableName + " and column family " +
          columnFamily + " exist in HBase.", e);
    }

    super.start();

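// Excerpt: stopping the sink; a failure to close the HBase table is rethrown
// as a FlumeException.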
  public void stop() {
    try {
      table.close();
      table = null;
    } catch (IOException e) {
      throw new FlumeException("Error closing table.", e);
    }
  }

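// Excerpt: instantiating an Interceptor.Builder by reflection; the checked
// reflection exceptions are logged and rethrown as unchecked FlumeExceptions.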
        Interceptor.Builder builder = factory.newInstance(type);
        builder.configure(interceptorContext);
        interceptors.add(builder.build());
      } catch (ClassNotFoundException e) {
        LOG.error("Builder class not found. Exception follows.", e);
        throw new FlumeException("Interceptor.Builder not found.", e);
      } catch (InstantiationException e) {
        LOG.error("Could not instantiate Builder. Exception follows.", e);
        throw new FlumeException("Interceptor.Builder not constructable.", e);
      } catch (IllegalAccessException e) {
        LOG.error("Unable to access Builder. Exception follows.", e);
        throw new FlumeException("Unable to access Interceptor.Builder.", e);
      }
    }

    interceptorChain.setInterceptors(interceptors);
  }

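// Excerpt: configuring a multiplexing channel selector; an empty default channel
// list, an empty mapping, or a duplicate mapping is rejected with a FlumeException.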
    defaultChannels = getChannelListFromNames(
        context.getString(CONFIG_DEFAULT_CHANNEL), channelNameMap);

    if (defaultChannels.isEmpty()) {
      throw new FlumeException("Default channel list empty");
    }

    Map<String, String> mapConfig =
        context.getSubProperties(CONFIG_PREFIX_MAPPING);

    channelMapping = new HashMap<String, List<Channel>>();

    for (String headerValue : mapConfig.keySet()) {
      List<Channel> configuredChannels = getChannelListFromNames(
          mapConfig.get(headerValue),
          channelNameMap);

      // Do not fall back to the default channel(s) here: an explicitly
      // configured mapping with no valid channels indicates a misconfiguration.
      if (configuredChannels.isEmpty()) {
        throw new FlumeException("No channel configured for when "
            + "header value is: " + headerValue);
      }

      if (channelMapping.put(headerValue, configuredChannels) != null) {
        throw new FlumeException("Selector channel configured twice");
      }
    }
    // If no mapping is configured, that is OK:
    // all events will go to the default channel(s).
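
A hedged sketch of driving the selector configuration above programmatically. The literal keys ("header", "default", "mapping.") are assumptions based on the constants read in the excerpt; in an agent properties file they carry a "selector." prefix (for example selector.default = c1):

import java.util.Arrays;

import org.apache.flume.Channel;
import org.apache.flume.Context;
import org.apache.flume.channel.MemoryChannel;
import org.apache.flume.channel.MultiplexingChannelSelector;

public class MultiplexingSelectorSketch {
  public static void main(String[] args) {
    Channel c1 = new MemoryChannel();
    c1.setName("c1");

    MultiplexingChannelSelector selector = new MultiplexingChannelSelector();
    selector.setChannels(Arrays.asList(c1));

    Context ctx = new Context();
    ctx.put("header", "state");      // assumed key for the multiplexing header name
    ctx.put("default", "c1");        // assumed key for the default channel list
    ctx.put("mapping.US", "c1");     // assumed prefix for per-header-value mappings
    selector.configure(ctx);         // throws FlumeException if the default list is empty
  }
}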
