Examples of CheckResult


Examples of hu.sztaki.ilab.longneck.process.constraint.CheckResult

           
            // Earlier part of the try block (the date parsing/conversion) is not shown in this snippet.
            for (String fieldName : applyTo) {
                BlockUtils.setValue(fieldName, outValue, record, parentScope);
            }
        } catch (IllegalArgumentException ex) {
            throw new CheckError(new CheckResult(this, false, from, dateValue,
                    String.format("Field '%1$s' content '%2$s' does not match date pattern '%3$s'.",
                    from, dateValue, fromPattern)));
        } catch (UnsupportedOperationException ex) {
            log.error("joda-time pattern-based parsing is unsupported.", ex);
        }
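The try body is cut off above; judging from the catch clauses and the error message, it parses the field content with the joda-time pattern in fromPattern before the converted value is written back via BlockUtils.setValue. A minimal, self-contained sketch of that kind of joda-time conversion follows; the class name, the convert method and the toPattern parameter are illustrative and not taken from the Longneck source.

import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;

// Sketch only: re-formats a date string from one pattern to another, the kind of
// conversion the omitted try body appears to perform. Names are illustrative.
public class DateConvertSketch {

    static String convert(String dateValue, String fromPattern, String toPattern) {
        DateTimeFormatter in = DateTimeFormat.forPattern(fromPattern);
        DateTimeFormatter out = DateTimeFormat.forPattern(toPattern);
        // parseDateTime throws IllegalArgumentException when the value does not match
        // the pattern -- the failure reported above through CheckResult and CheckError.
        DateTime parsed = in.parseDateTime(dateValue);
        return parsed.toString(out);
    }

    public static void main(String[] args) {
        System.out.println(convert("2014-03-01", "yyyy-MM-dd", "dd/MM/yyyy")); // prints 01/03/2014
    }
}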

Examples of hu.sztaki.ilab.longneck.process.constraint.CheckResult

        VariableSpace scope = new VariableSpace();
        scope.setVariable("a1", "aaa");
        Record r = new RecordImpl();
       
        CheckResult res = c.check(r, scope);
        Assert.assertTrue(res.isPassed());
    }

Examples of hu.sztaki.ilab.longneck.process.constraint.CheckResult

        c.setApplyTo(Arrays.asList(new String[] { "$a1" }));
       
        VariableSpace scope = new VariableSpace();
        Record r = new RecordImpl();

        CheckResult res = c.check(r, scope);
        Assert.assertFalse(res.isPassed());
    }

Examples of hu.sztaki.ilab.longneck.process.constraint.CheckResult

    /** The contained constraints. */
    private AndOperator constraints = new AndOperator();
   
    @Override
    public void apply(Record record, VariableSpace parentScope) throws CheckError {
        CheckResult res;
        if (checkedField == null || !BlockUtils.exists(checkedField, record, parentScope)) {
            res = new CheckResult(constraints.check(record, parentScope), this, summary);
        } else {
            CheckResult andresult = constraints.check(record, parentScope);
            res = new CheckResult(this, andresult.isPassed(), checkedField,
                    BlockUtils.getValue(checkedField, record, parentScope),
                    summary, andresult.getCauses());
        }
        if (! res.isPassed()) {
            throw new CheckError(res);           
        }
    }

Examples of hu.sztaki.ilab.longneck.process.constraint.CheckResult

    /** The checked field whose value is reported with the intentional failure. */
    private String faildField;
   
    @Override
    public void apply(Record record, VariableSpace parentScope) throws FailException {
        record.addError(new CheckResult(this, false,
                faildField == null || !BlockUtils.exists(faildField, record, parentScope) ? null : faildField,
                faildField == null ? null : BlockUtils.getValue(faildField, record, parentScope),
                summary == null ? "Intentional failure." : summary));

        throw new FailException(summary == null ? "Intentional failure." : summary);

Examples of org.apache.hadoop.hive.ql.metadata.CheckResult

   * @param msckDesc
   *          Information about the tables and partitions we want to check for.
   * @return Returns 0 when execution succeeds and above 0 if it fails.
   */
  private int msck(Hive db, MsckDesc msckDesc) {
    CheckResult result = new CheckResult();
    List<String> repairOutput = new ArrayList<String>();
    try {
      HiveMetaStoreChecker checker = new HiveMetaStoreChecker(db);
      checker.checkMetastore(db.getCurrentDatabase(), msckDesc
          .getTableName(), msckDesc.getPartSpecs(), result);
      if (msckDesc.isRepairPartitions()) {
        Table table = db.getTable(msckDesc.getTableName());
        for (CheckResult.PartitionResult part : result.getPartitionsNotInMs()) {
          try {
            db.createPartition(table, Warehouse.makeSpecFromName(part
                .getPartitionName()));
            repairOutput.add("Repair: Added partition to metastore "
                + msckDesc.getTableName() + ':' + part.getPartitionName());
          } catch (Exception e) {
            LOG.warn("Repair error, could not add partition to metastore: ", e);
          }
        }
      }
    } catch (HiveException e) {
      LOG.warn("Failed to run metacheck: ", e);
      return 1;
    } catch (IOException e) {
      LOG.warn("Failed to run metacheck: ", e);
      return 1;
    } finally {
      BufferedWriter resultOut = null;
      try {
        Path resFile = new Path(msckDesc.getResFile());
        FileSystem fs = resFile.getFileSystem(conf);
        resultOut = new BufferedWriter(new OutputStreamWriter(fs
            .create(resFile)));

        boolean firstWritten = false;
        firstWritten |= writeMsckResult(result.getTablesNotInMs(),
            "Tables not in metastore:", resultOut, firstWritten);
        firstWritten |= writeMsckResult(result.getTablesNotOnFs(),
            "Tables missing on filesystem:", resultOut, firstWritten);
        firstWritten |= writeMsckResult(result.getPartitionsNotInMs(),
            "Partitions not in metastore:", resultOut, firstWritten);
        firstWritten |= writeMsckResult(result.getPartitionsNotOnFs(),
            "Partitions missing from filesystem:", resultOut, firstWritten);
        for (String rout : repairOutput) {
          if (firstWritten) {
            resultOut.write(terminator);
          } else {
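A populated CheckResult can also be inspected outside the msck() method shown above. The sketch below uses only the accessors visible in these examples (the no-argument constructor appears at the top of msck(), and getPartitionsNotInMs() returns a list of CheckResult.PartitionResult entries, as the later examples make explicit); the class and method names of the sketch itself are illustrative, and the result is assumed to have been filled by HiveMetaStoreChecker.checkMetastore(...).

import java.util.List;

import org.apache.hadoop.hive.ql.metadata.CheckResult;

// Sketch only: prints every partition that the metastore check found on the
// filesystem but not in the metastore, using the accessors seen above.
public class MsckResultSketch {

    static void printMissingPartitions(CheckResult result) {
        List<CheckResult.PartitionResult> missing = result.getPartitionsNotInMs();
        for (CheckResult.PartitionResult part : missing) {
            // getPartitionName() is the same name msck() echoes in its repair output.
            System.out.println("Partition not in metastore: " + part.getPartitionName());
        }
    }
}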

Examples of org.apache.hadoop.hive.ql.metadata.CheckResult

   * we want to check for.
   * @return Returns 0 when execution succeeds and above 0 if it fails.
   */
  private int msck(Hive db, FileSystem fs, MsckDesc msckDesc) {
   
    CheckResult result = new CheckResult();
    try {
      HiveMetaStoreChecker checker = new HiveMetaStoreChecker(db, fs);
      checker.checkMetastore(
        MetaStoreUtils.DEFAULT_DATABASE_NAME, msckDesc.getTableName(),
        msckDesc.getPartitionSpec(),
        result);
    } catch (HiveException e) {
      LOG.warn("Failed to run metacheck: ", e);
      return 1;
    } catch (IOException e) {
      LOG.warn("Failed to run metacheck: ", e);
      return 1;
    } finally {
           
      BufferedWriter resultOut = null;
      try {
        resultOut = new BufferedWriter(
            new OutputStreamWriter(fs.create(msckDesc.getResFile())));
       
        boolean firstWritten = false;
        firstWritten |= writeMsckResult(result.getTablesNotInMs(),
            "Tables not in metastore:", resultOut, firstWritten);
        firstWritten |= writeMsckResult(result.getTablesNotOnFs(),
            "Tables missing on filesystem:", resultOut, firstWritten);     
        firstWritten |= writeMsckResult(result.getPartitionsNotInMs(),
            "Partitions not in metastore:", resultOut, firstWritten);
        firstWritten |= writeMsckResult(result.getPartitionsNotOnFs(),
            "Partitions missing from filesystem:", resultOut, firstWritten);     
      } catch (IOException e) {
        LOG.warn("Failed to save metacheck output: ", e);
        return 1;
      } finally {

Examples of org.apache.hadoop.hive.ql.metadata.CheckResult

   * @param msckDesc
   *          Information about the tables and partitions we want to check for.
   * @return Returns 0 when execution succeeds and above 0 if it fails.
   */
  private int msck(Hive db, MsckDesc msckDesc) {
    CheckResult result = new CheckResult();
    List<String> repairOutput = new ArrayList<String>();
    try {
      HiveMetaStoreChecker checker = new HiveMetaStoreChecker(db);
      Table t = db.newTable(msckDesc.getTableName());
      checker.checkMetastore(t.getDbName(), t.getTableName(), msckDesc.getPartSpecs(), result);
      if (msckDesc.isRepairPartitions()) {
        Table table = db.getTable(msckDesc.getTableName());
        for (CheckResult.PartitionResult part : result.getPartitionsNotInMs()) {
          try {
            db.createPartition(table, Warehouse.makeSpecFromName(part
                .getPartitionName()));
            repairOutput.add("Repair: Added partition to metastore "
                + msckDesc.getTableName() + ':' + part.getPartitionName());
          } catch (Exception e) {
            LOG.warn("Repair error, could not add partition to metastore: ", e);
          }
        }
      }
    } catch (HiveException e) {
      LOG.warn("Failed to run metacheck: ", e);
      return 1;
    } catch (IOException e) {
      LOG.warn("Failed to run metacheck: ", e);
      return 1;
    } finally {
      BufferedWriter resultOut = null;
      try {
        Path resFile = new Path(msckDesc.getResFile());
        FileSystem fs = resFile.getFileSystem(conf);
        resultOut = new BufferedWriter(new OutputStreamWriter(fs
            .create(resFile)));

        boolean firstWritten = false;
        firstWritten |= writeMsckResult(result.getTablesNotInMs(),
            "Tables not in metastore:", resultOut, firstWritten);
        firstWritten |= writeMsckResult(result.getTablesNotOnFs(),
            "Tables missing on filesystem:", resultOut, firstWritten);
        firstWritten |= writeMsckResult(result.getPartitionsNotInMs(),
            "Partitions not in metastore:", resultOut, firstWritten);
        firstWritten |= writeMsckResult(result.getPartitionsNotOnFs(),
            "Partitions missing from filesystem:", resultOut, firstWritten);
        for (String rout : repairOutput) {
          if (firstWritten) {
            resultOut.write(terminator);
          } else {

Examples of org.apache.hadoop.hive.ql.metadata.CheckResult

   * @param msckDesc
   *          Information about the tables and partitions we want to check for.
   * @return Returns 0 when execution succeeds and above 0 if it fails.
   */
  private int msck(Hive db, MsckDesc msckDesc) {
    CheckResult result = new CheckResult();
    List<String> repairOutput = new ArrayList<String>();
    try {
      HiveMetaStoreChecker checker = new HiveMetaStoreChecker(db);
      Table t = db.newTable(msckDesc.getTableName());
      checker.checkMetastore(t.getDbName(), t.getTableName(), msckDesc.getPartSpecs(), result);
      List<CheckResult.PartitionResult> partsNotInMs = result.getPartitionsNotInMs();
      if (msckDesc.isRepairPartitions() && !partsNotInMs.isEmpty()) {
        Table table = db.getTable(msckDesc.getTableName());
        AddPartitionDesc apd = new AddPartitionDesc(
            table.getDbName(), table.getTableName(), false);
        try {
          for (CheckResult.PartitionResult part : partsNotInMs) {
            apd.addPartition(Warehouse.makeSpecFromName(part.getPartitionName()), null);
            repairOutput.add("Repair: Added partition to metastore "
                + msckDesc.getTableName() + ':' + part.getPartitionName());
          }
          db.createPartitions(apd);
        } catch (Exception e) {
          LOG.info("Could not bulk-add partitions to metastore; trying one by one", e);
          repairOutput.clear();
          msckAddPartitionsOneByOne(db, table, partsNotInMs, repairOutput);
        }
      }
    } catch (HiveException e) {
      LOG.warn("Failed to run metacheck: ", e);
      return 1;
    } catch (IOException e) {
      LOG.warn("Failed to run metacheck: ", e);
      return 1;
    } finally {
      BufferedWriter resultOut = null;
      try {
        Path resFile = new Path(msckDesc.getResFile());
        FileSystem fs = resFile.getFileSystem(conf);
        resultOut = new BufferedWriter(new OutputStreamWriter(fs
            .create(resFile)));

        boolean firstWritten = false;
        firstWritten |= writeMsckResult(result.getTablesNotInMs(),
            "Tables not in metastore:", resultOut, firstWritten);
        firstWritten |= writeMsckResult(result.getTablesNotOnFs(),
            "Tables missing on filesystem:", resultOut, firstWritten);
        firstWritten |= writeMsckResult(result.getPartitionsNotInMs(),
            "Partitions not in metastore:", resultOut, firstWritten);
        firstWritten |= writeMsckResult(result.getPartitionsNotOnFs(),
            "Partitions missing from filesystem:", resultOut, firstWritten);
        for (String rout : repairOutput) {
          if (firstWritten) {
            resultOut.write(terminator);
          } else {
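The bulk-repair branch in this last example can be read as a small helper of its own: collect every missing partition into one AddPartitionDesc and create them with a single createPartitions() call, falling back to per-partition adds only if that fails. A hedged sketch of that step follows; the import locations of AddPartitionDesc and Warehouse are assumed from the usual Hive package layout (they are not shown in the snippet), and the class and method names of the sketch are illustrative.

import java.util.List;

// NOTE: the Warehouse and AddPartitionDesc import paths below are assumptions,
// inferred from the standard Hive source layout rather than from the snippet.
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.ql.metadata.CheckResult;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;

public class BulkRepairSketch {

    /** Adds every partition missing from the metastore in one createPartitions() call. */
    static void bulkRepair(Hive db, Table table, List<CheckResult.PartitionResult> partsNotInMs)
            throws Exception {
        AddPartitionDesc apd =
                new AddPartitionDesc(table.getDbName(), table.getTableName(), false);
        for (CheckResult.PartitionResult part : partsNotInMs) {
            // Turn the partition name back into a partition spec, exactly as the msck() example does.
            apd.addPartition(Warehouse.makeSpecFromName(part.getPartitionName()), null);
        }
        db.createPartitions(apd); // one metastore round trip instead of one per partition
    }
}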