Package: org.apache.drill.common.exceptions

Examples of org.apache.drill.common.exceptions.DrillRuntimeException


          .getCollectionName());
      CommandResult stats = collection.getStats();
      return new ScanStats(GroupScanProperty.EXACT_ROW_COUNT,
          stats.getLong(COUNT), 1, (float) stats.getDouble(SIZE));
    } catch (Exception e) {
      throw new DrillRuntimeException(e.getMessage(), e);
    }
  }
View Full Code Here


        }
      }

      table.close();
    } catch (IOException e) {
      throw new DrillRuntimeException("Error getting region info for table: " + hbaseScanSpec.getTableName(), e);
    }
    verifyColumns();
  }
View Full Code Here

    try {
      newGroupsScan = new MongoGroupScan(groupScan.getStoragePlugin(),
          newScanSpec, groupScan.getColumns());
    } catch (IOException e) {
      logger.error(e.getMessage(), e);
      throw new DrillRuntimeException(e.getMessage(), e);
    }
    newGroupsScan.setFilterPushedDown(true);

    final ScanPrel newScanPrel = ScanPrel.create(scan, filter.getTraitSet(),
        newGroupsScan, scan.getRowType());
View Full Code Here

      MongoClient client = MongoCnxnManager.getClient(addresses, clientOptions);
      DB db = client.getDB(subScanSpec.getDbName());
      db.setReadPreference(ReadPreference.nearest());
      collection = db.getCollection(subScanSpec.getCollectionName());
    } catch (UnknownHostException e) {
      throw new DrillRuntimeException(e.getMessage(), e);
    }
  }
View Full Code Here

          docCount++;
          break;

        case WRITE_FAILED:
          if (docCount == 0) {
            throw new DrillRuntimeException(errMsg);
          }
          logger.warn(errMsg, doc);
          break done;

        default:
          break done;
        }
      }

      for (SchemaPath sp : jsonReaderWithState.getNullColumns()) {
        PathSegment root = sp.getRootSegment();
        BaseWriter.MapWriter fieldWriter = writer.rootAsMap();
        if (root.getChild() != null && !root.getChild().isArray()) {
          fieldWriter = fieldWriter.map(root.getNameSegment().getPath());
          while (root.getChild().getChild() != null
              && !root.getChild().isArray()) {
            fieldWriter = fieldWriter.map(root.getChild().getNameSegment()
                .getPath());
            root = root.getChild();
          }
          fieldWriter.integer(root.getChild().getNameSegment().getPath());
        } else {
          fieldWriter.integer(root.getNameSegment().getPath());
        }
      }

      writer.setValueCount(docCount);
      logger.debug("Took {} ms to get {} records",
          watch.elapsed(TimeUnit.MILLISECONDS), rowCount);
      return docCount;
    } catch (Exception e) {
      logger.error(e.getMessage(), e);
      throw new DrillRuntimeException("Failure while reading Mongo Record.", e);
    }
  }
View Full Code Here

    Stopwatch watch = new Stopwatch();
    watch.start();
    int rowCount = 0;

    if (valueVector == null) {
      throw new DrillRuntimeException("Value vector is not initialized!!!");
    }
    valueVector.clear();
    valueVector
        .allocateNew(4 * 1024 * TARGET_RECORD_COUNT, TARGET_RECORD_COUNT);

    String errMsg = "Document {} is too big to fit into allocated ValueVector";

    try {
      for (; rowCount < TARGET_RECORD_COUNT && cursor.hasNext(); rowCount++) {
        String doc = cursor.next().toString();
        byte[] record = doc.getBytes(Charsets.UTF_8);
        if (!valueVector.getMutator().setSafe(rowCount, record, 0,
            record.length)) {
          logger.warn(errMsg, doc);
          if (rowCount == 0) {
            break;
          }
        }
      }
      valueVector.getMutator().setValueCount(rowCount);
      logger.debug("Took {} ms to get {} records",
          watch.elapsed(TimeUnit.MILLISECONDS), rowCount);
      return rowCount;
    } catch (Exception e) {
      logger.error(e.getMessage(), e);
      throw new DrillRuntimeException("Failure while reading Mongo Record.", e);
    }
  }
View Full Code Here

    try {
      FragmentContext context = new FragmentContext(bit.getContext(), BitControl.PlanFragment.getDefaultInstance(), null, bit.getContext().getFunctionImplementationRegistry());
      return creator.getBatch(context,scanPOP, children);
    } catch (Exception ex) {
      throw new DrillRuntimeException("Error when setup fragment context" + ex);
    }
  }
View Full Code Here

    ByteBuf b;
    try {
      b = parentColumnReader.parentReader.getOperatorContext().getAllocator().buffer(size);
      //b = UnpooledByteBufAllocator.DEFAULT.heapBuffer(size);
    }catch(Exception e){
      throw new DrillRuntimeException("Unable to allocate "+size+" bytes of memory in the Parquet Reader."+
        "[Exception: "+e.getMessage()+"]"
      );
    }
    return b;
  }
View Full Code Here

    sb.append(msg).append(" - Parser was at record: ").append(recordCount+1);
    if (e instanceof JsonParseException) {
      JsonParseException ex = JsonParseException.class.cast(e);
      sb.append(" column: ").append(ex.getLocation().getColumnNr());
    }
    throw new DrillRuntimeException(sb.toString(), e);
  }
View Full Code Here

    this.rowType = rowType;
    this.columns = columns == null || columns.size() == 0 ? GroupScan.ALL_COLUMNS : columns;
    try {
      this.groupScan = drillTable.getGroupScan().clone(this.columns);
    } catch (IOException e) {
      throw new DrillRuntimeException("Failure creating scan.", e);
    }
  }
View Full Code Here

TOP

Related Classes of org.apache.drill.common.exceptions.DrillRuntimeException

Copyright © 2018 www.massapicom. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., which is owned by Oracle Inc. Contact coftware#gmail.com.