Package org.apache.drill.common.exceptions

Examples of org.apache.drill.common.exceptions.ExecutionSetupException
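Every example on this page follows the same pattern: a failure during operator or reader setup (an I/O error, a schema change, an unsupported type) is caught and rethrown as an ExecutionSetupException, so callers deal with a single checked exception type. A minimal self-contained sketch of that pattern; the class and method names here are hypothetical, not from the Drill sources:

import java.io.IOException;

import org.apache.drill.common.exceptions.ExecutionSetupException;

// Hypothetical reader: only the exception handling mirrors the real snippets.
public class ExampleReaderSetup {
  public void setup(String path) throws ExecutionSetupException {
    try {
      open(path);
    } catch (IOException e) {
      // Wrap the low-level failure, keeping it as the cause for diagnostics.
      throw new ExecutionSetupException("Failed to open " + path, e);
    }
  }

  private void open(String path) throws IOException {
    // Placeholder for the real I/O performed during setup.
    throw new IOException("simulated failure for " + path);
  }
}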


        MaterializedField field = MaterializedField.create(SchemaPath.getSimplePath(partitionColumnDesignator + i), Types.optional(MinorType.VARCHAR));
        ValueVector v = mutator.addField(field, NullableVarCharVector.class);
        partitionVectors.add(v);
      }
    } catch (SchemaChangeException e) {
      throw new ExecutionSetupException(e);
    }
  }


            parquet.column.Encoding.valueOf(pageHeader.dictionary_page_header.encoding.name())
        );
        this.dictionary = page.getEncoding().initDictionary(parentStatus.columnDescriptor, page);
      }
    } catch (IOException e) {
      throw new ExecutionSetupException("Error opening or reading metadata for parquet file at location: " + path.getName(), e);
    }
  }

        } else if (type == String.class) {
          writers[i] = new StringWriter(f);
        } else if (type == Timestamp.class) {
          writers[i] = new NTimeStampWriter(f);
        } else {
          throw new ExecutionSetupException(String.format("PojoRecord reader doesn't yet support conversions from type [%s].", type));
        }
        writers[i].init(output);
      }
    } catch (SchemaChangeException e) {
      throw new ExecutionSetupException("Failure while setting up schema for PojoRecordReader.", e);
    }

  }
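The PojoRecordReader snippet above dispatches on the Java field type and fails fast during setup for anything unsupported, rather than failing later mid-query. A sketch of that fail-fast dispatch; the writer types below are hypothetical stand-ins, not Drill classes:

import org.apache.drill.common.exceptions.ExecutionSetupException;

public final class WriterDispatch {
  // Hypothetical stand-ins for Drill's pojo field writers.
  interface FieldWriter {}
  static final class StringFieldWriter implements FieldWriter {}
  static final class LongFieldWriter implements FieldWriter {}

  static FieldWriter writerFor(Class<?> type) throws ExecutionSetupException {
    if (type == String.class) {
      return new StringFieldWriter();
    } else if (type == Long.class) {
      return new LongFieldWriter();
    }
    // Reject unsupported types during setup, before any rows are processed.
    throw new ExecutionSetupException(
        String.format("No writer available for type [%s].", type));
  }
}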

  public RecordBatch getWriterBatch(FragmentContext context, RecordBatch incoming, ParquetWriter writer)
          throws ExecutionSetupException {
    try {
      return new WriterRecordBatch(writer, incoming, context, getRecordWriter(context, writer));
    } catch (IOException e) {
      throw new ExecutionSetupException(String.format("Failed to create the WriterRecordBatch. %s", e.getMessage()), e);
    }
  }
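Because getWriterBatch declares the checked ExecutionSetupException, a caller must either declare it as well or translate it. A sketch of a translating caller; the BatchSource interface is a hypothetical stand-in for the format plugin above:

import org.apache.drill.common.exceptions.ExecutionSetupException;

public class WriterBatchCaller {
  // Hypothetical stand-in for the format plugin's batch factory above.
  interface BatchSource {
    Object getWriterBatch() throws ExecutionSetupException;
  }

  // Callers must either declare the checked exception or translate it.
  static Object createBatch(BatchSource source) {
    try {
      return source.getWriterBatch();
    } catch (ExecutionSetupException e) {
      // Keep the cause chain intact when translating.
      throw new RuntimeException("Could not initialize writer batch", e);
    }
  }
}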

          factories.add(new WorkspaceSchemaFactory(context.getConfig(), context.getPersistentStoreProvider(), this, "default", name, fs, WorkspaceConfig.DEFAULT, matchers));
        }
      }
      this.schemaFactory = new FileSystemSchemaFactory(name, factories);
    } catch (IOException e) {
      throw new ExecutionSetupException("Failure setting up file system plugin.", e);
    }
  }

      writer = new VectorContainerWriter(output);
      recordMaterializer = new DrillParquetRecordMaterializer(writer, projection);
      primitiveVectors = writer.getMapVector().getPrimitiveVectors();
      recordReader = columnIO.getRecordReader(pageReadStore, recordMaterializer);
    } catch (Exception e) {
      throw new ExecutionSetupException(e);
    }
  }

    }
    rowGroupOffset = footer.getBlocks().get(rowGroupIndex).getColumns().get(0).getFirstDataPageOffset();

    // none of the columns in the parquet file matched the request columns from the query
    if (columnsToScan == 0) {
      throw new ExecutionSetupException("Error reading from parquet file. No columns requested were found in the file.");
    }
    if (allFieldsFixedLength) {
      recordsPerBatch = (int) Math.min(Math.min(batchSize / bitWidthAllFixedFields,
          footer.getBlocks().get(0).getColumns().get(0).getValueCount()), 65535);
    } else {
      recordsPerBatch = DEFAULT_RECORDS_TO_READ_IF_NOT_FIXED_WIDTH;
    }

    try {
      ValueVector v;
      ConvertedType convertedType;
      SchemaElement schemaElement;
      ArrayList<VarLengthColumn> varLengthColumns = new ArrayList<>();
      // initialize all of the column read status objects
      boolean fieldFixedLength = false;
      for (int i = 0; i < columns.size(); ++i) {
        column = columns.get(i);
        columnChunkMetaData = footer.getBlocks().get(rowGroupIndex).getColumns().get(i);
        schemaElement = schemaElements.get(column.getPath()[0]);
        convertedType = schemaElement.getConverted_type();
        MajorType type = ParquetToDrillTypeConverter.toMajorType(column.getType(), schemaElement.getType_length(), getDataMode(column), schemaElement);
        field = MaterializedField.create(toFieldName(column.getPath()), type);
        // the field was not requested to be read
        if (!fieldSelected(field)) continue;

        fieldFixedLength = column.getType() != PrimitiveType.PrimitiveTypeName.BINARY;
        v = output.addField(field, (Class<? extends ValueVector>) TypeHelper.getValueVectorClass(type.getMinorType(), type.getMode()));
        if (column.getType() != PrimitiveType.PrimitiveTypeName.BINARY) {
          if (column.getMaxRepetitionLevel() > 0) {
            ColumnReader dataReader = ColumnReaderFactory.createFixedColumnReader(this, fieldFixedLength, column, columnChunkMetaData, recordsPerBatch,
                ((RepeatedFixedWidthVector) v).getMutator().getDataVector(), schemaElement);
            varLengthColumns.add(new FixedWidthRepeatedReader(this, dataReader,
                getTypeLengthInBits(column.getType()), -1, column, columnChunkMetaData, false, v, schemaElement));
          }
          else {
            columnStatuses.add(ColumnReaderFactory.createFixedColumnReader(this, fieldFixedLength, column, columnChunkMetaData, recordsPerBatch, v,
                schemaElement));
          }
        } else {
          // create a reader and add it to the appropriate list
          varLengthColumns.add(ColumnReaderFactory.getReader(this, -1, column, columnChunkMetaData, false, v, schemaElement));
        }
      }
      varLengthReader = new VarLenBinaryReader(this, varLengthColumns);
    } catch (Exception e) {
      // SchemaChangeException and any other setup failure are wrapped the same way.
      throw new ExecutionSetupException(e);
    }
  }
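With the bare-cause overload used above, the descriptive message lives on the wrapped exception, so a caller that wants the root failure has to walk the cause chain. A minimal sketch using only the standard Throwable API; the class name is hypothetical:

import org.apache.drill.common.exceptions.ExecutionSetupException;

public class FailureDescriber {
  // Walk the cause chain to the root failure for logging or error messages.
  static String describe(ExecutionSetupException e) {
    Throwable root = e;
    while (root.getCause() != null) {
      root = root.getCause();
    }
    return root.getClass().getSimpleName() + ": " + root.getMessage();
  }
}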

      else if (columnDescriptor.getType() == PrimitiveType.PrimitiveTypeName.DOUBLE) {
        return new NullableFixedByteAlignedReaders.NullableDictionaryFloat8Reader(parentReader, allocateSize, columnDescriptor, columnChunkMetaData,
            fixedLength, (NullableFloat8Vector)valueVec, schemaElement);
      } else {
        throw new ExecutionSetupException("Unsupported nullable column type " + columnDescriptor.getType().name());
      }
    }
  }

  public RecordBatch getWriterBatch(FragmentContext context, RecordBatch incoming, EasyWriter writer)
      throws ExecutionSetupException {
    try {
      return new WriterRecordBatch(writer, incoming, context, getRecordWriter(context, writer));
    } catch (IOException e) {
      throw new ExecutionSetupException(String.format("Failed to create the WriterRecordBatch. %s", e.getMessage()), e);
    }
  }

          }
        } else {
          partitionColumns.add(new String[] {});
        }
      } catch (IOException e1) {
        throw new ExecutionSetupException(e1);
      }
    }

    if (selectAllColumns) {
      for (int i = 0; i < numParts; i++) {
