Package org.apache.drill.common.exceptions

Examples of org.apache.drill.common.exceptions.ExecutionSetupException
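
The snippets below come from the Apache Drill sources and share one pattern: a lower-level checked exception caught while setting up an operator or storage plugin is rethrown wrapped in ExecutionSetupException, usually with a message describing the step that failed and with the original exception preserved as the cause. A minimal sketch of that pattern is shown first (not taken from Drill; the class name and the openInput helper are made up for illustration):

import java.io.IOException;

import org.apache.drill.common.exceptions.ExecutionSetupException;

public class SetupSketch {

  // Hypothetical setup step; only the exception-handling pattern matters here.
  public void setup(String path) throws ExecutionSetupException {
    try {
      openInput(path);
    } catch (IOException e) {
      // Keep the original exception as the cause so the failure stays diagnosable downstream.
      throw new ExecutionSetupException(String.format("Failure while opening input %s", path), e);
    }
  }

  private void openInput(String path) throws IOException {
    // Placeholder for the real I/O work.
  }
}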


 
  public StorageEngine getEngine(StorageEngineConfig engineConfig) throws ExecutionSetupException{
    StorageEngine engine = activeEngines.get(engineConfig);
    if(engine != null) return engine;
    Constructor<? extends StorageEngine> c = availableEngines.get(engineConfig.getClass());
    if(c == null) throw new ExecutionSetupException(String.format("Failure finding StorageEngine constructor for config %s", engineConfig));
    try {
      return c.newInstance(engineConfig, context);
    } catch (InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException e) {
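      // If the engine constructor itself failed, unwrap the InvocationTargetException so a nested ExecutionSetupException can be rethrown unchanged.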
      Throwable t = e instanceof InvocationTargetException ? ((InvocationTargetException)e).getTargetException() : e;
      if(t instanceof ExecutionSetupException) throw ((ExecutionSetupException) t);
      throw new ExecutionSetupException(String.format("Failure setting up new storage engine configuration for config %s", engineConfig), t);
    }
  }


                ParquetFileReader.readFooter( rowGroupScan.getStorageEngine().getFileSystem().getConf(), new Path(e.getPath())),
                rowGroupScan.getRef()
            )
        );
      } catch (IOException e1) {
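        // Failure to read the Parquet footer means the scan cannot be set up, so surface it as a setup error.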
        throw new ExecutionSetupException(e1);
      }
    }
    return new ScanBatch(context, readers.iterator());
  }

          }
        }
      }
      varLengthReader = new VarLenBinaryReader(this, varLengthColumns, nullableVarLengthColumns);
    } catch (SchemaChangeException e) {
      throw new ExecutionSetupException(e);
    }
  }

      for (VarLenBinaryReader.NullableVarLengthColumn r : varLengthReader.nullableColumns) {
        output.addField(r.valueVecHolder.getValueVector());
      }
      output.setNewSchema();
    } catch (SchemaChangeException e) {
      throw new ExecutionSetupException("Error setting up output mutator.", e);
    }

    // the stream-to-ByteBuf read method copies all of the data into one giant buffer;
    // here we do the same copy in a loop so that we do not allocate so much on the heap at once

    // TODO - this should be replaced by an enhancement in Hadoop 2.0 that will allow reading
    // directly into a ByteBuf passed into the reading method
    int totalByteLength = 0;
    long start = 0;
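    // For the first row group the read starts at the beginning of the file; the extra 4 bytes presumably account for the Parquet magic number at the start of the file.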
    if (rowGroupIndex == 0) {
      totalByteLength = 4;
    } else {
      start = rowGroupOffset;
    }
    // TODO - the methods for getting the total size and the total uncompressed size seem to return the
    // opposite of what they should
    // I found the bug in the mainline and filed an issue for it; hopefully it will be fixed soon
    for (ColumnReader crs : columnStatuses){
      totalByteLength += crs.columnChunkMetaData.getTotalSize();
    }
    for (VarLenBinaryReader.VarLengthColumn r : varLengthReader.columns){
      totalByteLength += r.columnChunkMetaData.getTotalSize();
    }
    for (VarLenBinaryReader.NullableVarLengthColumn r : varLengthReader.nullableColumns){
      totalByteLength += r.columnChunkMetaData.getTotalSize();
    }
    int bufferSize = 64*1024;
    long totalBytesWritten = 0;
    int validBytesInCurrentBuffer;
    byte[] buffer = new byte[bufferSize];
   
    try (FSDataInputStream inputStream = fileSystem.open(hadoopPath)) {
      bufferWithAllData = allocator.buffer(totalByteLength);
      inputStream.seek(start);
      while (totalBytesWritten < totalByteLength){
        validBytesInCurrentBuffer = (int) Math.min(bufferSize, totalByteLength - totalBytesWritten);
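        // Note: read(...) may return fewer bytes than requested; this loop assumes each chunk is read in full.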
        inputStream.read(buffer, 0, validBytesInCurrentBuffer);
        bufferWithAllData.writeBytes(buffer, 0, validBytesInCurrentBuffer);
        totalBytesWritten += validBytesInCurrentBuffer;
      }
    } catch (IOException e) {
      throw new ExecutionSetupException("Error opening or reading metatdata for parquet file at location: " + hadoopPath.getName());
    }
  }

        valueVectors[i] = getVector(config.getTypes()[i].getName(), config.getTypes()[i].getMajorType(), batchRecordCount);
        output.addField(valueVectors[i]);
      }
      output.setNewSchema();
    } catch (SchemaChangeException e) {
      throw new ExecutionSetupException("Failure while setting up fields", e);
    }

  }

      JsonFactory factory = new JsonFactory();
      parser = factory.createJsonParser(fileSystem.open(hadoopPath));
      parser.nextToken(); // Read to the first START_OBJECT token
      generator = new SchemaIdGenerator();
    } catch (IOException e) {
      throw new ExecutionSetupException(e);
    }
  }

  public static RootExec getExec(FragmentContext context, FragmentRoot root) throws ExecutionSetupException {
    ImplCreator i = new ImplCreator();
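    // ImplCreator visits the fragment's operator tree; the visitor records the RootExec it builds in i.root.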
    root.accept(i, context);
    if (i.root == null)
      throw new ExecutionSetupException(
          "The provided fragment did not have a root node that correctly created a RootExec value.");
    return i.getRoot();
  }

  public ScanBatch(FragmentContext context, Iterator<RecordReader> readers) throws ExecutionSetupException {
    this.context = context;
    this.readers = readers;
    if (!readers.hasNext())
      throw new ExecutionSetupException("A scan batch must contain at least one reader.");
    this.currentReader = readers.next();
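    // RecordReader.setup(...) can itself throw ExecutionSetupException, which propagates to the ScanBatch caller.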
    this.currentReader.setup(mutator);
  }

      Constructor<? extends PStoreProvider> c = storeProviderClass.getConstructor(PStoreRegistry.class);
      return c.newInstance(this);
    } catch (ConfigException.Missing | ClassNotFoundException | NoSuchMethodException | SecurityException
        | InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException e) {
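      // Configuration and reflection failures are all reported as a single setup error after being logged.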
      logger.error(e.getMessage(), e);
      throw new ExecutionSetupException("A System Table provider was either not specified or could not be found or instantiated", e);
    }
  }

    Stopwatch watch = new Stopwatch();
    watch.start();
    root.accept(i, context);
    logger.debug("Took {} ms to accept", watch.elapsed(TimeUnit.MILLISECONDS));
    if (i.root == null)
      throw new ExecutionSetupException(
          "The provided fragment did not have a root node that correctly created a RootExec value.");
    return i.getRoot();
  }


