Package org.apache.accumulo.core.file

Examples of org.apache.accumulo.core.file.FileSKVIterator
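
The examples below all follow the same basic pattern: obtain a FileSKVIterator from FileOperations.openReader(...), walk the entries with hasTop()/getTopKey()/getTopValue()/next(), and close the reader when finished. For orientation, here is a minimal sketch of that read loop using the same openReader overload the examples use; the class and method names (FileSKVIteratorSketch, dumpFile) are illustrative only, not part of any example below.

    import java.io.IOException;

    import org.apache.accumulo.core.conf.AccumuloConfiguration;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.file.FileOperations;
    import org.apache.accumulo.core.file.FileSKVIterator;
    import org.apache.hadoop.fs.FileSystem;

    public class FileSKVIteratorSketch {

      // Illustrative helper: open a data file and print every key/value pair it contains.
      static void dumpFile(FileSystem fs, String file) throws IOException {
        // Overload used throughout the examples: (path, seekToBeginning, fs, conf, tableConfiguration)
        FileSKVIterator reader = FileOperations.getInstance().openReader(file, true, fs, fs.getConf(),
            AccumuloConfiguration.getDefaultConfiguration());
        try {
          while (reader.hasTop()) {               // more entries remain
            Key key = reader.getTopKey();         // current key
            Value value = reader.getTopValue();   // current value
            System.out.println(key + " -> " + value);
            reader.next();                        // advance to the next entry
          }
        } finally {
          reader.close();                         // always release the underlying file
        }
      }
    }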


    /*
     * Begin the import - iterate the files in the path
     */
    for (FileStatus importStatus : fs.listStatus(importPath)) {
      try {
        FileSKVIterator importIterator = FileOperations.getInstance().openReader(importStatus.getPath().toString(), true, fs, fs.getConf(),
            AccumuloConfiguration.getDefaultConfiguration());
        while (importIterator.hasTop()) {
          Key key = importIterator.getTopKey();
          Value value = importIterator.getTopValue();
          if (setTime) {
            key.setTimestamp(time);
          }
          Mutation mutation = new Mutation(key.getRow());
          if (!key.isDeleted()) {
            mutation.put(key.getColumnFamily(), key.getColumnQualifier(), new ColumnVisibility(key.getColumnVisibilityData().toArray()), key.getTimestamp(),
                value);
          } else {
            mutation.putDelete(key.getColumnFamily(), key.getColumnQualifier(), new ColumnVisibility(key.getColumnVisibilityData().toArray()),
                key.getTimestamp());
          }
          table.addMutation(mutation);
          importIterator.next();
        }
      } catch (Exception e) {
        FSDataOutputStream failureWriter = null;
        DataInputStream failureReader = null;
        try {


      mfw = null; // set this to null so we do not try to close it again in finally if the close fails
      mfwTmp.close(); // if the close fails it will cause the compaction to fail
     
      // Verify the file, since hadoop 0.20.2 sometimes lies about the success of close()
      try {
        FileSKVIterator openReader = fileFactory.openReader(compactTmpName, false, fs, conf, tableConf);
        openReader.close();
      } catch (IOException ex) {
        log.error("Verification of successful major compaction fails!!!", ex);
        throw ex;
      }
     

     
      // Determine the tablet's logical time: track the maximum timestamp seen across its data files
      long rtime = Long.MIN_VALUE;
      for (String path : datafiles.keySet()) {
        String filename = new Path(path).getName();
       
        FileSKVIterator reader = FileOperations.getInstance().openReader(this.location + "/" + filename, true, fs, fs.getConf(),
            AccumuloConfiguration.getTableConfiguration(HdfsZooInstance.getInstance().getInstanceID(), Constants.METADATA_TABLE_ID));
        long maxTime = -1;
        try {
         
          while (reader.hasTop()) {
            maxTime = Math.max(maxTime, reader.getTopKey().getTimestamp());
            reader.next();
          }
         
        } finally {
          reader.close();
        }
       
        if (maxTime > rtime) {
          time = TabletTime.LOGICAL_TIME_ID + "" + maxTime;
          rtime = maxTime;

    // Open each map file, wrapping its reader in a ProblemReportingIterator; on failure,
    // report the problem and close any readers that were already opened
    for (String mapFile : mapFiles) {
      try {
       
        FileOperations fileFactory = FileOperations.getInstance();
       
        FileSKVIterator reader;
       
        AccumuloConfiguration tableConf = AccumuloConfiguration.getTableConfiguration(HdfsZooInstance.getInstance().getInstanceID(), extent.getTableId()
            .toString());
       
        reader = fileFactory.openReader(mapFile, false, fs, conf, tableConf);
       
        readers.add(reader);
        iters.add(new ProblemReportingIterator(extent.getTableId().toString(), mapFile, false, reader));
       
      } catch (Throwable e) {
       
        ProblemReports.getInstance().report(new ProblemReport(extent.getTableId().toString(), ProblemType.FILE_READ, mapFile, e));
       
        log.warn("Some problem opening map file " + mapFile + " " + e.getMessage(), e);
        // failed to open some map file... close the ones that were opened
        for (FileSKVIterator reader : readers) {
          try {
            reader.close();
          } catch (Throwable e2) {
            log.warn("Failed to close map file", e2);
          }
        }
       

    }
   
  }
 
  private static void checkIndex(Reader reader) throws IOException {
    FileSKVIterator indexIter = reader.getIndex();
   
    if (indexIter.hasTop()) {
      Key lastKey = new Key(indexIter.getTopKey());
     
      if (reader.getFirstKey().compareTo(lastKey) > 0)
        throw new RuntimeException("First key out of order " + reader.getFirstKey() + " " + lastKey);
     
      indexIter.next();
     
      while (indexIter.hasTop()) {
        if (lastKey.compareTo(indexIter.getTopKey()) > 0)
          throw new RuntimeException("Indext out of order " + lastKey + " " + indexIter.getTopKey());
       
        lastKey = new Key(indexIter.getTopKey());
        indexIter.next();
       
      }
     
      if (reader.getLastKey().compareTo(lastKey) != 0) {
        throw new RuntimeException("Last key out of order " + reader.getLastKey() + " " + lastKey);

   
    trf.writer.close();
   
    trf.openReader();
   
    FileSKVIterator indexIter = trf.reader.getIndex();
    int count = 0;
    while (indexIter.hasTop()) {
      count++;
      indexIter.next();
    }
   
    assert (count > 4);
   
    trf.iter.seek(new Range(nk("r0000", "cf1", "cq1", "", 1), true, nk("r0001", "cf1", "cq1", "", 1), false), EMPTY_COL_FAMS, false);

      mfw = null; // set this to null so we do not try to close it again in finally if the close fails
      mfwTmp.close(); // if the close fails it will cause the compaction to fail
     
      // Verify the file, since hadoop 0.20.2 sometimes lies about the success of close()
      try {
        FileSKVIterator openReader = fileFactory.openReader(outputFile, false, fs, conf, tableConf);
        openReader.close();
      } catch (IOException ex) {
        log.error("Verification of successful compaction fails!!! " + extent + " " + outputFile, ex);
        throw ex;
      }
     

    // Open each file selected for compaction; wrap readers so read problems are reported and,
    // where a time is set for the file, timestamps are applied via TimeSettingIterator
    for (String mapFile : filesToCompact.keySet()) {
      try {
       
        FileOperations fileFactory = FileOperations.getInstance();
       
        FileSKVIterator reader;
       
        AccumuloConfiguration tableConf = ServerConfiguration.getTableConfiguration(extent.getTableId().toString());
       
        reader = fileFactory.openReader(mapFile, false, fs, conf, tableConf);
       
        readers.add(reader);
       
        SortedKeyValueIterator<Key,Value> iter = new ProblemReportingIterator(extent.getTableId().toString(), mapFile, false, reader);
       
        if (filesToCompact.get(mapFile).isTimeSet()) {
          iter = new TimeSettingIterator(iter, filesToCompact.get(mapFile).getTime());
        }
       
        iters.add(iter);
       
      } catch (Throwable e) {
       
        ProblemReports.getInstance().report(new ProblemReport(extent.getTableId().toString(), ProblemType.FILE_READ, mapFile, e));
       
        log.warn("Some problem opening map file " + mapFile + " " + e.getMessage(), e);
        // failed to open some map file... close the ones that were opened
        for (FileSKVIterator reader : readers) {
          try {
            reader.close();
          } catch (Throwable e2) {
            log.warn("Failed to close map file", e2);
          }
        }
       

   
    readers.clear();

    // TODO need to close files
    for (String file : absFiles) {
      FileSKVIterator reader = FileOperations.getInstance().openReader(file, false, fs, conf, acuTableConf, null, null);
      readers.add(reader);
    }
   
    MultiIterator multiIter = new MultiIterator(readers, extent);
   

  }
 
  @Override
  public FileSKVIterator openReader(String file, Range range, Set<ByteSequence> columnFamilies, boolean inclusive, FileSystem fs, Configuration conf,
      AccumuloConfiguration tableConf) throws IOException {
    FileSKVIterator iter = openReader(file, false, fs, conf, tableConf, null, null);
    iter.seek(range, columnFamilies, inclusive);
    return iter;
  }
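
A caller of this range-seeking overload would look roughly like the sketch below. Here file, fs, conf, and tableConf are assumed to already be in scope, imports from org.apache.accumulo.core.data and org.apache.hadoop.io.Text are assumed, and process(...) is a placeholder for whatever the caller does with each entry; the row and column family values simply echo those used in the test example above.

      // Hypothetical usage of the range-seeking overload: read only the entries in
      // [r0000, r0001) for column family cf1
      Range range = new Range(new Text("r0000"), true, new Text("r0001"), false);
      Set<ByteSequence> families = Collections.<ByteSequence>singleton(new ArrayByteSequence("cf1"));
      FileSKVIterator iter = FileOperations.getInstance().openReader(file, range, families, true, fs, conf, tableConf);
      try {
        while (iter.hasTop()) {
          process(iter.getTopKey(), iter.getTopValue()); // placeholder for per-entry work
          iter.next();
        }
      } finally {
        iter.close();
      }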


