Package org.apache.accumulo.server.fs

Examples of org.apache.accumulo.server.fs.FileRef
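FileRef pairs the file reference stored in the metadata table with the fully qualified Hadoop Path of the underlying data file, so server code can move between the two forms. The snippets below appear to come from the Accumulo tablet server and its tests. A minimal sketch of the constructors they exercise follows; the example path and class name are hypothetical, and it is assumed that the single-string constructor simply uses the same text for both forms:

    import org.apache.accumulo.server.fs.FileRef;
    import org.apache.hadoop.fs.Path;

    public final class FileRefIntroExample {
      public static void main(String[] args) {
        // From a single string path (hypothetical location).
        FileRef fromString = new FileRef("hdfs://nn/accumulo/tables/2/default_tablet/F0000001.rf");

        // From an explicit metadata reference plus a fully qualified Path
        // (the form used by the bulk-import snippet further down).
        Path full = new Path("hdfs://nn/accumulo/tables/2/default_tablet/F0000001.rf");
        FileRef fromPair = new FileRef(full.toString(), full);

        // path() yields the qualified Hadoop Path used for actual file-system access.
        System.out.println(fromString.path() + " " + fromPair.path());
      }
    }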


      KeyExtent extent = extents[i];
     
      String tdir = ServerConstants.getTablesDirs()[0] + "/" + extent.getTableId().toString() + "/dir_" + i;
      MetadataTableUtil.addTablet(extent, tdir, SystemCredentials.get(), TabletTime.LOGICAL_TIME_ID, zl);
      SortedMap<FileRef,DataFileValue> mapFiles = new TreeMap<FileRef,DataFileValue>();
      mapFiles.put(new FileRef(tdir + "/" + RFile.EXTENSION + "_000_000"), new DataFileValue(1000017 + i, 10000 + i));
     
      if (i == extentToSplit) {
        splitMapFiles = mapFiles;
      }
      int tid = 0;
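The fragment above, apparently from a test that seeds the metadata table, registers a tablet directory and then describes its lone data file by mapping a FileRef to a DataFileValue holding the file's size and entry count. A self-contained sketch of just that map-building step; the class name and file name are hypothetical, and the DataFileValue import path is assumed for 1.6-era Accumulo:

    import java.util.SortedMap;
    import java.util.TreeMap;
    import org.apache.accumulo.core.metadata.schema.DataFileValue; // package assumed (1.6-era)
    import org.apache.accumulo.server.fs.FileRef;

    public final class MapFileExample {
      // Builds the FileRef -> DataFileValue map a tablet's metadata entry expects:
      // one entry per data file, valued by (size in bytes, number of entries).
      static SortedMap<FileRef, DataFileValue> describe(String tabletDir, long bytes, long entries) {
        SortedMap<FileRef, DataFileValue> mapFiles = new TreeMap<FileRef, DataFileValue>();
        mapFiles.put(new FileRef(tabletDir + "/F0000000.rf"), new DataFileValue(bytes, entries));
        return mapFiles;
      }
    }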


  }

  FileRef getNextMapFilename(String prefix) throws IOException {
    String extension = FileOperations.getNewFileExtension(tabletServer.getTableConfiguration(extent));
    checkTabletDir();
    return new FileRef(location.toString() + "/" + prefix + UniqueNameAllocator.getInstance().getNextName() + "." + extension);
  }
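getNextMapFilename composes a new data-file FileRef from the tablet directory, a caller-chosen prefix ("F" for a flush, "M" for a merging minor compaction, "C" or "A" for major compactions, as later snippets show), a cluster-unique name, and the table's configured file extension. A sketch of the same composition with every piece passed in explicitly; the class and parameter names are hypothetical:

    import org.apache.accumulo.server.fs.FileRef;

    public final class NextFileNameExample {
      // Mirrors the path layout used above: <tabletDir>/<prefix><uniqueName>.<extension>
      static FileRef nextMapFile(String tabletDir, String prefix, String uniqueName, String extension) {
        return new FileRef(tabletDir + "/" + prefix + uniqueName + "." + extension);
      }
    }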

      FileStatus[] files = fs.listStatus(location);
      Collection<String> goodPaths = RootFiles.cleanupReplacement(fs, files, true);
      for (String good : goodPaths) {
        Path path = new Path(good);
        String filename = path.getName();
        FileRef ref = new FileRef(location.toString() + "/" + filename, path);
        DataFileValue dfv = new DataFileValue(0, 0);
        datafiles.put(ref, dfv);
      }
    } else {

      Text rowName = extent.getMetadataEntry();

      String tableId = extent.isMeta() ? RootTable.ID : MetadataTable.ID;
      ScannerImpl mdScanner = new ScannerImpl(HdfsZooInstance.getInstance(), SystemCredentials.get(), tableId, Authorizations.EMPTY);

      // Commented out because when no data file is present, each tablet will scan through metadata table and return nothing
      // reduced batch size to improve performance
      // changed here after endKeys were implemented from 10 to 1000
      mdScanner.setBatchSize(1000);

      // leave these in, again, now using endKey for safety
      mdScanner.fetchColumnFamily(DataFileColumnFamily.NAME);

      mdScanner.setRange(new Range(rowName));

      for (Entry<Key,Value> entry : mdScanner) {

        if (entry.getKey().compareRow(rowName) != 0) {
          break;
        }

        FileRef ref = new FileRef(fs, entry.getKey());
        datafiles.put(ref, new DataFileValue(entry.getValue().get()));
      }
    }
    return datafiles;
  }
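The method above handles the root tablet by listing its directory directly; every other tablet scans its own metadata row, fetches the data-file column family, and turns each Key into a FileRef via the (VolumeManager, Key) constructor, paired with the DataFileValue decoded from the cell. A trimmed sketch of the metadata-scanning half using an ordinary client Scanner rather than the internal ScannerImpl; the Connector parameter, the class name, and the non-FileRef import paths are assumptions:

    import java.util.Map.Entry;
    import java.util.SortedMap;
    import java.util.TreeMap;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.client.TableNotFoundException;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Range;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.metadata.MetadataTable;
    import org.apache.accumulo.core.metadata.schema.DataFileValue;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
    import org.apache.accumulo.core.security.Authorizations;
    import org.apache.accumulo.server.fs.FileRef;
    import org.apache.accumulo.server.fs.VolumeManager;
    import org.apache.hadoop.io.Text;

    public final class DatafileLookupExample {
      // Reads the file entries of one tablet's metadata row into a FileRef -> DataFileValue map.
      static SortedMap<FileRef, DataFileValue> lookup(Connector conn, VolumeManager fs, Text metadataRow)
          throws TableNotFoundException {
        SortedMap<FileRef, DataFileValue> datafiles = new TreeMap<FileRef, DataFileValue>();
        Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
        scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
        scanner.setRange(new Range(metadataRow));
        for (Entry<Key, Value> entry : scanner) {
          datafiles.put(new FileRef(fs, entry.getKey()), new DataFileValue(entry.getValue().get()));
        }
        return datafiles;
      }
    }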

    Text row = extent.getMetadataEntry();
    for (Entry<Key,Value> entry : tabletsKeyValues.entrySet()) {
      Key key = entry.getKey();
      if (key.getRow().equals(row) && key.getColumnFamily().equals(ScanFileColumnFamily.NAME)) {
        scanFiles.add(new FileRef(fs, key));
      }
    }

    return scanFiles;
  }

  private synchronized MinorCompactionTask prepareForMinC(long flushId, MinorCompactionReason mincReason) {
    CommitSession oldCommitSession = tabletMemory.prepareForMinC();
    otherLogs = currentLogs;
    currentLogs = new HashSet<DfsLogger>();

    FileRef mergeFile = null;
    if (mincReason != MinorCompactionReason.RECOVERY) {
      mergeFile = datafileManager.reserveMergingMinorCompactionFile();
    }

    return new MinorCompactionTask(mergeFile, oldCommitSession, flushId, mincReason);

  private Map<FileRef,Pair<Key,Key>> getFirstAndLastKeys(SortedMap<FileRef,DataFileValue> allFiles) throws IOException {
    Map<FileRef,Pair<Key,Key>> result = new HashMap<FileRef,Pair<Key,Key>>();
    FileOperations fileFactory = FileOperations.getInstance();
    for (Entry<FileRef,DataFileValue> entry : allFiles.entrySet()) {
      FileRef file = entry.getKey();
      FileSystem ns = fs.getVolumeByPath(file.path()).getFileSystem();
      FileSKVIterator openReader = fileFactory.openReader(file.path().toString(), true, ns, ns.getConf(), this.getTableConfiguration());
      try {
        Key first = openReader.getFirstKey();
        Key last = openReader.getLastKey();
        result.put(file, new Pair<Key,Key>(first, last));
      } finally {
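getFirstAndLastKeys uses a FileRef's qualified path() twice: once to resolve the owning volume's FileSystem through the VolumeManager, and once as the string handed to FileOperations.openReader so the file's first and last keys can be read. A sketch of just the volume-resolution step; the class and parameter names are hypothetical:

    import java.io.IOException;
    import org.apache.accumulo.server.fs.FileRef;
    import org.apache.accumulo.server.fs.VolumeManager;
    import org.apache.hadoop.fs.FileSystem;

    public final class VolumeLookupExample {
      // Resolves the Hadoop FileSystem that actually holds a given data file.
      static FileSystem fileSystemFor(VolumeManager fs, FileRef file) throws IOException {
        return fs.getVolumeByPath(file.path()).getFileSystem();
      }
    }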

          numToCompact = filesToCompact.size() - maxFilesToCompact + 1;
        }

        Set<FileRef> smallestFiles = removeSmallest(filesToCompact, numToCompact);

        FileRef fileName = getNextMapFilename((filesToCompact.size() == 0 && !propogateDeletes) ? "A" : "C");
        FileRef compactTmpName = new FileRef(fileName.path().toString() + "_tmp");

        AccumuloConfiguration tableConf = createTableConfiguration(acuTableConf, plan);

        Span span = Trace.start("compactFiles");
        try {
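Before writing major-compaction output, the code picks the final FileRef name (an "A" file when all files are being compacted and deletes need not be propagated, otherwise a "C" file) and derives a sibling _tmp reference that the compaction writes to, presumably renamed into place on success. A sketch of deriving the temporary reference; the class name is hypothetical:

    import org.apache.accumulo.server.fs.FileRef;

    public final class TmpFileExample {
      // Derives the "<file>_tmp" reference a compaction writes to before the final rename.
      static FileRef tmpFor(FileRef finalFile) {
        return new FileRef(finalFile.path().toString() + "_tmp");
      }
    }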

      if (datafileSizes.size() >= maxFiles) {
        // find the smallest file

        long min = Long.MAX_VALUE;
        FileRef minName = null;

        for (Entry<FileRef,DataFileValue> entry : datafileSizes.entrySet()) {
          if (entry.getValue().getSize() < min && !majorCompactingFiles.contains(entry.getKey())) {
            min = entry.getValue().getSize();
            minName = entry.getKey();
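This loop selects the smallest data file that is not already part of a running major compaction by comparing DataFileValue sizes. A self-contained sketch of the same selection; the class name is hypothetical and the DataFileValue import path is assumed as before:

    import java.util.Map;
    import java.util.Map.Entry;
    import java.util.Set;
    import org.apache.accumulo.core.metadata.schema.DataFileValue; // package assumed (1.6-era)
    import org.apache.accumulo.server.fs.FileRef;

    public final class SmallestFileExample {
      // Returns the smallest file not currently involved in a major compaction, or null if none qualifies.
      static FileRef smallestIdleFile(Map<FileRef, DataFileValue> datafileSizes, Set<FileRef> majorCompactingFiles) {
        long min = Long.MAX_VALUE;
        FileRef minName = null;
        for (Entry<FileRef, DataFileValue> entry : datafileSizes.entrySet()) {
          if (entry.getValue().getSize() < min && !majorCompactingFiles.contains(entry.getKey())) {
            min = entry.getValue().getSize();
            minName = entry.getKey();
          }
        }
        return minName;
      }
    }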

    public void run() {
      minorCompactionWaitingToStart = false;
      minorCompactionInProgress = true;
      Span minorCompaction = Trace.on("minorCompaction");
      try {
        FileRef newMapfileLocation = getNextMapFilename(mergeFile == null ? "F" : "M");
        FileRef tmpFileRef = new FileRef(newMapfileLocation.path() + "_tmp");
        Span span = Trace.start("waitForCommits");
        synchronized (Tablet.this) {
          commitSession.waitForCommitsToFinish();
        }
        span.stop();

        Map<FileRef,MapFileInfo> fileRefMap = new HashMap<FileRef,MapFileInfo>();
        for (Entry<String,MapFileInfo> mapping : fileMap.entrySet()) {
          Path path = new Path(mapping.getKey());
          FileSystem ns = fs.getVolumeByPath(path).getFileSystem();
          path = ns.makeQualified(path);
          fileRefMap.put(new FileRef(path.toString(), path), mapping.getValue());
        }

        Tablet importTablet = onlineTablets.get(new KeyExtent(tke));

        if (importTablet == null) {
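During bulk import, each client-supplied path string is resolved to its owning volume, fully qualified, and wrapped with the two-argument constructor so the FileRef carries both the string form and the qualified Path. A sketch of that qualification step, leaving out the MapFileInfo bookkeeping; the class and parameter names are hypothetical:

    import java.io.IOException;
    import org.apache.accumulo.server.fs.FileRef;
    import org.apache.accumulo.server.fs.VolumeManager;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class BulkImportRefExample {
      // Fully qualifies a bulk-import file path against its volume and wraps it as a FileRef.
      static FileRef qualify(VolumeManager fs, String file) throws IOException {
        Path path = new Path(file);
        FileSystem ns = fs.getVolumeByPath(path).getFileSystem();
        Path qualified = ns.makeQualified(path);
        return new FileRef(qualified.toString(), qualified);
      }
    }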
