Package org.tmatesoft.hg.util

Examples of org.tmatesoft.hg.util.ProgressSupport
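
The snippets below are taken from hg4j command implementations. They all follow the same lifecycle: obtain a ProgressSupport (from a handler, from a sink adapter, or via the factory), announce the total amount of work with start(totalUnits), report increments with worked(units), and finish with done(), ideally from a finally block. As a point of reference, here is a minimal, hypothetical console reporter; it assumes the interface declares only the three methods these examples actually call:

  // Hypothetical implementation sketch; start/worked/done are the only
  // methods exercised by the examples on this page.
  public class ConsoleProgress implements ProgressSupport {
    private int total;
    private int completed;

    public void start(int totalUnits) {
      total = totalUnits;
      completed = 0;
    }

    public void worked(int units) {
      completed += units;
      System.out.printf("progress: %d/%d%n", completed, total);
    }

    public void done() {
      System.out.println("progress: done");
    }
  }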


      throw new IllegalArgumentException();
    }
    if (csetTransform != null) {
      throw new ConcurrentModificationException();
    }
    final ProgressSupport progressHelper = getProgressSupport(handler);
    try {
      if (repo.getChangelog().getRevisionCount() == 0) {
        return;
      }
      final int firstCset = startRev;
      final int lastCset = endRev == TIP ? repo.getChangelog().getLastRevision() : endRev;
      // XXX pretty much like HgInternals.checkRevlogRange
      if (lastCset < 0 || lastCset > repo.getChangelog().getLastRevision()) {
        throw new HgBadArgumentException(String.format("Bad value %d for end revision", lastCset), null);
      }
      if (firstCset < 0 || firstCset > lastCset) {
        throw new HgBadArgumentException(String.format("Bad value %d for start revision for range [%1$d..%d]", firstCset, lastCset), null);
      }
      final int BATCH_SIZE = 100;
      count = 0;
      HgParentChildMap<HgChangelog> pw = getParentHelper(file == null); // leave it uninitialized unless we iterate whole repo
      // ChangesetTransformer creates a blank PathPool, and #file(String, boolean) above
      // may utilize it as well. CommandContext? How about StatusCollector there as well?
      csetTransform = new ChangesetTransformer(repo, handler, pw, progressHelper, getCancelSupport(handler, true));
      // FilteringInspector checks command arguments (users, branches, limit, etc.)
      // before passing the cset to the next Inspector, which is either (a) a collector that reverses
      // cset order and then invokes the transformer from (b) with the alternative order, or (b) a transformer to high-level csets.
      FilteringInspector filterInsp = new FilteringInspector();
      filterInsp.changesets(firstCset, lastCset);
      if (file == null) {
        progressHelper.start(lastCset - firstCset + 1);
        if (iterateDirection == HgIterateDirection.OldToNew) {
          filterInsp.delegateTo(csetTransform);
          repo.getChangelog().range(firstCset, lastCset, filterInsp);
          csetTransform.checkFailure();
        } else {
          assert iterateDirection == HgIterateDirection.NewToOld;
          BatchRangeHelper brh = new BatchRangeHelper(firstCset, lastCset, BATCH_SIZE, true);
          BatchChangesetInspector batchInspector = new BatchChangesetInspector(Math.min(lastCset-firstCset+1, BATCH_SIZE));
          filterInsp.delegateTo(batchInspector);
          // XXX this batching code is a bit verbose, refactor
          while (brh.hasNext()) {
            brh.next();
            repo.getChangelog().range(brh.start(), brh.end(), filterInsp);
            for (BatchChangesetInspector.BatchRecord br : batchInspector.iterate(true)) {
              csetTransform.next(br.csetIndex, br.csetRevision, br.cset);
              csetTransform.checkFailure();
            }
            batchInspector.reset();
          }
        }
      } else {
        filterInsp.delegateTo(csetTransform);
        final HgFileRenameHandlerMixin withCopyHandler = Adaptable.Factory.getAdapter(handler, HgFileRenameHandlerMixin.class, null);
        FileRenameQueueBuilder frqBuilder = new FileRenameQueueBuilder();
        List<QueueElement> fileRenames = frqBuilder.buildFileRenamesQueue(firstCset, lastCset);
        progressHelper.start(fileRenames.size());
        for (int nameIndex = 0, fileRenamesSize = fileRenames.size(); nameIndex < fileRenamesSize; nameIndex++) {
          QueueElement curRename = fileRenames.get(nameIndex);
          HgDataFile fileNode = curRename.file();
          if (followAncestry) {
            TreeBuildInspector treeBuilder = new TreeBuildInspector(followAncestry);
            @SuppressWarnings("unused")
            List<HistoryNode> fileAncestry = treeBuilder.go(curRename);
            int[] commitRevisions = narrowChangesetRange(treeBuilder.getCommitRevisions(), firstCset, lastCset);
            if (iterateDirection == HgIterateDirection.OldToNew) {
              repo.getChangelog().range(filterInsp, commitRevisions);
              csetTransform.checkFailure();
            } else {
              assert iterateDirection == HgIterateDirection.NewToOld;
              // visit one by one in the opposite direction
              for (int i = commitRevisions.length-1; i >= 0; i--) {
                int csetWithFileChange = commitRevisions[i];
                repo.getChangelog().range(csetWithFileChange, csetWithFileChange, filterInsp);
              }
            }
          } else {
            // report complete file history (XXX may narrow range with [startRev, endRev], but need to go from file rev to link rev)
            int fileStartRev = curRename.fileFrom();
            int fileEndRev = curRename.file().getLastRevision(); //curRename.fileTo();
            if (iterateDirection == HgIterateDirection.OldToNew) {
              fileNode.history(fileStartRev, fileEndRev, filterInsp);
              csetTransform.checkFailure();
            } else {
              assert iterateDirection == HgIterateDirection.NewToOld;
              BatchRangeHelper brh = new BatchRangeHelper(fileStartRev, fileEndRev, BATCH_SIZE, true);
              BatchChangesetInspector batchInspector = new BatchChangesetInspector(Math.min(fileEndRev-fileStartRev+1, BATCH_SIZE));
              filterInsp.delegateTo(batchInspector);
              while (brh.hasNext()) {
                brh.next();
                fileNode.history(brh.start(), brh.end(), filterInsp);
                for (BatchChangesetInspector.BatchRecord br : batchInspector.iterate(true /*iterateDirection == HgIterateDirection.NewToOld*/)) {
                  csetTransform.next(br.csetIndex, br.csetRevision, br.cset);
                  csetTransform.checkFailure();
                }
                batchInspector.reset();
              }
            }
          }
          if (withCopyHandler != null && nameIndex + 1 < fileRenamesSize) {
            QueueElement nextRename = fileRenames.get(nameIndex+1);
            HgFileRevision src, dst;
            // A -> B
            if (iterateDirection == HgIterateDirection.OldToNew) {
              // curRename: A, nextRename: B
              src = curRename.last();
              dst = nextRename.first(src);
            } else {
              assert iterateDirection == HgIterateDirection.NewToOld;
              // curRename: B, nextRename: A
              src = nextRename.last();
              dst = curRename.first(src);
            }
            withCopyHandler.copy(src, dst);
          }
          progressHelper.worked(1);
        } // for renames
        frqBuilder.reportRenameIfNotInQueue(fileRenames, withCopyHandler);
      } // file != null
    } catch (HgRuntimeException ex) {
      throw new HgLibraryFailureException(ex);
    } finally {
      csetTransform = null;
      progressHelper.done();
    }
  }
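The NewToOld branch above cannot read the changelog backwards, so it reads forward in BATCH_SIZE chunks and replays each chunk in reverse. Condensed to its core (BatchRangeHelper looks like an internal helper; its constructor arguments (start, end, batchSize, reverseOrder) and the start()/end() accessors are assumptions read off the usage above):

  // Sketch of reverse (new-to-old) traversal over forward-only reads.
  static void visitNewToOld(int firstCset, int lastCset, int batchSize) {
    BatchRangeHelper brh = new BatchRangeHelper(firstCset, lastCset, batchSize, true);
    while (brh.hasNext()) {
      brh.next();
      // read batch [brh.start()..brh.end()] oldest-first into a buffer,
      // then replay the buffered changesets newest-first to the handler
    }
  }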


      throw new HgBadArgumentException(String.format("Bad value %d for end revision", lastCset), null);
    }
    if (firstCset < 0 || firstCset > lastCset) {
      throw new HgBadArgumentException(String.format("Bad value %d for start revision for range [%1$d..%d]", firstCset, lastCset), null);
    }
    final ProgressSupport progressHelper = getProgressSupport(handler);
    final CancelSupport cancelHelper = getCancelSupport(handler, true);
    final HgFileRenameHandlerMixin renameHandler = Adaptable.Factory.getAdapter(handler, HgFileRenameHandlerMixin.class, null);

    try {

      // XXX rename: 'dispatcher' is not a proper name (most of the job done is managing history chunk interconnection)
      final HandlerDispatcher dispatcher = new HandlerDispatcher() {
 
        @Override
        protected void once(HistoryNode n) throws HgCallbackTargetException, CancelledException, HgRuntimeException {
          handler.treeElement(ei.init(n, currentFileNode));
          cancelHelper.checkCancelled();
        }
      };
 
      // renamed files in the queue are placed with respect to #iterateDirection
      // i.e. if we iterate from new to old, recent filenames come first
      FileRenameQueueBuilder frqBuilder = new FileRenameQueueBuilder();
      List<QueueElement> fileRenamesQueue = frqBuilder.buildFileRenamesQueue(firstCset, lastCset);
      // XXX perhaps, makes sense to look at selected file's revision when followAncestry is true
      // to ensure file we attempt to trace is in the WC's parent. Native hg aborts if not.
      progressHelper.start(4 * fileRenamesQueue.size());
      for (int namesIndex = 0, renamesQueueSize = fileRenamesQueue.size(); namesIndex < renamesQueueSize; namesIndex++) {
  
        final QueueElement renameInfo = fileRenamesQueue.get(namesIndex);
        dispatcher.prepare(progressHelper, renameInfo);
        cancelHelper.checkCancelled();
        if (namesIndex > 0) {
          dispatcher.connectWithLastJunctionPoint(renameInfo, fileRenamesQueue.get(namesIndex - 1));
        }
        if (namesIndex + 1 < renamesQueueSize) {
          // there's at least one more name we are going to look at
          dispatcher.updateJunctionPoint(renameInfo, fileRenamesQueue.get(namesIndex+1), renameHandler != null);
        } else {
          dispatcher.clearJunctionPoint();
        }
        dispatcher.dispatchAllChanges();
        if (renameHandler != null && namesIndex + 1 < renamesQueueSize) {
          dispatcher.reportRenames(renameHandler);
        }
      } // for fileRenamesQueue;
      frqBuilder.reportRenameIfNotInQueue(fileRenamesQueue, renameHandler);
    } catch (HgRuntimeException ex) {
      throw new HgLibraryFailureException(ex);
    } finally {
      progressHelper.done();
    }
  }

   * @throws HgException subclass thereof to indicate specific issue with the command arguments or repository state
   * @throws CancelledException if execution of the command was cancelled
   */
  public void execute() throws HgException, CancelledException {
    try {
      final ProgressSupport progress = getProgressSupport(null);
      final CancelSupport cancellation = getCancelSupport(null, true);
      cancellation.checkCancelled();
      progress.start(6);
      Internals internalRepo = Internals.getInstance(repo);
      if (cleanCheckout) {
        // remove tracked files from wd (perhaps, just forget 'Added'?)
        // for now, just delete each and every tracked file
        // TODO WorkingCopy container with getFile(HgDataFile/Path) to access files in WD
        HgDirstate dirstate = new HgInternals(repo).getDirstate();
        dirstate.walk(new HgDirstate.Inspector() {
         
          public boolean next(EntryKind kind, Record entry) {
            File f = new File(repo.getWorkingDir(), entry.name().toString());
            if (f.exists()) {
              f.delete();
            }
            return true;
          }
        });
      } else {
        throw new HgBadArgumentException("Sorry, only clean checkout is supported now, use #clean(true)", null);
      }
      progress.worked(1);
      cancellation.checkCancelled();
      final DirstateBuilder dirstateBuilder = new DirstateBuilder(internalRepo);
      final CheckoutWorker worker = new CheckoutWorker(internalRepo);
      HgManifest.Inspector insp = new HgManifest.Inspector() {
       
        public boolean next(Nodeid nid, Path fname, Flags flags) {
          if (worker.next(nid, fname, flags)) {
            // Mercurial seems to write "n   0  -1   unset fname" on `hg --clean co --rev <earlier rev>`,
            // and I suspect the reason for the 'force lookup' is a slight chance of the user modifying
            // the file without altering its size at the very second the dirstate is being written
            // (or an update bringing in changes that didn't alter the file size -
            // with size and timestamp set, a later `hg status` wouldn't notice these changes)
           
            // However, as long as we use this class to write clean copies of the files, we can put all the fields
            // right away.
            int mtime = worker.getLastFileModificationTime();
            // Manifest flags are chars (despite octal values `hg manifest --debug` displays),
            // while dirstate keeps actual unix flags.
            int fmode = worker.getLastFileMode();
            dirstateBuilder.recordNormal(fname, fmode, mtime, worker.getLastFileSize());
            return true;
          }
          return false;
        }
       
        public boolean end(int manifestRevision) {
          return false;
        }
       
        public boolean begin(int manifestRevision, Nodeid nid, int changelogRevision) {
          return true;
        }
      };
      // checkout tip if no revision set
      final int coRevision = revisionToCheckout.get(HgRepository.TIP);
      dirstateBuilder.parents(repo.getChangelog().getRevision(coRevision), null);
      repo.getManifest().walk(coRevision, coRevision, insp);
      worker.checkFailed();
      progress.worked(3);
      cancellation.checkCancelled();
      File dirstateFile = internalRepo.getRepositoryFile(Dirstate);
      try {
        FileChannel dirstateFileChannel = new FileOutputStream(dirstateFile).getChannel();
        try {
          dirstateBuilder.serialize(dirstateFileChannel);
        } finally {
          dirstateFileChannel.close();
        }
      } catch (IOException ex) {
        throw new HgIOException("Can't write down new directory state", ex, dirstateFile);
      }
      progress.worked(1);
      cancellation.checkCancelled();
      String branchName = repo.getChangelog().range(coRevision, coRevision).get(0).branch();
      assert branchName != null;
      File branchFile = internalRepo.getRepositoryFile(Branch);
      if (HgRepository.DEFAULT_BRANCH_NAME.equals(branchName)) {
        // clean actual branch, if any
        if (branchFile.isFile()) {
          branchFile.delete();
        }
      } else {
        try {
          // branch file is UTF-8, see http://mercurial.selenic.com/wiki/EncodingStrategy#UTF-8_strings
          OutputStreamWriter ow = new OutputStreamWriter(new FileOutputStream(branchFile), EncodingHelper.getUTF8());
          try {
            ow.write(branchName);
          } finally {
            ow.close();
          }
        } catch (IOException ex) {
          throw new HgIOException("Can't write down branch information", ex, branchFile);
        }
      }
      progress.worked(1);
      progress.done();
    } catch (HgRuntimeException ex) {
      throw new HgLibraryFailureException(ex);
    }
  }

    }

    public void next(int revisionNumber, int actualLen, int baseRevision, int linkRevision, int parent1Revision, int parent2Revision, byte[] nodeid, DataAccess da) {
      try {
        prepare(revisionNumber, da); // XXX perhaps, prepare shall return DA (sliced, if needed)
        final ProgressSupport progressSupport = ProgressSupport.Factory.get(sink);
        ByteBuffer buf = ByteBuffer.allocate(actualLen > 8192 ? 8192 : actualLen);
        Preview p = Adaptable.Factory.getAdapter(sink, Preview.class, null);
        if (p != null) {
          progressSupport.start(2 * da.length());
          while (!da.isEmpty()) {
            checkCancelled();
            da.readBytes(buf);
            p.preview(buf);
            buf.clear();
          }
          da.reset();
          prepare(revisionNumber, da);
          progressSupport.worked(da.length());
          buf.clear();
        } else {
          progressSupport.start(da.length());
        }
        while (!da.isEmpty()) {
          checkCancelled();
          da.readBytes(buf);
          buf.flip(); // post: position == 0
          // XXX I may not rely on returned number of bytes but track change in buf position instead.
         
          int consumed = sink.write(buf);
          if ((consumed == 0 || consumed != buf.position()) && logFacility != null) {
            logFacility.dump(getClass(), Warn, "Bad data sink when reading revision %d. Reported %d bytes consumed, but actually read %d", revisionNumber, consumed, buf.position());
          }
          if (buf.position() == 0) {
            throw new HgInvalidStateException("Bad sink implementation (consumes no bytes) results in endless loop");
          }
          buf.compact(); // ensure (a) there's space for new (b) data starts at 0
          progressSupport.worked(consumed);
        }
        progressSupport.done(); // XXX shall specify whether #done() is invoked always or only if completed successfully.
      } catch (IOException ex) {
        recordFailure(ex);
      } catch (CancelledException ex) {
        recordFailure(ex);
      }
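ProgressSupport.Factory.get(sink) above resolves progress reporting from the sink itself, much like the Adaptable.Factory.getAdapter lookups elsewhere on this page. Here is a sketch of a sink that opts in by implementing the mixin directly; the write() contract (consume the buffer, return the byte count) is an assumption read off how the loop above compares consumed with buf.position():

  // Hypothetical progress-aware sink.
  class CountingSink implements ByteChannel, ProgressSupport {
    private long bytesSeen;

    public int write(ByteBuffer buffer) {
      int n = buffer.remaining();
      bytesSeen += n;
      buffer.position(buffer.limit()); // consume everything offered
      return n;
    }

    public void start(int totalUnits) { /* e.g. size a progress bar */ }
    public void worked(int units)     { /* advance it */ }
    public void done()                { /* dispose of it */ }
  }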

   */
  public void workingCopy(ByteChannel sink) throws CancelledException, HgRuntimeException {
    File f = getRepo().getFile(this);
    if (f.exists()) {
      final CancelSupport cs = CancelSupport.Factory.get(sink);
      final ProgressSupport progress = ProgressSupport.Factory.get(sink);
      final long flength = f.length();
      final int bsize = (int) Math.min(flength, 32*1024);
      progress.start((int) (flength > Integer.MAX_VALUE ? flength >>> 15 /*32 kb buf size*/ : flength));
      ByteBuffer buf = ByteBuffer.allocate(bsize);
      FileInputStream fis = null;
      try {
        fis = new FileInputStream(f);
        FileChannel fc = fis.getChannel();
        while (fc.read(buf) != -1) {
          cs.checkCancelled();
          buf.flip();
          int consumed = sink.write(buf);
          progress.worked(flength > Integer.MAX_VALUE ? 1 : consumed);
          buf.compact();
        }
      } catch (IOException ex) {
        throw new HgInvalidFileException("Working copy read failed", ex, f);
      } finally {
        progress.done();
        if (fis != null) {
          new FileUtils(getRepo().getSessionContext().getLog(), this).closeQuietly(fis);
        }
      }
    } else {
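workingCopy picks its progress unit from the file size: raw bytes when the total fits an int, otherwise one unit per 32 KB buffer-sized chunk (the flength >>> 15 above, since 2^15 = 32768). The same choice, isolated into helpers (hypothetical names):

  // One unit per byte for small files, one unit per 32 KB chunk for
  // files whose length does not fit an int.
  static int progressTotal(long byteLength) {
    return (int) (byteLength > Integer.MAX_VALUE ? byteLength >>> 15 : byteLength);
  }

  static int progressStep(long byteLength, int bytesConsumed) {
    return byteLength > Integer.MAX_VALUE ? 1 : bytesConsumed;
  }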

   */
  public void executeFull(final HgChangesetHandler handler) throws HgCallbackTargetException, HgException, CancelledException {
    if (handler == null) {
      throw new IllegalArgumentException("Delegate can't be null");
    }
    final ProgressSupport ps = getProgressSupport(handler);
    try {
      final List<Nodeid> common = getCommon();
      HgBundle changegroup = remoteRepo.getChanges(common);
      final ChangesetTransformer transformer = new ChangesetTransformer(localRepo, handler, getParentHelper(), ps, getCancelSupport(handler, true));
      transformer.limitBranches(branches);
      changegroup.changes(localRepo, new HgChangelog.Inspector() {
        private int localIndex;
        private final HgParentChildMap<HgChangelog> parentHelper;
     
        {
          parentHelper = getParentHelper();
          // new revisions, if any, would be added after all existing ones, and would get numbered starting with last+1
          localIndex = localRepo.getChangelog().getRevisionCount();
        }
       
        public void next(int revisionNumber, Nodeid nodeid, RawChangeset cset) throws HgRuntimeException {
          if (parentHelper.knownNode(nodeid)) {
            // FIXME getCommon() and remote.changegroup do not work together nicely.
            // e.g. for the hgtest-annotate-merge repository and TestIncoming, common reports r0 and r5 (ancestor of r5)
            // because there's a distinct branch from r0 (in addition to those after r5).
            // remote.changegroup however answers with revisions that are children of either,
            // so revisions 0..5 are reported as well and the next check fails. Instead, we shall pass
            // not 'common' but 'first to load' to remote.changegroup(), or use another method (e.g. getbundle).
            // Note, sending r5 only (i.e. checking for ancestors in common) won't help: changegroup sends children of
            // the requested roots only, and doesn't look for anything else
//            if (!common.contains(nodeid)) {
//              throw new HgInvalidStateException("Bundle shall not report known nodes other than roots we've supplied");
//            }
            return;
          }
          transformer.next(localIndex++, nodeid, cset);
        }
      });
      transformer.checkFailure();
    } catch (HgRuntimeException ex) {
      throw new HgLibraryFailureException(ex);
    } finally {
      ps.done();
    }
  }

   * @throws CancelledException if execution of the command was cancelled
   * @throws HgException subclass thereof to indicate specific issue with the command arguments or repository state
   */
  public void executeDiff(HgBlameInspector insp) throws HgCallbackTargetException, CancelledException, HgException {
    checkFile();
    final ProgressSupport progress = getProgressSupport(insp);
    progress.start(2);
    try {
      final int startRevIndex = clogRevIndexStart.get(0);
      final int endRevIndex = clogRevIndexEnd.get(TIP);
      final CancelSupport cancel = getCancelSupport(insp, true);
      int fileRevIndex1 = fileRevIndex(df, startRevIndex);
      int fileRevIndex2 = fileRevIndex(df, endRevIndex);
      BlameHelper bh = new BlameHelper(insp);
      bh.prepare(df, startRevIndex, endRevIndex);
      progress.worked(1);
      cancel.checkCancelled();
      bh.diff(fileRevIndex1, startRevIndex, fileRevIndex2, endRevIndex);
      progress.worked(1);
      cancel.checkCancelled();
    } catch (HgRuntimeException ex) {
      throw new HgLibraryFailureException(ex);
    } finally {
      progress.done();
    }
  }

    remoteRepo = hgRemote;
    return this;
  }

  public void execute() throws HgRemoteConnectionException, HgIOException, CancelledException, HgLibraryFailureException {
    final ProgressSupport progress = getProgressSupport(null);
    try {
      progress.start(100);
      //
      // find out missing
      // TODO refactor same code in HgOutgoingCommand #getComparator and #getParentHelper
      final HgChangelog clog = repo.getChangelog();
      final HgParentChildMap<HgChangelog> parentHelper = new HgParentChildMap<HgChangelog>(clog);
      parentHelper.init();
      final Internals implRepo = HgInternals.getImplementationRepo(repo);
      final PhasesHelper phaseHelper = new PhasesHelper(implRepo, parentHelper);
      final RepositoryComparator comparator = new RepositoryComparator(parentHelper, remoteRepo);
      comparator.compare(new ProgressSupport.Sub(progress, 50), getCancelSupport(null, true));
      List<Nodeid> l = comparator.getLocalOnlyRevisions();
      if (phaseHelper.isCapableOfPhases() && phaseHelper.withSecretRoots()) {
        RevisionSet secret = phaseHelper.allSecret();
        outgoing = new RevisionSet(l).subtract(secret);
      } else {
        outgoing = new RevisionSet(l);
      }
      HgBundle b = null;
      if (!outgoing.isEmpty()) {
        //
        // prepare bundle
        BundleGenerator bg = new BundleGenerator(implRepo);
        File bundleFile = bg.create(outgoing.asList());
        progress.worked(20);
        b = new HgLookup(repo.getSessionContext()).loadBundle(bundleFile);
        //
        // send changes
        remoteRepo.unbundle(b, comparator.getRemoteHeads());
      } // update phase information nevertheless
      progress.worked(20);
      //
      // update phase information
      if (phaseHelper.isCapableOfPhases()) {
        HgRemoteRepository.Phases remotePhases = remoteRepo.getPhases();
        RevisionSet remoteDraftsLocalPublic = phaseHelper.synchronizeWithRemote(remotePhases, outgoing);
        if (!remoteDraftsLocalPublic.isEmpty()) {
          // for each head of remoteDraftsLocalPublic, push Draft->Public
          for (Nodeid n : remoteDraftsLocalPublic.heads(parentHelper)) {
            try {
              Outcome upo = remoteRepo.updatePhase(HgPhase.Draft, HgPhase.Public, n);
              if (!upo.isOk()) {
                implRepo.getLog().dump(getClass(), Severity.Info, "Failed to update remote phase, reason: %s", upo.getMessage());
              }
            } catch (HgRemoteConnectionException ex) {
              implRepo.getLog().dump(getClass(), Severity.Error, ex, String.format("Failed to update phase of %s", n.shortNotation()));
            }
          }
        }
      }
      progress.worked(5);
      //
      // update bookmark information
      HgBookmarks localBookmarks = repo.getBookmarks();
      if (!localBookmarks.getAllBookmarks().isEmpty()) {
        for (Pair<String,Nodeid> bm : remoteRepo.getBookmarks()) {
          Nodeid localRevision = localBookmarks.getRevision(bm.first());
          if (localRevision == null || !parentHelper.knownNode(bm.second())) {
            continue;
          }
          // we know both localRevision and the revision of the remote bookmark;
          // need to make sure we don't push an older revision than the one at the server
          if (parentHelper.isChild(bm.second(), localRevision)) {
            remoteRepo.updateBookmark(bm.first(), bm.second(), localRevision);
          }
        }
      }
      // XXX what is 'obsolete' doing in the namespaces key?
      progress.worked(5);
      if (b != null) {
        b.unlink(); // keep the file only in case of failure
      }
    } catch (IOException ex) {
      throw new HgIOException(ex.getMessage(), null); // XXX not a nice idea to throw IOException from BundleGenerator#create
    } catch (HgRepositoryNotFoundException ex) {
      final HgInvalidStateException e = new HgInvalidStateException("Failed to load a just-created bundle");
      e.initCause(ex);
      throw new HgLibraryFailureException(e);
    } catch (HgRuntimeException ex) {
      throw new HgLibraryFailureException(ex);
    } finally {
      progress.done();
    }
  }
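The push command above budgets 100 units and hands the first 50 to the repository comparator through ProgressSupport.Sub. A sketch of that split, assuming Sub(parent, units) lets the child report on its own scale while consuming at most the given share of the parent:

  static void twoPhase(ProgressSupport parent) {
    parent.start(100);
    ProgressSupport discovery = new ProgressSupport.Sub(parent, 50);
    discovery.start(10);       // child-local scale
    for (int i = 0; i < 10; i++) {
      discovery.worked(1);     // presumably ~5 parent units each
    }
    discovery.done();
    parent.worked(50);         // the remaining phases report directly
    parent.done();
  }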

   */
  public void execute() throws HgException, HgRepositoryLockException, CancelledException {
    final HgRepositoryLock wdLock = repo.getWorkingDirLock();
    wdLock.acquire();
    try {
      final ProgressSupport progress = getProgressSupport(null);
      final CancelSupport cancellation = getCancelSupport(null, true);
      cancellation.checkCancelled();
      progress.start(2 + toAdd.size() + toRemove.size());
      Internals implRepo = Internals.getInstance(repo);
      final DirstateBuilder dirstateBuilder = new DirstateBuilder(implRepo);
      dirstateBuilder.fillFrom(new DirstateReader(implRepo, new Path.SimpleSource()));
      progress.worked(1);
      cancellation.checkCancelled();
      for (Path p : toAdd) {
        dirstateBuilder.recordAdded(p, Flags.RegularFile, -1);
        progress.worked(1);
        cancellation.checkCancelled();
      }
      for (Path p : toRemove) {
        dirstateBuilder.recordRemoved(p);
        progress.worked(1);
        cancellation.checkCancelled();
      }
      Transaction.Factory trFactory = implRepo.getTransactionFactory();
      Transaction tr = trFactory.create(repo);
      try {
        dirstateBuilder.serialize(tr);
        tr.commit();
      } catch (RuntimeException ex) {
        tr.rollback();
        throw ex;
      } catch (HgException ex) {
        tr.rollback();
        throw ex;
      }
      progress.worked(1);
      progress.done();
    } catch (HgRuntimeException ex) {
      throw new HgLibraryFailureException(ex);
    } finally {
      wdLock.release();
    }

   */
  public void execute() throws HgException, CancelledException {
    final HgRepositoryLock wdLock = repo.getWorkingDirLock();
    wdLock.acquire();
    try {
      final ProgressSupport progress = getProgressSupport(null);
      final CancelSupport cancellation = getCancelSupport(null, true);
      cancellation.checkCancelled();
      progress.start(files.size() + 2);
      final int csetRevision;
      if (changesetToCheckout.get() == HgRepository.WORKING_COPY) {
        csetRevision = repo.getChangelog().getRevisionIndex(repo.getWorkingCopyParents().first());
      } else {
        csetRevision = changesetToCheckout.get();
      }
      Internals implRepo = Internals.getInstance(repo);
      final DirstateBuilder dirstateBuilder = new DirstateBuilder(implRepo);
      dirstateBuilder.fillFrom(new DirstateReader(implRepo, new Path.SimpleSource()));
      progress.worked(1);
      cancellation.checkCancelled();
     
      final HgCheckoutCommand.CheckoutWorker worker = new HgCheckoutCommand.CheckoutWorker(implRepo);
     
      HgManifest.Inspector insp = new HgManifest.Inspector() {
       
        public boolean next(Nodeid nid, Path fname, Flags flags) {
          if (worker.next(nid, fname, flags)) {
            dirstateBuilder.recordUncertain(fname);
            return true;
          }
          return false;
        }
       
        public boolean end(int manifestRevision) {
          return false;
        }
       
        public boolean begin(int manifestRevision, Nodeid nid, int changelogRevision) {
          return true;
        }
      };

      for (Path file : files) {
        File f = new File(repo.getWorkingDir(), file.toString());
        if (f.isFile()) {
          if (keepOriginal) {
            File copy = new File(f.getParentFile(), f.getName() + ".orig");
            if (copy.exists()) {
              copy.delete();
            }
            f.renameTo(copy);
          } else {
            f.delete();
          }
        }
        repo.getManifest().walkFileRevisions(file, insp, csetRevision);
        worker.checkFailed();
        progress.worked(1);
        cancellation.checkCancelled();
      }
      Transaction.Factory trFactory = implRepo.getTransactionFactory();
      Transaction tr = trFactory.create(repo);
      try {
        // TODO same code in HgAddRemoveCommand and similar in HgCommitCommand
        dirstateBuilder.serialize(tr);
        tr.commit();
      } catch (RuntimeException ex) {
        tr.rollback();
        throw ex;
      } catch (HgException ex) {
        tr.rollback();
        throw ex;
      }
      progress.worked(1);
      progress.done();
    } catch (HgRuntimeException ex) {
      throw new HgLibraryFailureException(ex);
    } finally {
      wdLock.release();
    }
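Stripped of command specifics, every example on this page reduces to the same skeleton; the work loop below is illustrative, while checkCancelled() and the progress calls are taken directly from the snippets:

  static void run(ProgressSupport progress, CancelSupport cancel, int steps)
      throws CancelledException {
    progress.start(steps);
    try {
      for (int i = 0; i < steps; i++) {
        cancel.checkCancelled();
        // ... one unit of command-specific work ...
        progress.worked(1);
      }
    } finally {
      progress.done();
    }
  }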
