Package org.apache.accumulo.server.zookeeper

Examples of org.apache.accumulo.server.zookeeper.DistributedWorkQueue


      fs.rename(orig, dest);
      log.debug("tid " + tid + " renamed " + orig + " to " + dest + ": import failed");
    }
   
    if (loadedFailures.size() > 0) {
      DistributedWorkQueue bifCopyQueue = new DistributedWorkQueue(Constants.ZROOT + "/" + HdfsZooInstance.getInstance().getInstanceID()
          + Constants.ZBULK_FAILED_COPYQ);
     
      HashSet<String> workIds = new HashSet<String>();
     
      for (String failure : loadedFailures.values()) {
        Path orig = new Path(failure);
        Path dest = new Path(error, orig.getName());
       
        if (fs.exists(dest))
          continue;
       
        bifCopyQueue.addWork(orig.getName(), (failure + "," + dest).getBytes(Constants.UTF8));
        workIds.add(orig.getName());
        log.debug("tid " + tid + " added to copyq: " + orig + " to " + dest + ": failed");
      }
     
      bifCopyQueue.waitUntilDone(workIds);
    }

    fs.delete(new Path(error, BulkImport.FAILURES_TXT), true);
    return new CleanUpBulkImport(tableId, source, bulk, error);
  }
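
A minimal producer-side sketch of the pattern used above, assuming only the calls shown in these examples (the single-String DistributedWorkQueue constructor, addWork, waitUntilDone); the ZooKeeper path, file names, and payloads are illustrative placeholders, not real values.

import java.util.HashSet;

import org.apache.accumulo.core.Constants;
import org.apache.accumulo.server.zookeeper.DistributedWorkQueue;

public class CopyQueueProducerSketch {
  public static void main(String[] args) throws Exception {
    // hypothetical instance id; the real code derives the path from the live instance
    DistributedWorkQueue copyQueue =
        new DistributedWorkQueue("/accumulo/instance-id" + Constants.ZBULK_FAILED_COPYQ);

    HashSet<String> workIds = new HashSet<String>();
    for (String file : new String[] {"f1.rf", "f2.rf"}) {
      // payload format mirrors the snippet above: "<source>,<destination>"
      byte[] payload = ("/bulk/" + file + ",/failures/" + file).getBytes(Constants.UTF8);
      copyQueue.addWork(file, payload);
      workIds.add(file);
    }

    // block until some worker has processed every queued item
    copyQueue.waitUntilDone(workIds);
  }
}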


  public RecoveryManager(Master master) {
    this.master = master;
    executor = Executors.newScheduledThreadPool(4, new NamingThreadFactory("Walog sort starter "));
    zooCache = new ZooCache();
    try {
      List<String> workIDs = new DistributedWorkQueue(ZooUtil.getRoot(master.getInstance()) + Constants.ZRECOVERY).getWorkQueued();
      sortsQueued.addAll(workIDs);
    } catch (Exception e) {
      log.warn(e, e);
    }
  }

  private void initiateSort(String host, final String file) throws KeeperException, InterruptedException {
    String source = getSource(host, file).toString();
    new DistributedWorkQueue(ZooUtil.getRoot(master.getInstance()) + Constants.ZRECOVERY).addWork(file, source.getBytes(Constants.UTF8));
   
    synchronized (this) {
      sortsQueued.add(file);
    }

    clientAddress = new InetSocketAddress(clientAddress.getAddress(), clientPort);
    announceExistence();
   
    ThreadPoolExecutor distWorkQThreadPool = new SimpleThreadPool(getSystemConfiguration().getCount(Property.TSERV_WORKQ_THREADS), "distributed work queue");
   
    bulkFailedCopyQ = new DistributedWorkQueue(ZooUtil.getRoot(instance) + Constants.ZBULK_FAILED_COPYQ);
    try {
      bulkFailedCopyQ.startProcessing(new BulkFailedCopyProcessor(), distWorkQThreadPool);
    } catch (Exception e1) {
      throw new RuntimeException("Failed to start distributed work queue for copying ", e1);
    }
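
On the consumer side, the tablet server registers a processor with startProcessing, as in the excerpt above. The sketch below assumes DistributedWorkQueue.Processor is a callback interface with newProcessor() and process(String, byte[]) methods, which is what the BulkFailedCopyProcessor usage suggests but is not shown here; the class name, pool size, and ZooKeeper path are illustrative.

import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;

import org.apache.accumulo.core.Constants;
import org.apache.accumulo.server.zookeeper.DistributedWorkQueue;

public class CopyQueueConsumerSketch {

  // assumed shape of the Processor callback; verify against the actual interface
  static class LoggingProcessor implements DistributedWorkQueue.Processor {
    @Override
    public DistributedWorkQueue.Processor newProcessor() {
      return new LoggingProcessor();
    }

    @Override
    public void process(String workId, byte[] data) {
      // payload format mirrors the producer snippets: "<source>,<destination>"
      String[] paths = new String(data, Constants.UTF8).split(",");
      System.out.println("would copy " + paths[0] + " to " + paths[1] + " for " + workId);
    }
  }

  public static void main(String[] args) throws Exception {
    ThreadPoolExecutor pool = (ThreadPoolExecutor) Executors.newFixedThreadPool(4);
    DistributedWorkQueue copyQueue =
        new DistributedWorkQueue("/accumulo/instance-id" + Constants.ZBULK_FAILED_COPYQ);
    // register the processor; items added by producers are handed to process()
    copyQueue.startProcessing(new LoggingProcessor(), pool);
  }
}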


    announceExistence();

    ThreadPoolExecutor distWorkQThreadPool = (ThreadPoolExecutor) Executors.newFixedThreadPool(
        ServerConfiguration.getSystemConfiguration().getCount(Property.TSERV_WORKQ_THREADS), new NamingThreadFactory("distributed work queue"));

    bulkFailedCopyQ = new DistributedWorkQueue(Constants.ZROOT + "/" + HdfsZooInstance.getInstance().getInstanceID() + Constants.ZBULK_FAILED_COPYQ);
    try {
      bulkFailedCopyQ.startProcessing(new BulkFailedCopyProcessor(), distWorkQThreadPool);
    } catch (Exception e1) {
      throw new RuntimeException("Failed to start distributed work queue for copying ", e1);
    }

    this.threadPool = new SimpleThreadPool(threadPoolSize, this.getClass().getName());
  }
 
  public void startWatchingForRecoveryLogs(ThreadPoolExecutor distWorkQThreadPool) throws KeeperException, InterruptedException {
    this.threadPool = distWorkQThreadPool;
    new DistributedWorkQueue(ZooUtil.getRoot(instance) + Constants.ZRECOVERY).startProcessing(new LogProcessor(), this.threadPool);
  }
