Package org.apache.accumulo.server.zookeeper

Examples of org.apache.accumulo.server.zookeeper.DistributedWorkQueue

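DistributedWorkQueue coordinates work items across Accumulo servers through a shared ZooKeeper node. In the excerpts below, all drawn from Accumulo's bulk-import failure handling, a producer constructs the queue from a ZooKeeper path (for example, ZooUtil.getRoot(instance) + Constants.ZBULK_FAILED_COPYQ), enqueues items with addWork(id, payload), and blocks on waitUntilDone(ids); tablet servers consume items by passing a processor and a thread pool to startProcessing.

A minimal sketch of that producer/consumer pattern follows, assuming the single-argument constructor and the DistributedWorkQueue.Processor callback shape implied by the excerpts. The class names, work id, and paths (WorkQueueSketch, CopyProcessor, "file1", the hdfs:// URLs) are illustrative, not taken from the Accumulo source.

import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ThreadPoolExecutor;

import org.apache.accumulo.server.zookeeper.DistributedWorkQueue;

public class WorkQueueSketch {

  // Consumer callback: the queue asks for a fresh instance per work item
  // via newProcessor(), then hands each item's id and payload to process().
  static class CopyProcessor implements DistributedWorkQueue.Processor {
    @Override
    public DistributedWorkQueue.Processor newProcessor() {
      return new CopyProcessor();
    }

    @Override
    public void process(String workId, byte[] data) {
      // The producer below encodes the payload as "source,dest".
      String[] parts = new String(data).split(",");
      System.out.println("copying " + parts[0] + " to " + parts[1]);
    }
  }

  // Producer: enqueue one item per file, then block until all are done.
  static void produceAndWait(DistributedWorkQueue queue) throws Exception {
    Set<String> workIds = new HashSet<String>();
    queue.addWork("file1", "hdfs://src/file1,hdfs://err/file1".getBytes());
    workIds.add("file1");
    queue.waitUntilDone(workIds);
  }

  // Consumer: register a processor and a pool; items run on the pool's threads.
  static void consume(DistributedWorkQueue queue, ThreadPoolExecutor pool) throws Exception {
    queue.startProcessing(new CopyProcessor(), pool);
  }
}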

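Producer side: this bulk-import cleanup step renames failed files, enqueues one copy task per still-missing file on the shared failed-copy queue (keyed by file name, payload "source,dest"), and blocks until every copy completes before deleting the failures marker: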
      fs.rename(orig, dest);
      log.debug("tid " + tid + " renamed " + orig + " to " + dest + ": import failed");
    }
   
    if (loadedFailures.size() > 0) {
      DistributedWorkQueue bifCopyQueue = new DistributedWorkQueue(Constants.ZROOT + "/" + HdfsZooInstance.getInstance().getInstanceID()
          + Constants.ZBULK_FAILED_COPYQ);
     
      HashSet<String> workIds = new HashSet<String>();
     
      for (String failure : loadedFailures.values()) {
        Path orig = new Path(failure);
        Path dest = new Path(error, orig.getName());
       
        if (fs.exists(dest))
          continue;
       
        // Payload is "source,dest"; this revision uses the platform-default
        // charset (later revisions on this page pass Constants.UTF8).
        bifCopyQueue.addWork(orig.getName(), (failure + "," + dest).getBytes());
        workIds.add(orig.getName());
        log.debug("tid " + tid + " added to copyq: " + orig + " to " + dest + ": failed");
      }
     
      // Block until every queued copy has been completed by some worker.
      bifCopyQueue.waitUntilDone(workIds);
    }

    fs.delete(new Path(error, BulkImport.FAILURES_TXT), true);
    return new CleanUpBulkImport(tableId, source, bulk, error);
  }

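Consumer side: during tablet-server startup, a SimpleThreadPool sized by Property.TSERV_WORKQ_THREADS is created and a BulkFailedCopyProcessor is registered against the same queue: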

    }
    announceExistence();

    ThreadPoolExecutor distWorkQThreadPool = new SimpleThreadPool(getSystemConfiguration().getCount(Property.TSERV_WORKQ_THREADS), "distributed work queue");

    bulkFailedCopyQ = new DistributedWorkQueue(ZooUtil.getRoot(instance) + Constants.ZBULK_FAILED_COPYQ);
    try {
      // Register the processor; queued items execute on the pool's threads.
      bulkFailedCopyQ.startProcessing(new BulkFailedCopyProcessor(), distWorkQThreadPool);
    } catch (Exception e1) {
      throw new RuntimeException("Failed to start distributed work queue for copying ", e1);
    }

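A variant of the cleanup step that encodes the queue payload explicitly with Constants.UTF8 and removes the failures marker via fs.deleteRecursively: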
      fs.rename(orig, dest);
      log.debug("tid " + tid + " renamed " + orig + " to " + dest + ": import failed");
    }

    if (loadedFailures.size() > 0) {
      DistributedWorkQueue bifCopyQueue = new DistributedWorkQueue(Constants.ZROOT + "/" + HdfsZooInstance.getInstance().getInstanceID()
          + Constants.ZBULK_FAILED_COPYQ);

      HashSet<String> workIds = new HashSet<String>();

      for (String failure : loadedFailures.values()) {
        Path orig = new Path(failure);
        Path dest = new Path(error, orig.getName());

        if (fs.exists(dest))
          continue;

        bifCopyQueue.addWork(orig.getName(), (failure + "," + dest).getBytes(Constants.UTF8));
        workIds.add(orig.getName());
        log.debug("tid " + tid + " added to copyq: " + orig + " to " + dest + ": failed");
      }

      bifCopyQueue.waitUntilDone(workIds);
    }

    fs.deleteRecursively(new Path(error, BulkImport.FAILURES_TXT));
    return new CleanUpBulkImport(tableId, source, bulk, error);
  }

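An alternative consumer setup that builds the worker pool directly with Executors.newFixedThreadPool and a NamingThreadFactory: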
   
    ThreadPoolExecutor distWorkQThreadPool = (ThreadPoolExecutor) Executors.newFixedThreadPool(
        ServerConfiguration.getSystemConfiguration().getCount(Property.TSERV_WORKQ_THREADS),
        new NamingThreadFactory("distributed work queue"));

    bulkFailedCopyQ = new DistributedWorkQueue(Constants.ZROOT + "/" + HdfsZooInstance.getInstance().getInstanceID() + Constants.ZBULK_FAILED_COPYQ);
    try {
      bulkFailedCopyQ.startProcessing(new BulkFailedCopyProcessor(), distWorkQThreadPool);
    } catch (Exception e1) {
      throw new RuntimeException("Failed to start distributed work queue for copying ", e1);
    }

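An earlier revision of the cleanup step, using the platform-default getBytes() and the literal file name "failures.txt":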
      fs.rename(orig, dest);
      log.debug("tid " + tid + " renamed " + orig + " to " + dest + ": failed");
    }
   
    if (loadedFailures.size() > 0) {
      DistributedWorkQueue bifCopyQueue = new DistributedWorkQueue(Constants.ZROOT + "/" + HdfsZooInstance.getInstance().getInstanceID()
          + Constants.ZBULK_FAILED_COPYQ);
     
      HashSet<String> workIds = new HashSet<String>();
     
      for (String failure : loadedFailures.values()) {
        Path orig = new Path(failure);
        Path dest = new Path(error, orig.getName());
       
        if (fs.exists(dest))
          continue;
       
        bifCopyQueue.addWork(orig.getName(), (failure + "," + dest).getBytes());
        workIds.add(orig.getName());
        log.debug("tid " + tid + " added to copyq: " + orig + " to " + dest + ": failed");
      }
     
      bifCopyQueue.waitUntilDone(workIds);
    }

    fs.delete(new Path(error, "failures.txt"), true);
    return new CleanUpBulkImport(tableId, source, bulk, error);
  }

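A revision that combines the explicit Constants.UTF8 payload encoding with the BulkImport.FAILURES_TXT constant: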
      fs.rename(orig, dest);
      log.debug("tid " + tid + " renamed " + orig + " to " + dest + ": import failed");
    }
   
    if (loadedFailures.size() > 0) {
      DistributedWorkQueue bifCopyQueue = new DistributedWorkQueue(Constants.ZROOT + "/" + HdfsZooInstance.getInstance().getInstanceID()
          + Constants.ZBULK_FAILED_COPYQ);
     
      HashSet<String> workIds = new HashSet<String>();
     
      for (String failure : loadedFailures.values()) {
        Path orig = new Path(failure);
        Path dest = new Path(error, orig.getName());
       
        if (fs.exists(dest))
          continue;
       
        bifCopyQueue.addWork(orig.getName(), (failure + "," + dest).getBytes(Constants.UTF8));
        workIds.add(orig.getName());
        log.debug("tid " + tid + " added to copyq: " + orig + " to " + dest + ": failed");
      }
     
      bifCopyQueue.waitUntilDone(workIds);
    }

    fs.delete(new Path(error, BulkImport.FAILURES_TXT), true);
    return new CleanUpBulkImport(tableId, source, bulk, error);
  }

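A fuller view of the tablet-server startup sequence, including the client address rebinding and existence announcement that precede creation of the queue: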
    clientAddress = new InetSocketAddress(clientAddress.getAddress(), clientPort);
    announceExistence();
   
    ThreadPoolExecutor distWorkQThreadPool = new SimpleThreadPool(getSystemConfiguration().getCount(Property.TSERV_WORKQ_THREADS), "distributed work queue");
   
    bulkFailedCopyQ = new DistributedWorkQueue(ZooUtil.getRoot(instance) + Constants.ZBULK_FAILED_COPYQ);
    try {
      bulkFailedCopyQ.startProcessing(new BulkFailedCopyProcessor(), distWorkQThreadPool);
    } catch (Exception e1) {
      throw new RuntimeException("Failed to start distributed work queue for copying ", e1);
    }


