Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.LocalDirAllocator
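LocalDirAllocator allocates files across the set of local directories listed in a configuration property (for example mapred.local.dir), rotating among them and skipping any disk that is full or unwritable. The snippets below show how Hadoop and related projects (several appear to come from Apache Tajo) construct one and call getLocalPathForWrite, getLocalPathToRead, and getAllLocalPathsToRead. First, a minimal self-contained sketch; the /tmp directory values and the job_0001 file name are placeholders chosen for illustration, not defaults:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.LocalDirAllocator;
    import org.apache.hadoop.fs.Path;

    public class LocalDirAllocatorExample {
      public static void main(String[] args) throws Exception {
        // placeholder directories for illustration, not Hadoop defaults
        Configuration conf = new Configuration();
        conf.set("mapred.local.dir", "/tmp/ldir/1,/tmp/ldir/2");

        // the allocator is keyed by the property that lists the directories
        LocalDirAllocator alloc = new LocalDirAllocator("mapred.local.dir");

        // pick a directory with at least 4 KB free and get a writable path;
        // missing parent directories are created in the chosen root
        Path out = alloc.getLocalPathForWrite("job_0001/spill0.out", 4096, conf);
        FileSystem localFs = FileSystem.getLocal(conf);
        localFs.create(out).close();

        // later, locate the same relative path among the configured dirs
        Path in = alloc.getLocalPathToRead("job_0001/spill0.out", conf);
        System.out.println("allocated " + out + ", found " + in);
      }
    }

getAllLocalPathsToRead additionally returns every copy of a relative path found across the configured directories, which the cleanup snippets below rely on.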


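Test setup for a TaskTracker: a DefaultTaskController is created and given a LocalDirAllocator keyed on mapred.local.dir before the JVM and user-log managers are installed.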
    tt.setMaxReduceSlots(REDUCE_SLOTS);
    TaskController dtc = new DefaultTaskController();
    tt.setTaskController(dtc);
    Configuration conf = new Configuration();
    dtc.setConf(conf);
    LocalDirAllocator ldirAlloc =
        new LocalDirAllocator(JobConf.MAPRED_LOCAL_DIR_PROPERTY);
    tt.getTaskController().setup(ldirAlloc, new LocalStorage(ttConf.getLocalDirs()));
    JobID jobId = new JobID("test", 0);
    jvmManager = new JvmManager(tt);
    tt.setJvmManagerInstance(jvmManager);
    tt.setUserLogManager(new UserLogManager(ttConf));
View Full Code Here


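Constructor of an external-sort executor (the ConfVars identifiers suggest Apache Tajo): the allocator is keyed on the worker temporal-directory property and paired with a RawLocalFileSystem for spill files.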
    // size the sort thread pool from configuration and pre-size the in-memory buffer
    this.allocatedCoreNum = context.getConf().getIntVar(ConfVars.EXECUTOR_EXTERNAL_SORT_THREAD_NUM);
    this.executorService = Executors.newFixedThreadPool(this.allocatedCoreNum);
    this.inMemoryTable = new ArrayList<Tuple>(100000);

    this.sortTmpDir = getExecutorTmpDir();
    localDirAllocator = new LocalDirAllocator(ConfVars.WORKER_TEMPORAL_DIR.varname);
    localFS = new RawLocalFileSystem();
  }
View Full Code Here

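Creating the output directory for an execution block: getLocalPathForWrite picks a configured directory with room and returns a writable path under it.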
      // the base dir for an output dir
      baseDir = queryId.toString() + "/output/" + executionBlockId.getId();

      // initialize LocalDirAllocator
      lDirAllocator = new LocalDirAllocator(ConfVars.WORKER_TEMPORAL_DIR.varname);

      baseDirPath = localFS.makeQualified(lDirAllocator.getLocalPathForWrite(baseDir, conf));
      LOG.info("TaskRunner basedir is created (" + baseDir +")");

      // Setup QueryEngine according to the query plan
View Full Code Here

    }

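Cleaning up one relative path: getAllLocalPathsToRead yields the copy of strPath under every configured directory, and each copy is handed to the deletion service.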
    protected void cleanup(String strPath) {
      if (deletionService == null) return;

      LocalDirAllocator lDirAllocator = new LocalDirAllocator(ConfVars.WORKER_TEMPORAL_DIR.varname);

      try {
        // every configured temporal dir may hold a copy of strPath; delete each one
        Iterable<Path> iter = lDirAllocator.getAllLocalPathsToRead(strPath, systemConf);
        FileSystem localFS = FileSystem.getLocal(systemConf);
        for (Path path : iter) {
          deletionService.delete(localFS.makeQualified(path));
        }
      } catch (IOException e) {
View Full Code Here

    }

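Cleaning out the temporal directories themselves: asking the allocator for "." yields each configured root, whose contents are then expanded as a glob.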
    protected void cleanupTemporalDirectories() {
      if (deletionService == null) return;

      LocalDirAllocator lDirAllocator = new LocalDirAllocator(ConfVars.WORKER_TEMPORAL_DIR.varname);

      try {
        // "." resolves to the root of every configured temporal directory
        Iterable<Path> iter = lDirAllocator.getAllLocalPathsToRead(".", systemConf);
        FileSystem localFS = FileSystem.getLocal(systemConf);
        for (Path path : iter) {
          // expand each directory's contents so they can be deleted individually
          PathData[] items = PathData.expandAsGlob(localFS.makeQualified(new Path(path, "*")).toString(), systemConf);

          ArrayList<Path> paths = new ArrayList<Path>();
View Full Code Here

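TrackerDistributedCacheManager keys its allocator on TTConfig.LOCAL_DIR and reads the cache-size cap from configuration.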
  public TrackerDistributedCacheManager(Configuration conf,
      TaskController taskController) throws IOException {
    this.localFs = FileSystem.getLocal(conf);
    this.trackerConf = conf;
    this.lDirAllocator = new LocalDirAllocator(TTConfig.LOCAL_DIR);
    this.taskController = taskController;
    // setting the cache size to a default of 10GB
    this.allowedCacheSize = conf.getLong(TTConfig.TT_LOCAL_CACHE_SIZE,
        DEFAULT_CACHE_SIZE);
    // setting the cache number of subdirectories limit to a default of 10000
View Full Code Here

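Multi-pass merge from SequenceFile's sorter: each pass asks the allocator for an intermediate file sized to the expected output, so temp files are spread across disks under space constraints.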
        // create the MergeStreams from the sorted map created in the constructor
        // and dump the final output to a file
        int numSegments = sortedSegmentSizes.size();
        int origFactor = factor;
        int passNo = 1;
        LocalDirAllocator lDirAlloc = new LocalDirAllocator("mapred.local.dir");
        do {
          // get the factor for this pass of merge
          factor = getPassFactor(passNo, numSegments);
          List<SegmentDescriptor> segmentsToMerge = new ArrayList<SegmentDescriptor>();
          int segmentsConsidered = 0;
          int numSegmentsToConsider = factor;
          while (true) {
            // extract the smallest 'factor' number of segment pointers from the
            // TreeMap. Call cleanup on the empty segments (no key/value data)
            SegmentDescriptor[] mStream = getSegmentDescriptors(numSegmentsToConsider);
            for (int i = 0; i < mStream.length; i++) {
              if (mStream[i].nextRawKey()) {
                segmentsToMerge.add(mStream[i]);
                segmentsConsidered++;
                // Count the fact that we read some bytes in calling nextRawKey()
                updateProgress(mStream[i].in.getPosition());
              } else {
                mStream[i].cleanup();
                numSegments--; // we ignore this segment for the merge
              }
            }
            // if we have the desired number of segments
            // or looked at all available segments, we break
            if (segmentsConsidered == factor || sortedSegmentSizes.size() == 0) {
              break;
            }
           
            numSegmentsToConsider = factor - segmentsConsidered;
          }
          // feed the streams to the priority queue
          initialize(segmentsToMerge.size());
          clear();
          for (int i = 0; i < segmentsToMerge.size(); i++) {
            put(segmentsToMerge.get(i));
          }
          // if fewer segments remain than the merge factor, just return the
          // iterator; otherwise do another single-level merge
          if (numSegments <= factor) {
            // calculate the length of the remaining segments. Required for
            // calculating the merge progress
            long totalBytes = 0;
            for (int i = 0; i < segmentsToMerge.size(); i++) {
              totalBytes += segmentsToMerge.get(i).segmentLength;
            }
            if (totalBytes != 0) // being paranoid
              progPerByte = 1.0f / (float) totalBytes;
            // reset factor to what it originally was
            factor = origFactor;
            return this;
          }
          // we want to spread the creation of temp files on multiple disks if
          // available under the space constraints
          long approxOutputSize = 0;
          for (SegmentDescriptor s : segmentsToMerge) {
            approxOutputSize += s.segmentLength + ChecksumFileSystem.getApproxChkSumLength(s.segmentLength);
          }
          Path tmpFilename = new Path(tmpDir, "intermediate").suffix("." + passNo);
         
          Path outputFile = lDirAlloc.getLocalPathForWrite(tmpFilename.toString(), approxOutputSize, conf);
          LOG.debug("writing intermediate results to " + outputFile);
          Writer writer = cloneFileAttributes(
              fs.makeQualified(segmentsToMerge.get(0).segmentPathName),
              fs.makeQualified(outputFile), null);
          writer.sync = null; // disable sync for temp files
          writeFile(this, writer);
          writer.close();
View Full Code Here

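Shuffle-side map-output servlet: the LocalDirAllocator stored as a servlet-context attribute resolves the index and map-output files via getLocalPathToRead, and the resolved paths are memoized in caches.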
        shuffleMetrics.serverHandlerBusy();
        if (ClientTraceLog.isInfoEnabled()) {
          startTime = System.nanoTime();
        }
        outStream = response.getOutputStream();
        JobConf conf = (JobConf) context.getAttribute("conf");
        LocalDirAllocator lDirAlloc =
          (LocalDirAllocator)context.getAttribute("localDirAllocator");
        FileSystem rfs = ((LocalFileSystem)
            context.getAttribute("local.file.system")).getRaw();

      String userName = null;
      String runAsUserName = null;
      synchronized (tracker.runningJobs) {
        RunningJob rjob = tracker.runningJobs.get(JobID.forName(jobId));
        if (rjob == null) {
          throw new IOException("Unknown job " + jobId + "!!");
        }
        userName = rjob.jobConf.getUser();
        runAsUserName = tracker.getTaskController().getRunAsUser(rjob.jobConf);
      }
      // Index file
      String intermediateOutputDir = TaskTracker.getIntermediateOutputDir(userName, jobId, mapId);
      String indexKey = intermediateOutputDir + "/file.out.index";
      Path indexFileName = fileIndexCache.get(indexKey);
      if (indexFileName == null) {
        indexFileName = lDirAlloc.getLocalPathToRead(indexKey, conf);
        fileIndexCache.put(indexKey, indexFileName);
      }

      // Map-output file
      String fileKey = intermediateOutputDir + "/file.out";
      Path mapOutputFileName = fileCache.get(fileKey);
      if (mapOutputFileName == null) {
        mapOutputFileName = lDirAlloc.getLocalPathToRead(fileKey, conf);
        fileCache.put(fileKey, mapOutputFileName);
      }
View Full Code Here

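TaskTracker HTTP server setup: the allocator over mapred.local.dir is created next to the task-controller class lookup.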
    workerThreads = conf.getInt("tasktracker.http.threads", 40);
    server.setThreads(1, workerThreads);
    // let the jsp pages get to the task tracker, config, and other relevant
    // objects
    FileSystem local = FileSystem.getLocal(conf);
    this.localDirAllocator = new LocalDirAllocator("mapred.local.dir");
    Class<? extends TaskController> taskControllerClass =
      conf.getClass("mapred.task.tracker.task-controller",
                     DefaultTaskController.class, TaskController.class);

    fConf = new JobConf(conf);
View Full Code Here

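A closely related bootstrap path: the same allocator is published as the "localDirAllocator" servlet attribute that the shuffle servlet above retrieves.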
    this.shuffleServerMetrics = new ShuffleServerMetrics(fConf);
    server.setThreads(1, workerThreads);
    // let the jsp pages get to the task tracker, config, and other relevant
    // objects
    FileSystem local = FileSystem.getLocal(conf);
    this.localDirAllocator = new LocalDirAllocator("mapred.local.dir");
    server.setAttribute("task.tracker", this);
    server.setAttribute("local.file.system", local);
    server.setAttribute("conf", conf);
    server.setAttribute("log", LOG);
    server.setAttribute("localDirAllocator", localDirAllocator);
View Full Code Here
