Class org.apache.hadoop.util.ResourceCalculatorPlugin

Examples of org.apache.hadoop.util.ResourceCalculatorPlugin.ProcResourceValues
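
ProcResourceValues is a snapshot of a task process's cumulative CPU time and physical/virtual memory usage, obtained from a ResourceCalculatorPlugin. Every example below follows the same pattern: take a snapshot before a phase (sort, spill, merge, copy), take another after it, and charge the difference to per-phase counters. A minimal, self-contained sketch of that pattern (the PhaseTimer class and doWork() method are placeholders, not part of the examples below):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.util.ResourceCalculatorPlugin;
  import org.apache.hadoop.util.ResourceCalculatorPlugin.ProcResourceValues;

  public class PhaseTimer {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // Passing null lets Hadoop pick the plugin for the current platform;
      // the factory returns null where no plugin exists, so guard for that.
      ResourceCalculatorPlugin plugin =
          ResourceCalculatorPlugin.getResourceCalculatorPlugin(null, conf);
      if (plugin == null) {
        System.err.println("No resource calculator plugin for this platform");
        return;
      }
      long startMilli = System.currentTimeMillis();
      ProcResourceValues before = plugin.getProcResourceValues();
      doWork();                                 // the phase being measured
      ProcResourceValues after = plugin.getProcResourceValues();
      long endMilli = System.currentTimeMillis();
      System.out.println("wall ms: " + (endMilli - startMilli)
          + ", cpu ms: " + (after.getCumulativeCpuTime()
              - before.getCumulativeCpuTime()));
    }

    private static void doWork() {
      // placeholder workload
    }
  }

The first example, sortReduceParts(), applies this pattern to sorting the reduce partitions: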


  /**
   * Sort the reduce partitions, update the per-sort counters, and return
   * the post-sort ProcResourceValues snapshot for later use.
   */
  protected ProcResourceValues sortReduceParts() {
    long sortStartMilli = System.currentTimeMillis();
    ProcResourceValues sortStartProcVals =
        task.getCurrentProcResourceValues();
    // sort
    for (int i = 0; i < reducePartitions.length; i++) {
      reducePartitions[i].groupOrSort();
    }
    long sortEndMilli = System.currentTimeMillis();
    ProcResourceValues sortEndProcVals =
        task.getCurrentProcResourceValues();
    mapSpillSortCounter.incCountersPerSort(sortStartProcVals,
        sortEndProcVals, sortEndMilli - sortStartMilli);
    return sortEndProcVals;
  }
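
The incCountersPerSort() call above receives the two snapshots plus the wall-clock delta. MapSpillSortCounters itself is not shown on this page; given the ProcResourceValues accessors used in the final example below, a helper like it plausibly computes the deltas along these lines (a hypothetical sketch, not the actual class):

  import org.apache.hadoop.mapred.Counters;
  import org.apache.hadoop.util.ResourceCalculatorPlugin.ProcResourceValues;

  // Hypothetical per-phase delta helper; the real MapSpillSortCounters
  // implementation is not shown in these examples.
  class SortDeltaCounters {
    private final Counters.Counter sortCpu;   // assumed counter names
    private final Counters.Counter sortWall;

    SortDeltaCounters(Counters.Counter sortCpu, Counters.Counter sortWall) {
      this.sortCpu = sortCpu;
      this.sortWall = sortWall;
    }

    void incCountersPerSort(ProcResourceValues start, ProcResourceValues end,
        long wallClockMillis) {
      // charge the CPU consumed between the snapshots to the sort phase
      sortCpu.increment(end.getCumulativeCpuTime()
          - start.getCumulativeCpuTime());
      sortWall.increment(wallClockMillis);
    }
  }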

sortAndSpill() builds on sortReduceParts(): it reuses the post-sort snapshot as the start of the spill measurement, so the sort/spill boundary is captured exactly once.

  @Override
  public void sortAndSpill() throws IOException {
    ProcResourceValues sortEndProcVals = sortReduceParts();
    long sortEndMilli = System.currentTimeMillis();
    // spill
    FSDataOutputStream out = null;
    long spillBytes = 0;
    try {
      // create spill file
      final SpillRecord spillRec = new SpillRecord(partitions);
      final Path filename =
          task.mapOutputFile
              .getSpillFileForWrite(getTaskID(), numSpills,
                  this.memoryBlockAllocator.getEstimatedSize());
      out = rfs.create(filename);
      for (int i = 0; i < partitions; ++i) {
        IndexRecord rec =
            reducePartitions[i].spill(job, out, keyClass, valClass,
                codec, task.spilledRecordsCounter);
        // record offsets
        spillBytes += rec.partLength;
        spillRec.putIndex(rec, i);
      }

      if (totalIndexCacheMemory >= INDEX_CACHE_MEMORY_LIMIT) {
        // create spill index file
        Path indexFilename =
            task.mapOutputFile.getSpillIndexFileForWrite(getTaskID(),
                numSpills, partitions
                    * MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH);
        spillRec.writeToFile(indexFilename, job);
      } else {
        indexCacheList.add(spillRec);
        totalIndexCacheMemory +=
            spillRec.size() * MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH;
      }
      LOG.info("Finished spill " + numSpills);
      ++numSpills;
    } finally {
      if (out != null)
        out.close();
    }

    long spillEndMilli = System.currentTimeMillis();
    ProcResourceValues spillEndProcVals =
        task.getCurrentProcResourceValues();
    mapSpillSortCounter.incCountersPerSpill(sortEndProcVals,
        spillEndProcVals, spillEndMilli - sortEndMilli, spillBytes);
  }

spillSingleRecord() handles a record too large for the collector's in-memory buffer, wrapping the whole spill in its own start/end snapshot pair:

  public void spillSingleRecord(K key, V value, int part)
      throws IOException {

    ProcResourceValues spillStartProcVals =
        task.getCurrentProcResourceValues();
    long spillStartMilli = System.currentTimeMillis();
    // spill
    FSDataOutputStream out = null;
    long spillBytes = 0;
    try {
      // create spill file
      final SpillRecord spillRec = new SpillRecord(partitions);
      final Path filename =
          task.mapOutputFile.getSpillFileForWrite(getTaskID(),
              numSpills, key.getLength() + value.getLength());
      out = rfs.create(filename);
      IndexRecord rec = new IndexRecord();
      for (int i = 0; i < partitions; ++i) {
        IFile.Writer<K, V> writer = null;
        try {
          long segmentStart = out.getPos();
          // Create a new codec, don't care!
          writer =
              new IFile.Writer<K, V>(job, out, keyClass, valClass,
                  codec, task.spilledRecordsCounter);
          if (i == part) {
            final long recordStart = out.getPos();
            writer.append(key, value);
            // Note that our map byte count will not be accurate with
            // compression
            mapOutputByteCounter
                .increment(out.getPos() - recordStart);
          }
          writer.close();

          // record offsets
          rec.startOffset = segmentStart;
          rec.rawLength = writer.getRawLength();
          rec.partLength = writer.getCompressedLength();
          spillBytes += writer.getCompressedLength();
          spillRec.putIndex(rec, i);
          writer = null;
        } catch (IOException e) {
          if (null != writer)
            writer.close();
          throw e;
        }
      }

      if (totalIndexCacheMemory >= INDEX_CACHE_MEMORY_LIMIT) {
        // create spill index file
        Path indexFilename =
            task.mapOutputFile.getSpillIndexFileForWrite(getTaskID(),
                numSpills, partitions
                    * MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH);
        spillRec.writeToFile(indexFilename, job);
      } else {
        indexCacheList.add(spillRec);
        totalIndexCacheMemory +=
            spillRec.size() * MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH;
      }
     
      LOG.info("Finished spill big record " + numBigRecordsSpills);
      ++numBigRecordsSpills;
      ++numSpills;
    } finally {
      if (out != null)
        out.close();
    }

    long spillEndMilli = System.currentTimeMillis();
    ProcResourceValues spillEndProcVals =
        task.getCurrentProcResourceValues();
    mapSpillSortCounter.incCountersPerSpill(spillStartProcVals,
        spillEndProcVals, spillEndMilli - spillStartMilli, spillBytes);
    mapSpillSortCounter.incSpillSingleRecord();
  }

This next fragment (its enclosing method is cut off) either marks a pending in-memory spill or spills directly, then measures the final merge with the same snapshot pattern:

      hasInMemorySpill = true;
    } else {
      sortAndSpill();
    }
    long mergeStartMilli = System.currentTimeMillis();
    ProcResourceValues mergeStartProcVals = task.getCurrentProcResourceValues();
    mergeParts();
    long mergeEndMilli = System.currentTimeMillis();
    ProcResourceValues mergeEndProcVals = task.getCurrentProcResourceValues();
    mapSpillSortCounter.incMergeCounters(mergeStartProcVals, mergeEndProcVals,
        mergeEndMilli - mergeStartMilli);
  }

A similar merge measurement from the default MapOutputBuffer collector (fragment; the preceding exception-handling code is cut off):

      // release sort buffer before the merge
      kvbuffer = null;
      long mergeStartMilli = System.currentTimeMillis();
      ProcResourceValues mergeStartProcVals = getCurrentProcResourceValues();
      mergeParts();
      long mergeEndMilli = System.currentTimeMillis();
      ProcResourceValues mergeEndProcVals = getCurrentProcResourceValues();
      spillSortCounters.incMergeCounters(mergeStartProcVals, mergeEndProcVals,
          mergeEndMilli - mergeStartMilli);
     
    }

MapOutputBuffer.sortAndSpill() (fragment; the truncated opening line computing endPosition is completed here) brackets the in-buffer sort and the per-partition spill writes with separate snapshot pairs:

        // endPosition accounts for wraparound of the circular kvoffsets buffer
        final int endPosition = (kvend > kvstart)
          ? kvend
          : kvoffsets.length + kvend;
        //record the cumulative resources used before running sort
        long sortStartMilli = System.currentTimeMillis();
        ProcResourceValues sortStartProcVals = getCurrentProcResourceValues();
        //do the sort
        sorter.sort(MapOutputBuffer.this, kvstart, endPosition, reporter);
        // get the cumulative resources used after the sort, and use the diff as
        // resources/wallclock consumed by the sort.
        long sortEndMilli = System.currentTimeMillis();
        ProcResourceValues sortEndProcVals = getCurrentProcResourceValues();

        spillSortCounters.incCountersPerSort(sortStartProcVals,
            sortEndProcVals, sortEndMilli - sortStartMilli);

        int spindex = kvstart;
        IndexRecord rec = new IndexRecord();
        InMemValBytes value = new InMemValBytes();
        long spillBytes = 0;
        for (int i = 0; i < partitions; ++i) {
          IFile.Writer<K, V> writer = null;
          try {
            long segmentStart = out.getPos();
            writer = new Writer<K, V>(job, out, keyClass, valClass, codec,
                                      spilledRecordsCounter);
            if (combinerRunner == null) {
              // spill directly
              DataInputBuffer key = new DataInputBuffer();
              while (spindex < endPosition &&
                  kvindices[kvoffsets[spindex % kvoffsets.length]
                            + PARTITION] == i) {
                final int kvoff = kvoffsets[spindex % kvoffsets.length];
                getVBytesForOffset(kvoff, value);
                key.reset(kvbuffer, kvindices[kvoff + KEYSTART],
                          (kvindices[kvoff + VALSTART] -
                           kvindices[kvoff + KEYSTART]));
                writer.append(key, value);
                ++spindex;
              }
            } else {
              int spstart = spindex;
              while (spindex < endPosition &&
                  kvindices[kvoffsets[spindex % kvoffsets.length]
                            + PARTITION] == i) {
                ++spindex;
              }
              // Note: we would like to avoid the combiner if we've fewer
              // than some threshold of records for a partition
              if (spstart != spindex) {
                combineCollector.setWriter(writer);
                RawKeyValueIterator kvIter =
                  new MRResultIterator(spstart, spindex);
                combinerRunner.combine(kvIter, combineCollector);
              }
            }

            // close the writer
            writer.close();

            // record offsets
            rec.startOffset = segmentStart;
            rec.rawLength = writer.getRawLength();
            rec.partLength = writer.getCompressedLength();
            spillBytes += writer.getCompressedLength();
            spillRec.putIndex(rec, i);

            writer = null;
          } finally {
            if (null != writer) writer.close();
          }
        }

        if (totalIndexCacheMemory >= INDEX_CACHE_MEMORY_LIMIT) {
          // create spill index file
          Path indexFilename = mapOutputFile.getSpillIndexFileForWrite(
              getTaskID(), numSpills,
              partitions * MAP_OUTPUT_INDEX_RECORD_LENGTH);
          spillRec.writeToFile(indexFilename, job);
        } else {
          indexCacheList.add(spillRec);
          totalIndexCacheMemory +=
            spillRec.size() * MAP_OUTPUT_INDEX_RECORD_LENGTH;
        }
       
        long spillEndMilli = System.currentTimeMillis();
        ProcResourceValues spillEndProcVals = getCurrentProcResourceValues();
        spillSortCounters.incCountersPerSpill(sortEndProcVals,
            spillEndProcVals, spillEndMilli - sortEndMilli, spillBytes);
       
        LOG.info("Finished spill " + numSpills);
        ++numSpills;

MapOutputBuffer.spillSingleRecord() (fragment) takes the same start/end snapshots around spilling an oversized record directly to disk:

      long size = kvbuffer.length + partitions * APPROX_HEADER_LENGTH;
      FSDataOutputStream out = null;
      try {
       
        long spillStartMilli = System.currentTimeMillis();
        ProcResourceValues spillStartProcVals = getCurrentProcResourceValues();
        long spillBytes = 0;
       
        // create spill file
        final SpillRecord spillRec = new SpillRecord(partitions);
        final Path filename = mapOutputFile.getSpillFileForWrite(getTaskID(),
            numSpills, size);
        out = rfs.create(filename);
       
        // we don't run the combiner for a single record
        IndexRecord rec = new IndexRecord();
        for (int i = 0; i < partitions; ++i) {
          IFile.Writer<K, V> writer = null;
          try {
            long segmentStart = out.getPos();
            // Create a new codec, don't care!
            writer = new IFile.Writer<K,V>(job, out, keyClass, valClass, codec,
                                            spilledRecordsCounter);

            if (i == partition) {
              final long recordStart = out.getPos();
              writer.append(key, value);
              // Note that our map byte count will not be accurate with
              // compression
              mapOutputByteCounter.increment(out.getPos() - recordStart);
            }
            writer.close();

            // record offsets
            rec.startOffset = segmentStart;
            rec.rawLength = writer.getRawLength();
            rec.partLength = writer.getCompressedLength();
            spillBytes += writer.getCompressedLength();
            spillRec.putIndex(rec, i);

            writer = null;
          } catch (IOException e) {
            if (null != writer) writer.close();
            throw e;
          }
        }
        if (totalIndexCacheMemory >= INDEX_CACHE_MEMORY_LIMIT) {
          // create spill index file
          Path indexFilename = mapOutputFile.getSpillIndexFileForWrite(
              getTaskID(), numSpills,
              partitions * MAP_OUTPUT_INDEX_RECORD_LENGTH);
          spillRec.writeToFile(indexFilename, job);
        } else {
          indexCacheList.add(spillRec);
          totalIndexCacheMemory +=
            spillRec.size() * MAP_OUTPUT_INDEX_RECORD_LENGTH;
        }
       
        long spillEndMilli = System.currentTimeMillis();
        ProcResourceValues spillEndProcVals = getCurrentProcResourceValues();       
        spillSortCounters.incCountersPerSpill(spillStartProcVals,
            spillEndProcVals, spillEndMilli - spillStartMilli, spillBytes);
        ++numSpills;
      } finally {
        if (out != null) out.close();

On the reduce side, ReduceTask (fragment) brackets the shuffle/copy and sort phases with the same snapshot pairs:

    // Initialize the codec
    codec = initCodec();

    boolean isLocal = "local".equals(job.get("mapred.job.tracker", "local"));
    long reduceCopyStartMilli = System.currentTimeMillis();
    ProcResourceValues copyStartProcVals = getCurrentProcResourceValues();
    if (!isLocal) {
      reduceCopier = new ReduceCopier(umbilical, job, reporter);
      if (!reduceCopier.fetchOutputs()) {
        if(reduceCopier.mergeThrowable instanceof FSError) {
          throw (FSError)reduceCopier.mergeThrowable;
        }
        throw new IOException("Task: " + getTaskID() +
            " - The reduce copier failed", reduceCopier.mergeThrowable);
      }
    }
    long reducerCopyEndMilli = System.currentTimeMillis();
    ProcResourceValues copyEndProcVals = getCurrentProcResourceValues();

    copyPhase.complete();                         // copy is already complete
    setPhase(TaskStatus.Phase.SORT);
    statusUpdate(umbilical);

    final FileSystem rfs = FileSystem.getLocal(job).getRaw();
    RawKeyValueIterator rIter = isLocal
      ? Merger.merge(job, rfs, job.getMapOutputKeyClass(),
          job.getMapOutputValueClass(), codec, getMapFiles(rfs, true),
          !conf.getKeepFailedTaskFiles(), job.getInt("io.sort.factor", 100),
          new Path(getTaskID().toString()), job.getOutputKeyComparator(),
          reporter, spilledRecordsCounter, null)
      : reduceCopier.createKVIterator(job, rfs, reporter);

    // free up the data structures
    mapOutputFilesOnDisk.clear();

    long sortEndMilli = System.currentTimeMillis();
    ProcResourceValues sortEndProcVals = getCurrentProcResourceValues();

    sortPhase.complete();                         // sort is complete
    setPhase(TaskStatus.Phase.REDUCE);
    statusUpdate(umbilical);
    Class keyClass = job.getMapOutputKeyClass();
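
The excerpt ends before the copy and sort snapshots are consumed; presumably they are charged to per-phase counters just as on the map side, along these lines (a hypothetical continuation; the counter object and method names are not visible in the excerpt):

    // Hypothetical accounting, named by analogy with the map-side helpers.
    reduceSortCounters.incCopyCounters(copyStartProcVals, copyEndProcVals,
        reducerCopyEndMilli - reduceCopyStartMilli);
    reduceSortCounters.incSortCounters(copyEndProcVals, sortEndProcVals,
        sortEndMilli - reducerCopyEndMilli);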

Finally, the task's resource-counter update (fragment) publishes the cumulative values to task counters, subtracting the CPU baseline recorded at task start:

     updateHeapUsageCounter();
    
     if (resourceCalculator == null) {
       return;
     }
     ProcResourceValues res = resourceCalculator.getProcResourceValues();
     long cpuTime = res.getCumulativeCpuTime();
     long pMem = res.getPhysicalMemorySize();
     long vMem = res.getVirtualMemorySize();
     // Remove the CPU time consumed previously by JVM reuse
     cpuTime -= initCpuCumulativeTime;
     counters.findCounter(Counter.CPU_MILLISECONDS).setValue(cpuTime);
     counters.findCounter(Counter.PHYSICAL_MEMORY_BYTES).setValue(pMem);
     counters.findCounter(Counter.VIRTUAL_MEMORY_BYTES).setValue(vMem);
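
The initCpuCumulativeTime subtraction matters under JVM reuse: a reused JVM carries CPU time from earlier task attempts, so a baseline recorded once at task start keeps CPU_MILLISECONDS attempt-local. A sketch of that capture (its placement in task initialization is assumed from the comment above):

     // At task initialization (sketch): record the CPU time this JVM has
     // already consumed so reused JVMs do not inflate CPU_MILLISECONDS.
     if (resourceCalculator != null) {
       initCpuCumulativeTime =
           resourceCalculator.getProcResourceValues().getCumulativeCpuTime();
     }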