Examples of MetricsRecord


Examples of co.cask.cdap.metrics.transport.MetricsRecord

    // Currently the test framework only supports system metrics.
    if (scope != MetricsScope.SYSTEM) {
      return;
    }
    while (metrics.hasNext()) {
      MetricsRecord metricsRecord = metrics.next();
      String context = metricsRecord.getContext();
      // Remove the last part, which is the runID
      int idx = context.lastIndexOf('.');
      if (idx >= 0) {
        context = context.substring(0, idx);
      }
      RuntimeStats.count(String.format("%s.%s", context, metricsRecord.getName()), metricsRecord.getValue());
    }
  }
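The snippet above strips the trailing run-id segment from the metric context before counting it with RuntimeStats. A minimal, self-contained sketch of just that key-building step (class, method, and sample context values here are illustrative, not CDAP's):

    public final class MetricKeyBuilder {

      // Drops the last dot-separated segment (the run id), if any.
      static String stripRunId(String context) {
        int idx = context.lastIndexOf('.');
        return idx >= 0 ? context.substring(0, idx) : context;
      }

      // Joins the run-id-free context with the metric name, as passed to RuntimeStats.count(...).
      static String metricKey(String context, String metricName) {
        return stripRunId(context) + "." + metricName;
      }

      public static void main(String[] args) {
        // Prints "myApp.f.myFlow.myFlowlet.process.events" (the hypothetical run id "run123" is dropped).
        System.out.println(metricKey("myApp.f.myFlow.myFlowlet.run123", "process.events"));
      }
    }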

Examples of co.cask.cdap.metrics.transport.MetricsRecord

   * @throws OperationException When there is an error updating the table.
   */
  public void update(Iterator<MetricsRecord> records) throws OperationException {
    try {
      while (records.hasNext()) {
        MetricsRecord record = records.next();
        byte[] rowKey = getKey(record.getContext(), record.getName(), record.getRunId());
        Map<byte[], Long> increments = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);

        // The untagged (empty-tag) total for this record
        increments.put(Bytes.toBytes(MetricsConstants.EMPTY_TAG), (long) record.getValue());

        // For each tag, increment the corresponding value
        for (TagMetric tag : record.getTags()) {
          increments.put(Bytes.toBytes(tag.getTag()), (long) tag.getValue());
        }
        aggregatesTable.increment(rowKey, increments);
      }
    } catch (Exception e) {
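The update loop builds one increment map per record: the untagged total under an empty-tag key, plus one entry per tag. A simplified sketch of that accumulation using plain JDK types (the CDAP Bytes helpers and the aggregates table are replaced with a String-keyed map, and the record/tag types are illustrative stand-ins):

    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    public final class AggregateIncrements {

      // Simplified stand-ins for TagMetric and MetricsRecord, for illustration only.
      record TagValue(String tag, int value) {}
      record Snapshot(int value, List<TagValue> tags) {}

      // Assumed stand-in for MetricsConstants.EMPTY_TAG used in the snippet above.
      static final String EMPTY_TAG = "-";

      // One increment per tag, plus the untagged total under the empty-tag key.
      static Map<String, Long> incrementsFor(Snapshot record) {
        Map<String, Long> increments = new LinkedHashMap<>();
        increments.put(EMPTY_TAG, (long) record.value());
        for (TagValue tag : record.tags()) {
          increments.merge(tag.tag(), (long) tag.value(), Long::sum);
        }
        return increments;
      }

      public static void main(String[] args) {
        Snapshot record = new Snapshot(5,
            List.of(new TagValue("flowlet1", 3), new TagValue("flowlet2", 2)));
        System.out.println(incrementsFor(record)); // {-=5, flowlet1=3, flowlet2=2}
      }
    }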

Examples of co.cask.cdap.metrics.transport.MetricsRecord

    encoderOutputStream.reset();

    KafkaPublisher.Preparer preparer = publisher.prepare(topicPrefix + "." + scope.name().toLowerCase());
    while (metrics.hasNext()) {
      // Encode each MetricsRecord into bytes and make it an individual Kafka message in the message set.
      MetricsRecord record = metrics.next();
      recordWriter.encode(record, encoder);
      preparer.add(ByteBuffer.wrap(encoderOutputStream.toByteArray()), record.getContext());
      encoderOutputStream.reset();
    }

    preparer.send();
  }
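Each record is encoded into a reusable output stream, wrapped as its own message payload, and the stream is reset before the next record. A minimal sketch of that buffer-reuse pattern with plain java.io/java.nio (the CDAP encoder and the KafkaPublisher.Preparer are replaced with placeholders):

    import java.io.ByteArrayOutputStream;
    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;
    import java.util.ArrayList;
    import java.util.Iterator;
    import java.util.List;

    public final class OneMessagePerRecord {

      public static void main(String[] args) {
        Iterator<String> records = List.of("metric-a", "metric-b").iterator();

        // Reused between records, like encoderOutputStream in the snippet.
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        List<ByteBuffer> messages = new ArrayList<>(); // stand-in for the Kafka preparer

        while (records.hasNext()) {
          String record = records.next();
          // Stand-in for recordWriter.encode(record, encoder): write the record's bytes.
          buffer.writeBytes(record.getBytes(StandardCharsets.UTF_8));
          // Copy out the encoded bytes so each record becomes an individual message...
          messages.add(ByteBuffer.wrap(buffer.toByteArray()));
          // ...then reset the shared buffer before encoding the next record.
          buffer.reset();
        }

        System.out.println(messages.size() + " messages prepared"); // 2 messages prepared
      }
    }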

Examples of co.cask.cdap.metrics.transport.MetricsRecord

          Map.Entry<EmitterKey, AggregatedMetricsEmitter> entry = iterator.next();
          if (entry.getKey().getCollectorKey().getScope() != scope) {
            continue;
          }

          MetricsRecord metricsRecord = entry.getValue().emit(timestamp);
          if (metricsRecord.getValue() != 0) {
            LOG.trace("Emit metric {}", metricsRecord);
            return metricsRecord;
          }
        }
        return endOfData();
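The endOfData() call suggests this loop lives inside a Guava AbstractIterator.computeNext() implementation that skips zero-valued records. A small, self-contained sketch of that pattern (Guava's AbstractIterator is assumed; the emitter map is replaced with a plain iterator of integers):

    import com.google.common.collect.AbstractIterator;
    import java.util.Iterator;
    import java.util.List;

    public final class NonZeroIterator extends AbstractIterator<Integer> {

      private final Iterator<Integer> source;

      NonZeroIterator(Iterator<Integer> source) {
        this.source = source;
      }

      @Override
      protected Integer computeNext() {
        // Keep scanning until a non-zero value is found, mirroring the
        // "if (metricsRecord.getValue() != 0) return metricsRecord" check above.
        while (source.hasNext()) {
          Integer value = source.next();
          if (value != 0) {
            return value;
          }
        }
        // Signals the end of iteration, as in the snippet's "return endOfData()".
        return endOfData();
      }

      public static void main(String[] args) {
        Iterator<Integer> it = new NonZeroIterator(List.of(0, 3, 0, 7).iterator());
        it.forEachRemaining(System.out::println); // prints 3 then 7
      }
    }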

Examples of co.cask.cdap.metrics.transport.MetricsRecord

    ImmutableList.Builder<TagMetric> builder = ImmutableList.builder();
    int value = this.value.getAndSet(0);
    for (Map.Entry<String, AtomicInteger> entry : tagValues.asMap().entrySet()) {
      builder.add(new TagMetric(entry.getKey(), entry.getValue().getAndSet(0)));
    }
    return new MetricsRecord(context, runId, name, builder.build(), timestamp, value);
  }
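This emit() builds the record by atomically snapshotting and resetting each counter with getAndSet(0), then passing the values to the MetricsRecord constructor (context, runId, name, tags, timestamp, value). A stripped-down sketch of the snapshot-and-reset step, using only java.util.concurrent (everything except AtomicInteger is illustrative):

    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.AtomicInteger;

    public final class SnapshotAndReset {

      private final AtomicInteger value = new AtomicInteger();
      private final Map<String, AtomicInteger> tagValues = new ConcurrentHashMap<>();

      void increment(String tag, int delta) {
        value.addAndGet(delta);
        tagValues.computeIfAbsent(tag, t -> new AtomicInteger()).addAndGet(delta);
      }

      // Returns the counts accumulated since the last emit and resets them to zero.
      Map<String, Integer> emit() {
        Map<String, Integer> snapshot = new LinkedHashMap<>();
        snapshot.put("<total>", value.getAndSet(0));            // like this.value.getAndSet(0)
        for (Map.Entry<String, AtomicInteger> e : tagValues.entrySet()) {
          snapshot.put(e.getKey(), e.getValue().getAndSet(0));  // per-tag getAndSet(0)
        }
        return snapshot;
      }

      public static void main(String[] args) {
        SnapshotAndReset emitter = new SnapshotAndReset();
        emitter.increment("flowlet1", 3);
        emitter.increment("flowlet2", 2);
        System.out.println(emitter.emit()); // {<total>=5, flowlet1=3, flowlet2=2}
        System.out.println(emitter.emit()); // {<total>=0, flowlet1=0, flowlet2=0}
      }
    }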

Examples of co.cask.cdap.metrics.transport.MetricsRecord


  @Override
  protected void publish(MetricsScope scope, Iterator<MetricsRecord> metrics) throws Exception {
    while (metrics.hasNext()) {
      MetricsRecord record = metrics.next();
      String context = record.getContext();

      // Context is expected to look like appId.b.programId.[m|r].[taskId]
      String counterGroup;
      String[] contextParts = splitPattern.split(context);
      //TODO: Refactor to support any context
      if (context.equals(Constants.Metrics.DATASET_CONTEXT)) {
        counterGroup = "cdap.dataset";
      } else if ("m".equals(contextParts[3])) {
        counterGroup = "cdap.mapper";
      } else if ("r".equals(contextParts[3])) {
        counterGroup = "cdap.reducer";
      } else {
        LOG.error("could not determine if the metric is a map or reduce metric from context {}, skipping...", context);
        continue;
      }

      counterGroup += "." + scope.name();

      String counterName = getCounterName(record.getName());
      taskContext.getCounter(counterGroup, counterName).increment(record.getValue());
      for (TagMetric tag : record.getTags()) {
        counterName = getCounterName(record.getName(), tag.getTag());
        if (counterName != null) {
          taskContext.getCounter(counterGroup, counterName).increment(tag.getValue());
        }
      }
    }
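The publish loop routes each record to a Hadoop counter group by parsing the "appId.b.programId.[m|r].taskId" context. A small sketch of just that routing decision using a plain String.split (the group names mirror the snippet; the dataset-context constant and sample contexts are illustrative):

    public final class CounterGroupRouter {

      // Mirrors Constants.Metrics.DATASET_CONTEXT in the snippet; the value is illustrative.
      static final String DATASET_CONTEXT = "-.dataset";

      // Returns the counter group for a context, or null when it cannot be determined.
      static String counterGroup(String context, String scope) {
        if (DATASET_CONTEXT.equals(context)) {
          return "cdap.dataset." + scope;
        }
        String[] parts = context.split("\\.");
        if (parts.length > 3 && "m".equals(parts[3])) {
          return "cdap.mapper." + scope;
        }
        if (parts.length > 3 && "r".equals(parts[3])) {
          return "cdap.reducer." + scope;
        }
        return null; // neither map nor reduce; the snippet logs and skips this case
      }

      public static void main(String[] args) {
        System.out.println(counterGroup("myApp.b.myMapReduce.m.task42", "SYSTEM")); // cdap.mapper.SYSTEM
        System.out.println(counterGroup("myApp.b.myMapReduce.r.task42", "SYSTEM")); // cdap.reducer.SYSTEM
        System.out.println(counterGroup("myApp.f.myFlow", "SYSTEM"));               // null
      }
    }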

Examples of net.sf.katta.node.monitor.MetricsRecord

    String metricsPath = _zkConf.getZkPath(PathDef.NODE_METRICS, nodeName);
    try {
      _zkClient.writeData(metricsPath, metricsRecord);
    } catch (ZkNoNodeException e) {
      // TODO put in ephemeral map ?
      _zkClient.createEphemeral(metricsPath, new MetricsRecord(nodeName));
    } catch (Exception e) {
      // this only happens if zk is down
      LOG.debug("Can't write to zk", e);
    }
  }
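The write falls back to creating an ephemeral node the first time a node publishes its metrics, and downgrades any other failure to a debug log. A sketch of that fallback flow against a minimal, hypothetical ZooKeeper-client interface (the actual type of Katta's _zkClient and its exception class are not shown in the snippet):

    public final class EphemeralMetricsWriter {

      // Hypothetical, minimal view of the ZooKeeper client used by the snippet.
      interface ZkFacade {
        void writeData(String path, Object data);        // fails if the node does not exist
        void createEphemeral(String path, Object data);  // creates a session-scoped node
      }

      // Hypothetical stand-in for ZkNoNodeException.
      static class NoNodeException extends RuntimeException {}

      static void publishMetrics(ZkFacade zk, String metricsPath, Object metricsRecord) {
        try {
          // Common case: the ephemeral node already exists, just overwrite its data.
          zk.writeData(metricsPath, metricsRecord);
        } catch (NoNodeException e) {
          // First publication for this node: create the ephemeral node so it
          // disappears automatically when the node's session ends.
          zk.createEphemeral(metricsPath, metricsRecord);
        } catch (Exception e) {
          // As in the snippet, other failures (e.g. ZooKeeper down) are only logged.
          System.err.println("Can't write to zk: " + e);
        }
      }
    }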

Examples of net.sf.katta.node.monitor.MetricsRecord

  @Test(timeout = 7000)
  public void testMetrics() throws Exception {
    String nodeName1 = "node1";
    assertNull(_protocol.getMetric(nodeName1));
    _protocol.setMetric(nodeName1, new MetricsRecord(nodeName1));
    assertNotNull(_protocol.getMetric(nodeName1));

    String nodeName2 = "node2";
    _protocol.setMetric(nodeName2, new MetricsRecord(nodeName1));
    assertNotSame(_protocol.getMetric(nodeName1).getServerId(), _protocol.getMetric(nodeName2).getServerId());
  }

Examples of org.apache.hadoop.metrics.MetricsRecord

        poolToJobCounters.put(pool, poolCounters);
      }
      accumulateCounters(poolCounters, counters);

      if (!poolToMetricsRecord.containsKey(pool)) {
        MetricsRecord poolRecord = context.createRecord("pool-" + pool);
        poolToMetricsRecord.put(pool, poolRecord);
      }
    }
  }
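Both Hadoop snippets lazily create one MetricsRecord per pool and cache it in a map keyed by the pool. The same get-or-create step can be written with computeIfAbsent; in this sketch the record factory is a plain placeholder rather than Hadoop's MetricsContext.createRecord:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.function.Function;

    public final class PerPoolRecords {

      // Placeholder for the per-pool record type; the real code caches a MetricsRecord.
      record PoolRecord(String name) {}

      private final Map<String, PoolRecord> poolToRecord = new HashMap<>();

      // Creates the "pool-<name>" record on first use and reuses it afterwards.
      PoolRecord recordFor(String pool, Function<String, PoolRecord> createRecord) {
        return poolToRecord.computeIfAbsent(pool, p -> createRecord.apply("pool-" + p));
      }

      public static void main(String[] args) {
        PerPoolRecords records = new PerPoolRecords();
        PoolRecord first = records.recordFor("gold", PoolRecord::new);
        PoolRecord again = records.recordFor("gold", PoolRecord::new);
        System.out.println(first == again); // true: the cached record is reused
      }
    }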

Examples of org.apache.hadoop.metrics.MetricsRecord

    // The gets + puts below are OK because only one thread is doing it.
    for (PoolGroupSchedulable poolGroup : poolGroupManager.getPoolGroups()) {
      int poolGroupSessions = 0;
      for (PoolSchedulable pool : poolGroup.getPools()) {
        MetricsRecord poolRecord =
            poolInfoToMetricsRecord.get(pool.getPoolInfo());
        if (poolRecord == null) {
          poolRecord = metrics.getContext().createRecord(
              "pool-" + pool.getName());
          poolInfoToMetricsRecord.put(pool.getPoolInfo(), poolRecord);
        }

        PoolInfoMetrics poolMetrics = new PoolInfoMetrics(pool.getPoolInfo(),
            type, poolRecord);
        poolMetrics.setCounter(
            MetricName.GRANTED, pool.getGranted());
        poolMetrics.setCounter(
            MetricName.REQUESTED, pool.getRequested());
        poolMetrics.setCounter(
            MetricName.SHARE, (long) pool.getShare());
        poolMetrics.setCounter(
            MetricName.MIN, pool.getMinimum());
        poolMetrics.setCounter(
            MetricName.MAX, pool.getMaximum());
        poolMetrics.setCounter(
            MetricName.WEIGHT, (long) pool.getWeight());
        poolMetrics.setCounter(
            MetricName.SESSIONS, pool.getScheduleQueue().size());
        poolMetrics.setCounter(
            MetricName.STARVING, pool.getStarvingTime(now) / 1000);
        Long averageFirstWaitMs =
            poolInfoAverageFirstWaitMs.get(pool.getPoolInfo());
        poolMetrics.setCounter(MetricName.AVE_FIRST_WAIT_MS,
            (averageFirstWaitMs == null) ?
                0 : averageFirstWaitMs.longValue());

        newPoolNameToMetrics.put(pool.getPoolInfo(), poolMetrics);
        poolGroupSessions += pool.getScheduleQueue().size();
      }

      MetricsRecord poolGroupRecord =
          poolInfoToMetricsRecord.get(poolGroup.getName());
      if (poolGroupRecord == null) {
        poolGroupRecord = metrics.getContext().createRecord(
            "poolgroup-" + poolGroup.getName());
        poolInfoToMetricsRecord.put(poolGroup.getPoolInfo(), poolGroupRecord);