Examples of DiskUsage

Examples of models.monitor.MonitorProvider.DiskUsage

        .getTotalCommandCountInAgentCommandMetadatas();
   
    int runningJobCount = ActorConfig.runningJobCount.get();
    MonitorProvider mp = MonitorProvider.getInstance();
    PerformUsage performUsage = mp.currentJvmPerformUsage;
    DiskUsage diskUsage = mp.currentDiskUsage;


    HashMap<String, String> metricMap = new HashMap<String, String>();

    String lastRefreshDataValid = DateUtils
View Full Code Here
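
As a point of reference, a minimal hypothetical sketch of collecting these monitor values into a string map; it relies only on the MonitorProvider members visible in the snippet above, and the metric keys and use of String.valueOf(...) are illustrative rather than taken from the project:

    // Hypothetical sketch: only MonitorProvider.getInstance(), currentDiskUsage and
    // currentJvmPerformUsage come from the snippet above; everything else is made up.
    MonitorProvider mp = MonitorProvider.getInstance();
    DiskUsage diskUsage = mp.currentDiskUsage;
    PerformUsage performUsage = mp.currentJvmPerformUsage;

    Map<String, String> metricMap = new HashMap<String, String>();
    metricMap.put("diskUsage", String.valueOf(diskUsage));
    metricMap.put("jvmPerformUsage", String.valueOf(performUsage));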

Examples of org.apache.accumulo.core.client.admin.DiskUsage

      }
    }

    List<DiskUsage> finalUsages = new ArrayList<DiskUsage>();
    for (TDiskUsage diskUsage : diskUsages) {
      finalUsages.add(new DiskUsage(new TreeSet<String>(diskUsage.getTables()), diskUsage.getUsage()));
    }

    return finalUsages;
  }
View Full Code Here

Examples of org.apache.accumulo.core.client.admin.DiskUsage

  @Override
  public List<DiskUsage> getDiskUsage(Set<String> tables) throws AccumuloException, AccumuloSecurityException {

    List<DiskUsage> diskUsages = new ArrayList<DiskUsage>();
    diskUsages.add(new DiskUsage(new TreeSet<String>(tables), 0L));

    return diskUsages;
  }
View Full Code Here
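
For comparison, a short usage sketch of the public API these examples implement: TableOperations.getDiskUsage(Set<String>) returns a list of DiskUsage entries, each pairing a set of tables that share files with the bytes they occupy. The connector and table name below are placeholder assumptions:

    // Hedged sketch: assumes an already-connected Accumulo Connector named "connector"
    // and an existing table called "mytable"; exception handling is omitted.
    Set<String> tables = new TreeSet<String>();
    tables.add("mytable");
    for (DiskUsage usage : connector.tableOperations().getDiskUsage(tables)) {
      System.out.println(usage.getTables() + " -> " + usage.getUsage() + " bytes");
    }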

Examples of org.apache.accumulo.proxy.thrift.DiskUsage

      org.apache.accumulo.proxy.thrift.AccumuloSecurityException, org.apache.accumulo.proxy.thrift.TableNotFoundException, TException {
    try {
      List<org.apache.accumulo.core.client.admin.DiskUsage> diskUsages = getConnector(login).tableOperations().getDiskUsage(tables);
      List<DiskUsage> retUsages = new ArrayList<DiskUsage>();
      for (org.apache.accumulo.core.client.admin.DiskUsage diskUsage : diskUsages) {
        DiskUsage usage = new DiskUsage();
        usage.setTables(new ArrayList<String>(diskUsage.getTables()));
        usage.setUsage(diskUsage.getUsage());
        retUsages.add(usage);
      }
      return retUsages;
    } catch (Exception e) {
      handleExceptionTNF(e);
View Full Code Here

Examples of org.elasticsearch.cluster.DiskUsage

                logger.trace("Unable to determine disk usages for disk-aware allocation, allowing allocation");
            }
            return allocation.decision(Decision.YES, NAME, "disk usages unavailable");
        }

        DiskUsage usage = usages.get(node.nodeId());
        if (usage == null) {
            // If there is no usage, and we have other nodes in the cluster,
            // use the average usage for all nodes as the usage for this node
            usage = averageUsage(node, usages);
            if (logger.isDebugEnabled()) {
                logger.debug("Unable to determine disk usage for [{}], defaulting to average across nodes [{} total] [{} free] [{}% free]",
                        node.nodeId(), usage.getTotalBytes(), usage.getFreeBytes(), usage.getFreeDiskAsPercentage());
            }
        }

        if (includeRelocations) {
            long relocatingShardsSize = sizeOfRelocatingShards(node, allocation, shardSizes);
            DiskUsage usageIncludingRelocations = new DiskUsage(node.nodeId(), node.node().name(),
                    usage.getTotalBytes(), usage.getFreeBytes() - relocatingShardsSize);
            if (logger.isTraceEnabled()) {
                logger.trace("usage without relocations: {}", usage);
                logger.trace("usage with relocations: [{} bytes] {}", relocatingShardsSize, usageIncludingRelocations);
            }
View Full Code Here
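
The relocation adjustment in the decider above simply subtracts the bytes of shards currently relocating onto a node from that node's free space. A hedged illustration with made-up numbers:

    // Illustrative values only; DiskUsage(nodeId, nodeName, totalBytes, freeBytes)
    // matches the constructor used in the snippet above.
    long relocatingShardsSize = 25L;
    DiskUsage usage = new DiskUsage("node1", "node1", 100, 40);                   // 60 used, 40 free
    DiskUsage usageIncludingRelocations = new DiskUsage("node1", "node1",
            usage.getTotalBytes(), usage.getFreeBytes() - relocatingShardsSize);  // 15 free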

Examples of org.elasticsearch.cluster.DiskUsage

                logger.trace("Unable to determine disk usages for disk-aware allocation, allowing allocation");
            }
            return allocation.decision(Decision.YES, NAME, "disk usages unavailable");
        }

        DiskUsage usage = usages.get(node.nodeId());
        if (usage == null) {
            // If there is no usage, and we have other nodes in the cluster,
            // use the average usage for all nodes as the usage for this node
            usage = averageUsage(node, usages);
            if (logger.isDebugEnabled()) {
                logger.debug("Unable to determine disk usage for {}, defaulting to average across nodes [{} total] [{} free] [{}% free]",
                        node.nodeId(), usage.getTotalBytes(), usage.getFreeBytes(), usage.getFreeDiskAsPercentage());
            }
        }

        if (includeRelocations) {
            Map<String, Long> shardSizes = clusterInfo.getShardSizes();
            long relocatingShardsSize = sizeOfRelocatingShards(node, allocation, shardSizes);
            DiskUsage usageIncludingRelocations = new DiskUsage(node.nodeId(), node.node().name(),
                    usage.getTotalBytes(), usage.getFreeBytes() - relocatingShardsSize);
            if (logger.isTraceEnabled()) {
                logger.trace("usage without relocations: {}", usage);
                logger.trace("usage with relocations: [{} bytes] {}", relocatingShardsSize, usageIncludingRelocations);
            }
View Full Code Here

Examples of org.elasticsearch.cluster.DiskUsage

        long totalBytes = 0;
        long freeBytes = 0;
        for (DiskUsage du : usages.values()) {
            totalBytes += du.getTotalBytes();
            freeBytes += du.getFreeBytes();
        }
        return new DiskUsage(node.nodeId(), node.node().name(), totalBytes / usages.size(), freeBytes / usages.size());
    }
View Full Code Here
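
The fallback above averages the per-node totals and free bytes when a node has no reported usage. A small worked example (node names and sizes are made up):

    // Two known nodes, 100 bytes total each, with 40 and 60 bytes free:
    Map<String, DiskUsage> usages = new HashMap<>();
    usages.put("node1", new DiskUsage("node1", "node1", 100, 40)); // 60% used
    usages.put("node2", new DiskUsage("node2", "node2", 100, 60)); // 40% used
    // averageUsage(...) for an unknown node would then be equivalent to:
    DiskUsage average = new DiskUsage("node3", "node3", 100, 50);  // 50% free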

Examples of org.elasticsearch.cluster.DiskUsage

                .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
                .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.7)
                .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.8).build();

        Map<String, DiskUsage> usages = new HashMap<>();
        usages.put("node1", new DiskUsage("node1", "node1", 100, 10)); // 90% used
        usages.put("node2", new DiskUsage("node2", "node2", 100, 35)); // 65% used
        usages.put("node3", new DiskUsage("node3", "node3", 100, 60)); // 40% used
        usages.put("node4", new DiskUsage("node4", "node4", 100, 80)); // 20% used

        Map<String, Long> shardSizes = new HashMap<>();
        shardSizes.put("[test][0][p]", 10L); // 10 bytes
        shardSizes.put("[test][0][r]", 10L);
        final ClusterInfo clusterInfo = new ClusterInfo(ImmutableMap.copyOf(usages), ImmutableMap.copyOf(shardSizes));
View Full Code Here
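
Read as fractions of disk used, the watermarks above mean new shards stop being allocated to a node once it is more than 70% full (low watermark) and shards should be moved off once it passes 80% (high watermark). A hedged check of the fixture against those thresholds:

    // usedFraction = 1 - freeBytes / totalBytes, computed from the getters shown above.
    for (Map.Entry<String, DiskUsage> entry : usages.entrySet()) {
      DiskUsage u = entry.getValue();
      double usedFraction = 1.0 - (double) u.getFreeBytes() / u.getTotalBytes();
      System.out.println(entry.getKey() + " used=" + usedFraction);
    }
    // node1: 0.90 (over the 0.8 high watermark), node2: 0.65 (under the 0.7 low
    // watermark), node3: 0.40, node4: 0.20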