Examples of DatabaseWriter


Examples of org.apache.hadoop.chukwa.util.DatabaseWriter

    ChukwaConfiguration cc = new ChukwaConfiguration();
    // Macro expands the [table] placeholder into the concrete
    // partitioned table name for the current time window.
    String query = "select * from [" + table + "];";
    Macro mp = new Macro(dbSetup.current, query);
    query = mp.toString();
    try {
      DatabaseWriter db = new DatabaseWriter(dbSetup.cluster);
      ResultSet rs = db.query(query);
      while (rs.next()) {
        // Read the first column of each row; the test only verifies that
        // the query executes and the result set is readable.
        String value = rs.getString(1);
      }
      db.close();
    } catch (SQLException ex) {
      fail("SQL Exception: " + ExceptionUtil.getStackTrace(ex));
    }
  }
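
Reduced to its essentials, the pattern above is: expand the query, run it, read rows, close the writer. A minimal standalone sketch, assuming a cluster name ("demo_cluster") and table ("system_metrics") that exist in your own Chukwa jdbc configuration; the try/finally guarantees the writer is closed even when the query throws.

    import java.sql.ResultSet;
    import java.sql.SQLException;

    import org.apache.hadoop.chukwa.util.DatabaseWriter;

    public class QueryExample {
      public static void main(String[] args) throws SQLException {
        // "demo_cluster" and "system_metrics" are placeholder names.
        DatabaseWriter db = new DatabaseWriter("demo_cluster");
        try {
          ResultSet rs = db.query("select * from system_metrics limit 10");
          while (rs.next()) {
            System.out.println(rs.getString(1));
          }
        } finally {
          // Release the connection even if iteration fails.
          db.close();
        }
      }
    }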

Examples of org.apache.hadoop.chukwa.util.DatabaseWriter

  public void testAggregator() {
    Aggregator dba = new Aggregator();
    DatabaseWriter db = new DatabaseWriter(dbSetup.cluster);
    dba.setWriter(db);
    String queries = Aggregator.getContents(new File(System
        .getenv("CHUKWA_CONF_DIR") + File.separator + "aggregator.sql"));
    String[] query = queries.split("\n");
    for (int i = 0; i < query.length; i++) {
      // Any line containing "#" is treated as a comment and skipped.
      if (query[i].indexOf("#") == -1) {
        try {
          dba.process(query[i]);
          // Reached only when process() did not throw.
          assertTrue("Completed query: " + query[i], true);
        } catch (Throwable ex) {
          fail("Exception: " + ExceptionUtil.getStackTrace(ex));
        }
      }
    }
    db.close();
  }
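
The only parsing the test does is line-splitting plus comment filtering. That filter is easy to isolate; a sketch using the same convention as the test (any line containing "#" is a comment), with the extra step of dropping blank lines, which the original does not do:

    import java.util.ArrayList;
    import java.util.List;

    public class SqlScriptFilter {
      // Returns the executable lines of an aggregator-style SQL script.
      public static List<String> executableLines(String script) {
        List<String> result = new ArrayList<String>();
        for (String line : script.split("\n")) {
          if (line.indexOf("#") == -1 && line.trim().length() > 0) {
            result.add(line);
          }
        }
        return result;
      }
    }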

Examples of org.apache.hadoop.chukwa.util.DatabaseWriter

    log.info("Aggregator started.");
    String cluster = System.getProperty("CLUSTER");
    if (cluster == null) {
      cluster = "unknown";
    }
    db = new DatabaseWriter(cluster);
    String queries = Aggregator.getContents(new File(System
        .getenv("CHUKWA_CONF_DIR")
        + File.separator + "aggregator.sql"));
    String[] query = queries.split("\n");
    while(startTime<=endTime) {
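
The excerpt cuts off inside the driver loop, which walks a time window between startTime and endTime. A sketch of that loop's shape with a hypothetical one-hour step; the real Aggregator's stepping logic is not shown in this excerpt:

    import java.text.SimpleDateFormat;
    import java.util.Date;

    public class WindowDriver {
      public static void main(String[] args) {
        SimpleDateFormat fmt = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        long endTime = System.currentTimeMillis();
        long startTime = endTime - 24L * 3600 * 1000; // hypothetical: last 24 hours
        long step = 3600 * 1000L;                     // hypothetical one-hour step
        while (startTime <= endTime) {
          // One aggregation pass per window would run here.
          System.out.println("window start: " + fmt.format(new Date(startTime)));
          startTime += step;
        }
      }
    }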

Examples of org.apache.hadoop.chukwa.util.DatabaseWriter

            try {
                String cluster = System.getProperty("CLUSTER");
                if (cluster == null) {
                    cluster = "unknown";
                }
                db = new DatabaseWriter(cluster);
                // Enumerate the table's columns through JDBC metadata.
                DatabaseMetaData dbMetaData = db.getConnection().getMetaData();
                ResultSet rs = dbMetaData.getColumns(null, null, table[0], null);
                boolean first = true;
                while (rs.next()) {
                    if (!first) {
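
The truncated loop is assembling a comma-separated column list from JDBC metadata, using the first flag to suppress the leading comma. The same technique as a self-contained method against a plain java.sql.Connection; COLUMN_NAME is a standard column in the result of DatabaseMetaData.getColumns:

    import java.sql.Connection;
    import java.sql.DatabaseMetaData;
    import java.sql.ResultSet;
    import java.sql.SQLException;

    public class ColumnLister {
      // Builds "col1,col2,..." for a table, mirroring the 'first' flag above.
      public static String columnList(Connection conn, String table)
          throws SQLException {
        DatabaseMetaData meta = conn.getMetaData();
        ResultSet rs = meta.getColumns(null, null, table, null);
        StringBuilder sb = new StringBuilder();
        boolean first = true;
        while (rs.next()) {
          if (!first) {
            sb.append(",");
          }
          sb.append(rs.getString("COLUMN_NAME"));
          first = false;
        }
        return sb.toString();
      }
    }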

Examples of org.apache.hadoop.chukwa.util.DatabaseWriter

  public void dropTables(long start, long end) {
    String cluster = System.getProperty("CLUSTER");
    if (cluster == null) {
      cluster = "unknown";
    }
    DatabaseWriter dbw = new DatabaseWriter(cluster);
    try {
      HashMap<String, String> dbNames = dbc.startWith("report.db.name.");
      Iterator<String> ki = dbNames.keySet().iterator();
      while (ki.hasNext()) {
        String name = ki.next();
        String tableName = dbNames.get(name);
        String[] tableList = dbc.findTableName(tableName, start, end);
        for (String tl : tableList) {
          log.debug("table name: " + tl);
          try {
            // Partitioned names follow the pattern <base>_<partition>_<unit>.
            String[] parts = tl.split("_");
            int partition = Integer.parseInt(parts[parts.length - 2]);
            String table = "";
            for (int i = 0; i < parts.length - 2; i++) {
              if (i != 0) {
                table = table + "_";
              }
              table = table + parts[i];
            }
            // Retire old partitions: drop the partition three steps back,
            // and the one before it if it exists.
            partition = partition - 3;
            String dropPartition = "drop table if exists " + table + "_"
                + partition + "_" + parts[parts.length - 1];
            dbw.execute(dropPartition);
            partition--;
            if (partition >= 0) {
              dropPartition = "drop table if exists " + table + "_" + partition
                  + "_" + parts[parts.length - 1];
              dbw.execute(dropPartition);
            }
          } catch (NumberFormatException e) {
            log.error("Error in parsing table partition number, skipping table:"
                + tl);
          } catch (ArrayIndexOutOfBoundsException e) {
            log.debug("Skipping table:" + tl
                + ", because it has no partition configuration.");
          }
        }
      }
      dbw.close();
    } catch (Exception e) {
      e.printStackTrace();
    }
  }
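
The string handling in dropTables is the interesting part: a partitioned name such as system_metrics_5_week (a hypothetical example) splits into base, partition number, and unit suffix, and the drop targets the partition three steps back. The parsing in isolation:

    public class PartitionDropper {
      // "system_metrics_5_week" -> "drop table if exists system_metrics_2_week"
      public static String dropStatement(String partitionedName) {
        String[] parts = partitionedName.split("_");
        int partition = Integer.parseInt(parts[parts.length - 2]);
        StringBuilder base = new StringBuilder();
        for (int i = 0; i < parts.length - 2; i++) {
          if (i != 0) {
            base.append("_");
          }
          base.append(parts[i]);
        }
        int expired = partition - 3; // same retention offset as above
        return "drop table if exists " + base + "_" + expired + "_"
            + parts[parts.length - 1];
      }

      public static void main(String[] args) {
        System.out.println(dropStatement("system_metrics_5_week"));
      }
    }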

Examples of org.apache.hadoop.chukwa.util.DatabaseWriter

    if (!cluster.equals("")) {
      ClusterConfig cc = new ClusterConfig();
      jdbc_url = cc.getURL(cluster);
    }
    try {
      DatabaseWriter dbWriter = new DatabaseWriter(cluster);
      conn = dbWriter.getConnection();
    } catch (Exception ex) {
      throw new Exception("JDBC URL does not exist for: " + jdbc_url);
    }
    log.debug("Initialized JDBC URL: " + jdbc_url);
    HashMap<String, String> dbNames = mdlConfig.startWith("report.db.name.");
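
Boiled down, the bootstrap above opens a connection through DatabaseWriter and fails fast with a descriptive message. A minimal sketch; unlike the original it chains the underlying exception as the cause, and the cluster name is whatever your jdbc configuration defines:

    import java.sql.Connection;

    import org.apache.hadoop.chukwa.util.DatabaseWriter;

    public class ConnectionBootstrap {
      public static Connection connect(String cluster) throws Exception {
        try {
          DatabaseWriter dbWriter = new DatabaseWriter(cluster);
          return dbWriter.getConnection();
        } catch (Exception ex) {
          // Keep the root cause instead of discarding it.
          throw new Exception("Unable to open JDBC connection for cluster: "
              + cluster, ex);
        }
      }
    }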

Examples of org.apache.hadoop.chukwa.util.DatabaseWriter

  public void createTables(long start, long end) throws Exception {
    String cluster = System.getProperty("CLUSTER");
    if (cluster == null) {
      cluster = "unknown";
    }
    DatabaseWriter dbw = new DatabaseWriter(cluster);
    HashMap<String, String> dbNames = dbc.startWith("report.db.name.");
    Iterator<String> ki = dbNames.keySet().iterator();
    while (ki.hasNext()) {
      String name = ki.next();
      String tableName = dbNames.get(name);
      String[] tableList = dbc.findTableName(tableName, start, end);
      log.debug("table name: " + tableList[0]);
      try {
        // Partitioned names follow the pattern <base>_<partition>_<unit>.
        String[] parts = tableList[0].split("_");
        int partition = Integer.parseInt(parts[parts.length - 2]);
        String table = "";
        for (int i = 0; i < parts.length - 2; i++) {
          if (i != 0) {
            table = table + "_";
          }
          table = table + parts[i];
        }
        // The <base>_template table holds the canonical schema; copy it
        // for the current partition and the next two.
        String query = "show create table " + table + "_template;";
        ResultSet rs = dbw.query(query);
        while (rs.next()) {
          log.debug("table schema: " + rs.getString(2));
          query = rs.getString(2);
          log.debug("template table name:" + table + "_template");
          for (int p = partition; p <= partition + 2; p++) {
            String partitionName = table + "_" + p + "_"
                + parts[parts.length - 1];
            log.debug("creating table: " + partitionName);
            String createPartition = query.replaceFirst(table + "_template",
                partitionName);
            createPartition = createPartition.replaceFirst("TABLE",
                "TABLE IF NOT EXISTS");
            dbw.execute(createPartition);
          }
        }
      } catch (NumberFormatException e) {
        log.error("Error in parsing table partition number, skipping table:"
            + tableList[0]);
      } catch (ArrayIndexOutOfBoundsException e) {
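
The heart of createTables is a pure string rewrite: take the DDL returned by show create table <base>_template, substitute the partitioned name, and add an IF NOT EXISTS guard. That rewrite in isolation, with hypothetical table names:

    public class TemplateRewrite {
      // Rewrites template DDL into DDL for one concrete partition.
      public static String forPartition(String templateDdl, String base,
          int partition, String unit) {
        String target = base + "_" + partition + "_" + unit;
        String ddl = templateDdl.replaceFirst(base + "_template", target);
        return ddl.replaceFirst("TABLE", "TABLE IF NOT EXISTS");
      }

      public static void main(String[] args) {
        String ddl = "CREATE TABLE system_metrics_template (ts timestamp)";
        // Prints: CREATE TABLE IF NOT EXISTS system_metrics_5_week (ts timestamp)
        System.out.println(forPartition(ddl, "system_metrics", 5, "week"));
      }
    }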

Examples of org.apache.hadoop.chukwa.util.DatabaseWriter

      long start = aYearAgo.getTimeInMillis(); // starting from 2008/01/01
      long end = start + (interval * 60000);
      log.debug("start time: " + start);
      log.debug("end time: " + end);
      Calendar now = Calendar.getInstance();
      DatabaseWriter db = new DatabaseWriter();
      String fields = null;
      String dateclause = null;
      boolean emptyPrimeKey = false;
      log.debug("Consolidate for " + interval + " minutes interval.");
      String table = this.table + "_" + interval;
      // Find the most recent entry
      try {
        String query = "select * from " + table + " order by timestamp desc limit 1";
        log.debug("Query: " + query);
        rs = db.query(query);
        if (rs == null) {
          throw new SQLException("Table undefined.");
        }
        ResultSetMetaData rmeta = rs.getMetaData();
        boolean empty = true;
        if (rs.next()) {
          for (int i = 1; i <= rmeta.getColumnCount(); i++) {
            if (rmeta.getColumnName(i).toLowerCase().equals("timestamp")) {
              start = rs.getTimestamp(i).getTime();
            }
          }
          empty = false;
        }
        if (empty) {
          throw new SQLException("Table is empty.");
        }
        end = start + (interval * 60000);
      } catch (SQLException ex) {
        // Fall back to the oldest entry in the source table.
        try {
          String query = "select * from " + this.table + " order by timestamp limit 1";
          log.debug("Query: " + query);
          rs = db.query(query);
          if (rs.next()) {
            ResultSetMetaData rmeta = rs.getMetaData();
            for (int i = 1; i <= rmeta.getColumnCount(); i++) {
              if (rmeta.getColumnName(i).toLowerCase().equals("timestamp")) {
                start = rs.getTimestamp(i).getTime();
              }
            }
          }
          end = start + (interval * 60000);
        } catch (SQLException ex2) {
          log.error("Unable to determine starting point in table: " + this.table);
          log.error("SQL Error:" + ExceptionUtil.getStackTrace(ex2));
          return;
        }
      }
      try {
        // Build the SELECT column list: average numeric columns, keep
        // VARCHAR and TIMESTAMP columns as-is, group by VARCHAR columns.
        ResultSetMetaData rmeta = rs.getMetaData();
        int col = rmeta.getColumnCount();
        columns = new String[col];
        columnsType = new int[col];
        for (int i = 1; i <= col; i++) {
          columns[i - 1] = rmeta.getColumnName(i);
          columnsType[i - 1] = rmeta.getColumnType(i);
        }
        for (int i = 0; i < columns.length; i++) {
          if (i == 0) {
            fields = columns[i];
            if (columnsType[i] == java.sql.Types.VARCHAR) {
              groupBy = " group by " + columns[i];
            }
          } else {
            if (columnsType[i] == java.sql.Types.VARCHAR
                || columnsType[i] == java.sql.Types.TIMESTAMP) {
              fields = fields + "," + columns[i];
              if (columnsType[i] == java.sql.Types.VARCHAR) {
                groupBy = " group by " + columns[i];
              }
            } else {
              fields = fields + ",AVG(" + columns[i] + ") as " + columns[i];
            }
          }
        }
      } catch (SQLException ex) {
        log.error("SQL Error:" + ExceptionUtil.getStackTrace(ex));
        return;
      }
      if (groupBy.equals("")) {
        emptyPrimeKey = true;
      }
      long previousStart = start;
      while (end < now.getTimeInMillis() - (interval * 2 * 60000)) {
        // Select new data sample for the given intervals
        if (interval == 5) {
          table = this.table;
        } else if (interval == 30) {
          table = this.table + "_5";
        } else if (interval == 120) {
          table = this.table + "_30";
        }
        SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        String startS = formatter.format(start);
        String endS = formatter.format(end);
        dateclause = "Timestamp >= '" + startS + "' and Timestamp <= '" + endS + "'";
        if (emptyPrimeKey) {
          // No primary key: collapse the whole window into one aggregated row.
          groupBy = " group by " + dateclause;
        }
        String query = "insert ignore into " + this.table + "_" + interval
            + " (select " + fields + " from " + table + " where " + dateclause
            + groupBy + ")";
        log.debug(query);
        db.execute(query);
        db.close();
        if (previousStart == start) {
          start = start + (interval * 60000);
          end = start + (interval * 60000);
          previousStart = start;
        }
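
The consolidation query above is assembled from column metadata: numeric columns are averaged, VARCHAR and TIMESTAMP columns pass through unchanged, and the last VARCHAR column drives the GROUP BY. A sketch of just that assembly, with the names and java.sql.Types codes supplied directly instead of read from ResultSetMetaData:

    import java.sql.Types;

    public class FieldListBuilder {
      // Mirrors the loop above: AVG() numeric columns, pass VARCHAR and
      // TIMESTAMP through, and group by the (last) VARCHAR column.
      public static String build(String[] columns, int[] columnsType) {
        String fields = null;
        String groupBy = "";
        for (int i = 0; i < columns.length; i++) {
          if (i == 0) {
            fields = columns[i];
          } else if (columnsType[i] == Types.VARCHAR
              || columnsType[i] == Types.TIMESTAMP) {
            fields = fields + "," + columns[i];
          } else {
            fields = fields + ",AVG(" + columns[i] + ") as " + columns[i];
          }
          if (columnsType[i] == Types.VARCHAR) {
            groupBy = " group by " + columns[i];
          }
        }
        return fields + groupBy;
      }

      public static void main(String[] args) {
        String[] cols = { "timestamp", "host", "cpu_user" };
        int[] types = { Types.TIMESTAMP, Types.VARCHAR, Types.DOUBLE };
        // Prints: timestamp,host,AVG(cpu_user) as cpu_user group by host
        System.out.println(build(cols, types));
      }
    }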

Examples of org.apache.hadoop.chukwa.util.DatabaseWriter

 
  private DatabaseWriter db = null;
  private DataConfig dc = new DataConfig();

  public MetricDataLoader() {
    db = new DatabaseWriter();
  }
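
These excerpts use DatabaseWriter two ways: the no-argument constructor seen here and the cluster-keyed constructor seen in the other examples. A small sketch of choosing between them, following the CLUSTER system property convention the other snippets use:

    import org.apache.hadoop.chukwa.util.DatabaseWriter;

    public class WriterFactory {
      // Prefer the cluster-keyed constructor when CLUSTER is set; otherwise
      // fall back to the default configuration.
      public static DatabaseWriter create() {
        String cluster = System.getProperty("CLUSTER");
        if (cluster != null) {
          return new DatabaseWriter(cluster);
        }
        return new DatabaseWriter();
      }
    }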

Examples of org.apache.hadoop.chukwa.util.DatabaseWriter

      jdbc_url = cc.getURL(cluster);
    }
    HashMap<String, String> dbNames = mdlConfig.startWith("report.db.name.");
    Iterator<String> ki = dbNames.keySet().iterator();
    dbSchema = new HashMap<String, HashMap<String, Integer>>();
    DatabaseWriter dbWriter = new DatabaseWriter(cluster);
    while (ki.hasNext()) {
      String table = dbNames.get(ki.next());
      // Cache each table's column name -> java.sql.Types mapping, probed
      // from the table's _template schema.
      String query = "select * from " + table + "_template limit 1";
      try {
        ResultSet rs = dbWriter.query(query);
        ResultSetMetaData rmeta = rs.getMetaData();
        HashMap<String, Integer> tableSchema = new HashMap<String, Integer>();
        for (int i = 1; i <= rmeta.getColumnCount(); i++) {
          tableSchema.put(rmeta.getColumnName(i), rmeta.getColumnType(i));
        }
        dbSchema.put(table, tableSchema);
      } catch (SQLException ex) {
        log.debug("table: " + table
            + " template does not exist, MDL will not load data for this table.");
      }
    }
    dbWriter.close();
  }
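
The loop above builds a per-table schema cache: a map from column name to java.sql.Types code, probed from each table's _template. The probe for a single table, factored into a method; the limit 1 query and the _template suffix follow the excerpt:

    import java.sql.ResultSet;
    import java.sql.ResultSetMetaData;
    import java.sql.SQLException;
    import java.util.HashMap;

    import org.apache.hadoop.chukwa.util.DatabaseWriter;

    public class SchemaProbe {
      // Returns column name -> java.sql.Types code for one table's template.
      public static HashMap<String, Integer> schemaOf(DatabaseWriter dbWriter,
          String table) throws SQLException {
        ResultSet rs = dbWriter.query("select * from " + table
            + "_template limit 1");
        ResultSetMetaData rmeta = rs.getMetaData();
        HashMap<String, Integer> tableSchema = new HashMap<String, Integer>();
        for (int i = 1; i <= rmeta.getColumnCount(); i++) {
          tableSchema.put(rmeta.getColumnName(i), rmeta.getColumnType(i));
        }
        return tableSchema;
      }
    }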