Examples of DatabaseWriter

Examples of org.apache.hadoop.chukwa.util.DatabaseWriter

    // Build a macro-substituted query against a time-partitioned table,
    // run it through DatabaseWriter, and walk the result set.
    ChukwaConfiguration cc = new ChukwaConfiguration();
    String query = "select * from ["+table+"];";
    Macro mp = new Macro(current,query);
    query = mp.toString();
    try {
      DatabaseWriter db = new DatabaseWriter(cluster);
      ResultSet rs = db.query(query);
      while(rs.next()) {
        String value = rs.getString(1);
      }
      db.close();
    } catch(SQLException ex) {
      fail("SQL Exception: "+ExceptionUtil.getStackTrace(ex));
    }
  }
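
The excerpt above reduces to a simple open-query-close pattern. The following is a minimal, self-contained sketch of that pattern, assuming only the DatabaseWriter, Macro, and ExceptionUtil APIs visible in these examples; the "demo" cluster and "system_metrics" table are placeholders, and the Macro import path is assumed from the usual Chukwa source layout:

    import java.sql.ResultSet;
    import java.sql.SQLException;
    import org.apache.hadoop.chukwa.database.Macro;
    import org.apache.hadoop.chukwa.util.DatabaseWriter;
    import org.apache.hadoop.chukwa.util.ExceptionUtil;

    public class DatabaseWriterSketch {
      public static void main(String[] args) {
        long now = System.currentTimeMillis();
        // Macro resolves [system_metrics] to the partition table covering "now".
        String query = new Macro(now, "select * from [system_metrics];").toString();
        DatabaseWriter db = new DatabaseWriter("demo");
        try {
          ResultSet rs = db.query(query);
          while (rs.next()) {
            System.out.println(rs.getString(1));   // first column of each row
          }
        } catch (SQLException ex) {
          System.err.println(ExceptionUtil.getStackTrace(ex));
        } finally {
          db.close();
        }
      }
    }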

Examples of org.apache.hadoop.chukwa.util.DatabaseWriter


  public void testTableCreator() {
    // Verify that TableCreator produced each expected partition table:
    // every table must exist and contain no rows yet.
    for(int i=0;i<timeWindow.length;i++) {
      try {
        DatabaseWriter db = new DatabaseWriter(cluster);
        for(String table : tables) {
          String query = "select * from ["+table+"];";
          Macro mp = new Macro(current,query);
          query = mp.toString();
          ResultSet rs = db.query(query);
          rs.last();
          int count = rs.getRow();
          assertTrue("Table should exist and return empty result.", count==0);
        }
        db.close();
      } catch(SQLException ex) {
        fail("SQL Exception: "+ExceptionUtil.getStackTrace(ex));
      }
    }
  }
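
The rs.last()/rs.getRow() idiom used above to count rows only works when the driver hands back a scrollable ResultSet, which the snippet suggests DatabaseWriter.query() does. A plain-JDBC sketch of the same emptiness check, where the connection URL and table name are hypothetical:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class RowCountSketch {
      public static void main(String[] args) throws SQLException {
        // Hypothetical JDBC URL; a scrollable statement is requested explicitly.
        Connection conn = DriverManager.getConnection("jdbc:mysql://localhost/demo");
        Statement st = conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE,
            ResultSet.CONCUR_READ_ONLY);
        ResultSet rs = st.executeQuery("select * from system_metrics");
        rs.last();                 // jump to the final row (no-op if empty)...
        int count = rs.getRow();   // ...whose index equals the row count
        System.out.println(count == 0 ? "empty" : count + " rows");
        conn.close();
      }
    }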

Examples of org.apache.hadoop.chukwa.util.DatabaseWriter

  String cluster = "demo";
  long current = Calendar.getInstance().getTimeInMillis();

  public void setUp() throws Exception {
    // Bootstrap the schema: run the bundled DDL script, then let
    // TableCreator build the time-partitioned tables for each window.
    System.setProperty("CLUSTER","demo");
    DatabaseWriter db = new DatabaseWriter(cluster);
    File aFile = new File(System.getenv("CHUKWA_CONF_DIR")
                 + File.separator + "database_create_tables.sql");
    String buffer = readFile(aFile);
    // The DDL script holds one statement per ";"; skip fragments too
    // short to be real statements.
    String[] tables = buffer.split(";");
    for(String table : tables) {
      if(table.length()>5) {
        db.execute(table);
      }
    }
    db.close();
    for(int i=0;i<timeWindow.length;i++) {
      TableCreator tc = new TableCreator();
      long start = current;
      // timeWindow is in days: 1440 minutes * 60 seconds * 1000 ms.
      long end = current + (timeWindow[i]*1440*60*1000);
      tc.createTables(start, end);
    }
  }

Examples of org.apache.hadoop.chukwa.util.DatabaseWriter

  public void tearDown() {
    // Drop every table in the test database so each run starts clean.
    DatabaseWriter db = null;
    try {
      db = new DatabaseWriter(cluster);
      ResultSet rs = db.query("show tables");
      ArrayList<String> list = new ArrayList<String>();
      while(rs.next()) {
        list.add(rs.getString(1));
      }
      for(String table : list) {
        db.execute("drop table "+table);
      }
    } catch(Throwable ex) {
      // Ignore cleanup failures; the next setUp() recreates the schema.
    } finally {
      if(db!=null) {
        db.close();
      }
    }
  }
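
The cleanup in tearDown() (enumerate with "show tables", then drop each name) is independent of any particular test. Extracted as a helper it might look like the sketch below; the method name is illustrative, and it uses java.sql.ResultSet, java.sql.SQLException, and java.util.ArrayList as in the snippets above:

    // Illustrative helper: drop every table visible to the connection.
    // Collect the names first, since dropping tables while still iterating
    // the "show tables" ResultSet could invalidate it.
    static void dropAllTables(DatabaseWriter db) throws SQLException {
      ResultSet rs = db.query("show tables");
      ArrayList<String> tables = new ArrayList<String>();
      while (rs.next()) {
        tables.add(rs.getString(1));
      }
      for (String t : tables) {
        db.execute("drop table " + t);
      }
    }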

Examples of org.apache.hadoop.chukwa.util.DatabaseWriter

  public void testAggregator() {
    // Run every statement from aggregator.sql through the Aggregator and
    // fail the test on the first exception.
    Aggregator dba = new Aggregator();
    DatabaseWriter db = new DatabaseWriter(cluster);
    dba.setWriter(db);
    String queries = Aggregator.getContents(new File(System
        .getenv("CHUKWA_CONF_DIR")
        + File.separator + "aggregator.sql"));
    String[] query = queries.split("\n");
    for (int i = 0; i < query.length; i++) {
      // Skip comment lines, marked with "#".
      if(query[i].indexOf("#")==-1) {
        try {
          dba.process(query[i]);
          assertTrue("Completed query: "+query[i],true);
        } catch(Throwable ex) {
          fail("Exception: "+ExceptionUtil.getStackTrace(ex));
        }
      }
    }
    db.close();
  }

Examples of org.apache.hadoop.chukwa.util.DatabaseWriter

  public void createTables(long start, long end) throws Exception {
    String cluster = System.getProperty("CLUSTER");
    if (cluster == null) {
      cluster = "unknown";
    }
    DatabaseWriter dbw = new DatabaseWriter(cluster);
    // Each "report.db.name.*" property maps a metric group to a table name
    // pattern; create the partition tables each pattern resolves to.
    HashMap<String, String> dbNames = dbc.startWith("report.db.name.");
    Iterator<String> ki = dbNames.keySet().iterator();
    while (ki.hasNext()) {
      String name = ki.next();
      String tableName = dbNames.get(name);
      if (!RegexUtil.isRegex(tableName)) {
        log.warn("Skipping tableName: '" + tableName
            + "' because there was an error parsing it as a regex: "
            + RegexUtil.regexError(tableName));
        return;
      }
      String[] tableList = dbc.findTableName(tableName, start, end);
      log.debug("table name: " + tableList[0]);
      try {
        // Partitioned table names follow <base>_<partition>_<suffix>;
        // recover the base name and the starting partition number.
        String[] parts = tableList[0].split("_");
        int partition = Integer.parseInt(parts[parts.length - 2]);
        String table = "";
        for (int i = 0; i < parts.length - 2; i++) {
          if (i != 0) {
            table = table + "_";
          }
          table = table + parts[i];
        }
        String query = "show create table " + table + "_template;";
        ResultSet rs = dbw.query(query);
        while (rs.next()) {
          query = rs.getString(2);
          log.debug("table schema: " + query);
          log.debug("template table name:" + table + "_template");
          // Clone the template's schema into the current partition and the
          // next two.
          for (int p = 0; p < 3; p++) {
            log.debug("replacing with table name:" + table + "_" + partition
                + "_" + parts[parts.length - 1]);
            String createPartition = query.replaceFirst(table + "_template",
                table + "_" + partition + "_" + parts[parts.length - 1]);
            createPartition = createPartition.replaceFirst("TABLE",
                "TABLE IF NOT EXISTS");
            dbw.execute(createPartition);
            partition++;
          }
        }
      } catch (NumberFormatException e) {
        log.error("Error in parsing table partition number, skipping table:"
            + tableList[0]);
      } catch (ArrayIndexOutOfBoundsException e) {
        log.debug("Skipping table:" + tableList[0]
            + ", because it has no partition configuration.");
      }
    }
    dbw.close();
  }

Examples of org.apache.hadoop.chukwa.util.DatabaseWriter

  public void dropTables(long start, long end) {
    // Remove partition tables that have aged out of the retention window.
    String cluster = System.getProperty("CLUSTER");
    if (cluster == null) {
      cluster = "unknown";
    }
    DatabaseWriter dbw = new DatabaseWriter(cluster);
    try {
      HashMap<String, String> dbNames = dbc.startWith("report.db.name.");
      Iterator<String> ki = dbNames.keySet().iterator();
      while (ki.hasNext()) {
        String name = ki.next();
        String tableName = dbNames.get(name);
        if (!RegexUtil.isRegex(tableName)) {
          log.warn("Skipping tableName: '" + tableName
              + "' because there was an error parsing it as a regex: "
              + RegexUtil.regexError(tableName));
          return;
        }
        String[] tableList = dbc.findTableName(tableName, start, end);
        for (String tl : tableList) {
          log.debug("table name: " + tl);
          try {
            String[] parts = tl.split("_");
            int partition = Integer.parseInt(parts[parts.length - 2]);
            String table = "";
            for (int i = 0; i < parts.length - 2; i++) {
              if (i != 0) {
                table = table + "_";
              }
              table = table + parts[i];
            }
            // Retain the current partition and the two before it; drop the
            // two older ones, if they exist.
            partition = partition - 3;
            String dropPartition = "drop table if exists " + table + "_"
                + partition + "_" + parts[parts.length - 1];
            dbw.execute(dropPartition);
            partition--;
            if(partition>=0) {
              dropPartition = "drop table if exists " + table + "_" + partition
                  + "_" + parts[parts.length - 1];
              dbw.execute(dropPartition);
            }
          } catch (NumberFormatException e) {
            log.error("Error in parsing table partition number, skipping table:"
                + tl);
          } catch (ArrayIndexOutOfBoundsException e) {
            log.debug("Skipping table:" + tl
                + ", because it has no partition configuration.");
          }
        }
      }
      dbw.close();
    } catch (Exception e) {
      e.printStackTrace();
    }
  }
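
Both createTables() and dropTables() lean on the same <base>_<partition>_<suffix> naming convention for partition tables. A standalone sketch of that parse, using a made-up table name purely for illustration:

    public class PartitionNameSketch {
      public static void main(String[] args) {
        String tl = "system_metrics_2145_week";   // hypothetical partition table
        String[] parts = tl.split("_");
        // The partition number is the second-to-last segment; everything
        // before it is the base table name, and the last segment is the
        // time-window suffix.
        int partition = Integer.parseInt(parts[parts.length - 2]);
        StringBuilder base = new StringBuilder(parts[0]);
        for (int i = 1; i < parts.length - 2; i++) {
          base.append("_").append(parts[i]);
        }
        // Prints: base=system_metrics partition=2145 suffix=week
        System.out.println("base=" + base + " partition=" + partition
            + " suffix=" + parts[parts.length - 1]);
      }
    }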

Examples of org.apache.hadoop.chukwa.util.DatabaseWriter

    // Resolve the JDBC URL for the cluster, then borrow DatabaseWriter's
    // connection for direct JDBC use.
    if (!cluster.equals("")) {
      ClusterConfig cc = new ClusterConfig();
      jdbc_url = cc.getURL(cluster);
    }
    try {
      DatabaseWriter dbWriter = new DatabaseWriter(cluster);
      conn = dbWriter.getConnection();
    } catch(Exception ex) {
      throw new Exception("JDBC URL does not exist for: "+jdbc_url);
    }
    log.debug("Initialized JDBC URL: " + jdbc_url);
    HashMap<String, String> dbNames = mdlConfig.startWith("report.db.name.");

Examples of org.apache.hadoop.chukwa.util.DatabaseWriter

      time_online = new TimeHandler(this.request, this.timezone);
      start = time_online.getStartTime();
      end = time_online.getEndTime();
    }
   
    DatabaseWriter dbw = new DatabaseWriter(this.cluster);

    // Set up the query; [table], '[start]', and '[end]' are placeholders
    // that Macro resolves to the real partition table and time bounds.
    String query;
    if (this.query_state != null && this.query_state.equals("read")) {
      query = "select block_id,start_time,finish_time,start_time_millis,finish_time_millis,status,state_name,hostname,other_host,bytes from ["+table+"] where finish_time between '[start]' and '[end]' and (state_name like 'read_local' or state_name like 'read_remote')";
    } else if (this.query_state != null && this.query_state.equals("write")) {
      query = "select block_id,start_time,finish_time,start_time_millis,finish_time_millis,status,state_name,hostname,other_host,bytes from ["+table+"] where finish_time between '[start]' and '[end]' and (state_name like 'write_local' or state_name like 'write_remote' or state_name like 'write_replicated')";
    } else {
      query = "select block_id,start_time,finish_time,start_time_millis,finish_time_millis,status,state_name,hostname,other_host,bytes from ["+table+"] where finish_time between '[start]' and '[end]' and state_name like '" + query_state + "'";
    }
    Macro mp = new Macro(start,end,query);
    query = mp.toString() + " order by start_time";
   
    ArrayList<HashMap<String, Object>> events = new ArrayList<HashMap<String, Object>>();

    ResultSet rs = null;
   
    log.debug("Query: " + query);
    // run query, extract results
    try {
      rs = dbw.query(query);
      ResultSetMetaData rmeta = rs.getMetaData();
      int col = rmeta.getColumnCount();
      while (rs.next()) {
        HashMap<String, Object> event = new HashMap<String, Object>();
        long event_time=0;
        for(int i=1;i<=col;i++) {
          // Normalize TIMESTAMP columns to epoch milliseconds; everything
          // else comes back as a string.
          if(rmeta.getColumnType(i)==java.sql.Types.TIMESTAMP) {
            event.put(rmeta.getColumnName(i),rs.getTimestamp(i).getTime());
          } else {
            event.put(rmeta.getColumnName(i),rs.getString(i));
          }
        }
        events.add(event);
      }
    } catch (SQLException ex) {
      // handle any errors
      log.error("SQLException: " + ex.getMessage());
      log.error("SQLState: " + ex.getSQLState());
      log.error("VendorError: " + ex.getErrorCode());
    } finally {
      dbw.close();
    }   
    SimpleDateFormat format = new SimpleDateFormat("MMM dd yyyy HH:mm:ss");

    log.info(events.size() + " results returned.");
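
The extraction loop above is generic enough to reuse anywhere a ResultSet needs to become plain objects. Pulled out on its own it might read as the sketch below; the helper name is illustrative, and it uses java.sql and java.util types as in the snippet above:

    // Illustrative helper: flatten a ResultSet into column-name -> value
    // maps, converting SQL TIMESTAMP columns to epoch milliseconds.
    static ArrayList<HashMap<String, Object>> toRows(ResultSet rs)
        throws SQLException {
      ResultSetMetaData rmeta = rs.getMetaData();
      int col = rmeta.getColumnCount();
      ArrayList<HashMap<String, Object>> rows =
          new ArrayList<HashMap<String, Object>>();
      while (rs.next()) {
        HashMap<String, Object> row = new HashMap<String, Object>();
        for (int i = 1; i <= col; i++) {
          if (rmeta.getColumnType(i) == java.sql.Types.TIMESTAMP) {
            row.put(rmeta.getColumnName(i), rs.getTimestamp(i).getTime());
          } else {
            row.put(rmeta.getColumnName(i), rs.getString(i));
          }
        }
        rows.add(row);
      }
      return rows;
    }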