Examples of RowContainer


Examples of org.apache.hadoop.hive.ql.exec.persistence.RowContainer

   * This happens either when the input file of the big table changes or in
   * closeOp. It needs to fetch all the remaining data from the small tables
   * and try to join it.
   */
  private void joinFinalLeftData() throws HiveException {
    RowContainer bigTblRowContainer = this.candidateStorage[this.posBigTable];

    boolean allFetchDone = allFetchDone();
    // if the remaining data in the small tables is less than or equal to the
    // remaining data in the big table, let them catch up
    while (bigTblRowContainer != null && bigTblRowContainer.size() > 0
        && !allFetchDone) {
      joinOneGroup();
      bigTblRowContainer = this.candidateStorage[this.posBigTable];
      allFetchDone = allFetchDone();
    }
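
Read on its own, the loop above is a simple drain: keep joining key groups until either the big table's buffered rows are consumed or every small-table fetch reports completion. Below is a minimal standalone sketch of just that control flow; the buffer and both helper methods are hypothetical stand-ins for Hive's internals, not Hive code.

import java.util.ArrayDeque;
import java.util.Deque;

// Standalone sketch of the catch-up loop above; the buffer and both helper
// methods are hypothetical stand-ins for Hive's internals.
public class CatchUpLoopSketch {
  private final Deque<Object[]> bigTableBuffer = new ArrayDeque<Object[]>();
  private int pendingSmallTableGroups = 3;

  private boolean allFetchDone() {
    // true once every small-table reader is exhausted
    return pendingSmallTableGroups == 0;
  }

  private void joinOneGroup() {
    // join the current key group; here we just consume one group per side
    bigTableBuffer.poll();
    pendingSmallTableGroups--;
  }

  public void joinFinalLeftData() {
    // same shape as the loop above: drain until the big-table buffer is
    // empty or all small-table fetches are done
    while (!bigTableBuffer.isEmpty() && !allFetchDone()) {
      joinOneGroup();
    }
  }

  public static void main(String[] args) {
    CatchUpLoopSketch s = new CatchUpLoopSketch();
    s.bigTableBuffer.add(new Object[] { "k1", Integer.valueOf(1) });
    s.joinFinalLeftData();
    System.out.println("groups still pending: " + s.pendingSmallTableGroups);
  }
}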

Examples of org.apache.hadoop.hive.ql.exec.persistence.RowContainer

    if (serde == null) {
      containerSize = -1;
    }

    RowContainer rc = new RowContainer(containerSize, hconf, reporter);
    StructObjectInspector rcOI = null;
    if (tblDesc != null) {
      // arbitrary column names used internally for serializing to spill table
      List<String> colNames = Utilities.getColumnNames(tblDesc.getProperties());
      // object inspector for serializing input tuples
      rcOI = ObjectInspectorFactory.getStandardStructObjectInspector(colNames,
          structFieldObjectInspectors);
    }

    rc.setSerDe(serde, rcOI);
    rc.setTableDesc(tblDesc);
    return rc;
  }
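
Once wired up this way, the container behaves as a spillable FIFO buffer for join rows. The sketch below shows typical usage against this era's RowContainer API; add(), first(), next(), and clear() match how contemporary Hive join code drives the container, but treat the exact signatures as assumptions for your Hive version.

import java.util.ArrayList;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.persistence.RowContainer;
import org.apache.hadoop.hive.ql.metadata.HiveException;

public class RowContainerUsageSketch {
  public static void main(String[] args) throws HiveException {
    HiveConf hconf = new HiveConf();
    // Rows beyond the block size spill to a temp file through the SerDe,
    // which is why getRowContainer() calls setSerDe()/setTableDesc().
    RowContainer<ArrayList<Object>> rc =
        new RowContainer<ArrayList<Object>>(100, hconf);

    ArrayList<Object> row = new ArrayList<Object>();
    row.add("key1");
    row.add(Integer.valueOf(42));
    rc.add(row);

    // Cursor-style iteration: first() rewinds, next() advances,
    // and null marks the end of the container.
    for (ArrayList<Object> r = rc.first(); r != null; r = rc.next()) {
      System.out.println(r);
    }
    rc.clear(); // drop buffered rows and any spill file
  }
}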

Examples of org.apache.hadoop.hive.ql.exec.persistence.RowContainer

      values.add((ArrayList<Object>) dummyObj[pos]);
      dummyObjVectors[pos] = values;

      // if serde is null, the input does not need to be spilled out,
      // e.g., when the output columns do not contain the input table's columns
      RowContainer rc = getRowContainer(hconf, pos, alias, joinCacheSize);
      storage.put(pos, rc);

      pos++;
    }

Examples of org.apache.hadoop.hive.ql.exec.persistence.RowContainer

    if (serde == null) {
      containerSize = 1;
    }

    RowContainer rc = new RowContainer(containerSize, hconf);
    StructObjectInspector rcOI = null;
    if (tblDesc != null) {
      // arbitrary column names used internally for serializing to spill table
      List<String> colNames = Utilities.getColumnNames(tblDesc.getProperties());
      // object inspector for serializing input tuples
      rcOI = ObjectInspectorFactory.getStandardStructObjectInspector(colNames,
          joinValuesStandardObjectInspectors.get(pos));
    }

    rc.setSerDe(serde, rcOI);
    rc.setTableDesc(tblDesc);
    return rc;
  }

Examples of org.apache.hadoop.hive.ql.exec.persistence.RowContainer

    // all other tables are small, and are cached in the hash table
    posBigTable = conf.getPosBigTable();

    emptyList = new RowContainer<ArrayList<Object>>(1, hconf);
    RowContainer bigPosRC = getRowContainer(hconf, (byte) posBigTable,
        order[posBigTable], joinCacheSize);
    storage.put((byte) posBigTable, bigPosRC);

    mapJoinRowsKey = HiveConf.getIntVar(hconf,
        HiveConf.ConfVars.HIVEMAPJOINROWSIZE);

Examples of org.apache.hadoop.hive.ql.exec.persistence.RowContainer

      values.add((ArrayList<Object>) dummyObj[pos]);
      dummyObjVectors[pos] = values;

      // if serde is null, the input does not need to be spilled out,
      // e.g., when the output columns do not contain the input table's columns
      RowContainer rc = JoinUtil.getRowContainer(hconf,
          rowContainerStandardObjectInspectors.get((byte) pos),
          alias, joinCacheSize, spillTableDesc, conf, noOuterJoin);
      storage.put(pos, rc);

      pos++;

Examples of org.apache.hadoop.hive.ql.exec.persistence.RowContainer

    // all other tables are small, and are cached in the hash table
    posBigTable = conf.getPosBigTable();

    emptyList = new RowContainer<ArrayList<Object>>(1, hconf);
    RowContainer bigPosRC = JoinUtil.getRowContainer(hconf,
        rowContainerStandardObjectInspectors.get((byte) posBigTable),
        order[posBigTable], joinCacheSize, spillTableDesc, conf, noOuterJoin);
    storage.put((byte) posBigTable, bigPosRC);

    mapJoinRowsKey = HiveConf.getIntVar(hconf,

Examples of org.apache.hadoop.hive.ql.exec.persistence.RowContainer

    int bucketSize = HiveConf.getIntVar(hconf,
        HiveConf.ConfVars.HIVEMAPJOINBUCKETCACHESIZE);
    byte storePos = (byte) 0;
    for (Byte alias : order) {
      RowContainer rc = JoinUtil.getRowContainer(hconf,
          rowContainerStandardObjectInspectors.get(storePos),
          alias, bucketSize, spillTableDesc, conf, noOuterJoin);
      nextGroupStorage[storePos] = rc;
      RowContainer candidateRC = JoinUtil.getRowContainer(hconf,
          rowContainerStandardObjectInspectors.get(storePos),
          alias, bucketSize, spillTableDesc, conf, noOuterJoin);
      candidateStorage[alias] = candidateRC;
      storePos++;
    }
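
Each alias gets two containers because the SMB join double-buffers key groups: candidateStorage holds the group currently being joined while nextGroupStorage accumulates rows for the following key, and the two swap when a group is exhausted. The sketch below is a hypothetical, self-contained illustration of that promotion step, not Hive's exact code.

import java.util.ArrayList;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.persistence.RowContainer;
import org.apache.hadoop.hive.ql.metadata.HiveException;

// Hypothetical illustration of the double buffering the loop above sets up;
// promoteNextGroupToCandidate() is illustrative, not Hive's exact method.
public class GroupDoubleBufferSketch {
  private final RowContainer[] candidateStorage;
  private final RowContainer[] nextGroupStorage;

  GroupDoubleBufferSketch(HiveConf hconf, int numAliases) throws HiveException {
    candidateStorage = new RowContainer[numAliases];
    nextGroupStorage = new RowContainer[numAliases];
    for (int i = 0; i < numAliases; i++) {
      candidateStorage[i] = new RowContainer<ArrayList<Object>>(100, hconf);
      nextGroupStorage[i] = new RowContainer<ArrayList<Object>>(100, hconf);
    }
  }

  // Once the current key group is fully joined, clear its container and
  // promote the buffered next group; the old container is reused to buffer
  // the group after that.
  void promoteNextGroupToCandidate(byte alias) throws HiveException {
    candidateStorage[alias].clear();
    RowContainer old = candidateStorage[alias];
    candidateStorage[alias] = nextGroupStorage[alias];
    nextGroupStorage[alias] = old;
  }
}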

Examples of org.apache.hadoop.hive.ql.exec.persistence.RowContainer

   * This happens either when the input file of the big table changes or in
   * closeOp. It needs to fetch all the remaining data from the small tables
   * and try to join it.
   */
  private void joinFinalLeftData() throws HiveException {
    RowContainer bigTblRowContainer = this.candidateStorage[this.posBigTable];

    boolean allFetchOpDone = allFetchOpDone();
    // if the remaining data in the small tables is less than or equal to the
    // remaining data in the big table, let them catch up
    while (bigTblRowContainer != null && bigTblRowContainer.size() > 0
        && !allFetchOpDone) {
      joinOneGroup();
      bigTblRowContainer = this.candidateStorage[this.posBigTable];
      allFetchOpDone = allFetchOpDone();
    }

Examples of org.apache.hadoop.hive.ql.exec.persistence.RowContainer

    if (serde == null) {
      containerSize = -1;
    }

    RowContainer rc = new RowContainer(containerSize, hconf);
    StructObjectInspector rcOI = null;
    if (tblDesc != null) {
      // arbitrary column names used internally for serializing to spill table
      List<String> colNames = Utilities.getColumnNames(tblDesc.getProperties());
      // object inspector for serializing input tuples
      rcOI = ObjectInspectorFactory.getStandardStructObjectInspector(colNames,
          structFieldObjectInspectors);
    }

    rc.setSerDe(serde, rcOI);
    rc.setTableDesc(tblDesc);
    return rc;
  }