Package org.apache.hadoop.hbase

Examples of org.apache.hadoop.hbase.HTableDescriptor


      byte [] tableName = getBytes(in_tableName);
      try {
        if (admin.tableExists(tableName)) {
          throw new AlreadyExists("table name already in use");
        }
        HTableDescriptor desc = new HTableDescriptor(tableName);
        for (ColumnDescriptor col : columnFamilies) {
          HColumnDescriptor colDesc = ThriftUtilities.colDescFromThrift(col);
          desc.addFamily(colDesc);
        }
        admin.createTable(desc);
      } catch (IOException e) {
        LOG.warn(e.getMessage(), e);
        throw new IOError(e.getMessage());
      }
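The Thrift handler above is a thin wrapper over the client API. Below is a minimal standalone sketch of the same create-table flow, assuming a reachable cluster and a hypothetical table name "demo_table" (0.9x-era client API):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class CreateTableExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      // Mirror the existence check in the handler above.
      if (!admin.tableExists("demo_table")) {
        HTableDescriptor desc = new HTableDescriptor("demo_table");
        desc.addFamily(new HColumnDescriptor("cf"));
        admin.createTable(desc);
      }
    } finally {
      admin.close();
    }
  }
}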


      try {
        TreeMap<ByteBuffer, ColumnDescriptor> columns =
          new TreeMap<ByteBuffer, ColumnDescriptor>();

        HTable table = getTable(tableName);
        HTableDescriptor desc = table.getTableDescriptor();

        for (HColumnDescriptor family : desc.getFamilies()) {
          ColumnDescriptor col = ThriftUtilities.colDescFromHbase(family);
          columns.put(col.name, col);
        }
        return columns;
      } catch (IOException e) {
        LOG.warn(e.getMessage(), e);
        throw new IOError(e.getMessage());
      }
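Reading the schema back is symmetric. A short sketch (same imports as the create example, plus org.apache.hadoop.hbase.client.HTable), again using the hypothetical "demo_table":

    HTable table = new HTable(HBaseConfiguration.create(), "demo_table");
    try {
      HTableDescriptor desc = table.getTableDescriptor();
      for (HColumnDescriptor family : desc.getFamilies()) {
        // Print each column family name, e.g. "cf".
        System.out.println(family.getNameAsString());
      }
    } finally {
      table.close();
    }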

    // Clear it if we fail queuing an open executor.
    addRegionsInTransition(region, OPEN);
    try {
      LOG.info("Received request to open region: " +
        region.getRegionNameAsString());
      HTableDescriptor htd = null;
      if (htds == null) {
        htd = this.tableDescriptors.get(region.getTableName());
      } else {
        htd = htds.get(region.getTableNameAsString());
        if (htd == null) {
          // Cache miss: fall back to the filesystem-backed table descriptors.
          htd = this.tableDescriptors.get(region.getTableName());
        }
      }
      // ... remainder of the open-region handling elided ...

    String tableName = Bytes.toString(hi.getTableName());
    TableInfo tableInfo = tablesInfo.get(tableName);
    Preconditions.checkNotNull("Table " + tableName + "' not present!", tableInfo);
    HTableDescriptor template = tableInfo.getHTD();

    // find min and max key values
    Pair<byte[],byte[]> orphanRegionRange = null;
    for (FileStatus cf : dirs) {
      String cfName = cf.getPath().getName();
      // TODO Figure out what the special dirs are
      if (cfName.startsWith(".") || cfName.equals("splitlog")) continue;

      FileStatus[] hfiles = fs.listStatus(cf.getPath());
      for (FileStatus hfile : hfiles) {
        byte[] start, end;
        HFile.Reader hf = null;
        try {
          CacheConfig cacheConf = new CacheConfig(getConf());
          hf = HFile.createReader(fs, hfile.getPath(), cacheConf);
          hf.loadFileInfo();
          KeyValue startKv = KeyValue.createKeyValueFromKey(hf.getFirstKey());
          start = startKv.getRow();
          KeyValue endKv = KeyValue.createKeyValueFromKey(hf.getLastKey());
          end = endKv.getRow();
        } catch (IOException ioe) {
          LOG.warn("Problem reading orphan file " + hfile + ", skipping", ioe);
          continue;
        } catch (NullPointerException npe) {
          LOG.warn("Orphan file " + hfile + " is possibly a corrupted HFile, skipping", npe);
          continue;
        } finally {
          if (hf != null) {
            hf.close();
          }
        }

        // expand the range to include the range of all hfiles
        if (orphanRegionRange == null) {
          // first range
          orphanRegionRange = new Pair<byte[], byte[]>(start, end);
        } else {
          // TODO add test

          // expand range only if the hfile is wider.
          if (Bytes.compareTo(orphanRegionRange.getFirst(), start) > 0) {
            orphanRegionRange.setFirst(start);
          }
          if (Bytes.compareTo(orphanRegionRange.getSecond(), end) < 0) {
            orphanRegionRange.setSecond(end);
          }
        }
      }
    }
    if (orphanRegionRange == null) {
      LOG.warn("No data in dir " + p + ", sidelining data");
      fixes++;
      sidelineRegionDir(fs, hi);
      return;
    }
    LOG.info("Min max keys are : [" + Bytes.toString(orphanRegionRange.getFirst()) + ", " +
        Bytes.toString(orphanRegionRange.getSecond()) + ")");

    // create new region on hdfs.  move data into place.
    HRegionInfo hri = new HRegionInfo(template.getName(),
        orphanRegionRange.getFirst(), orphanRegionRange.getSecond());
    LOG.info("Creating new region: " + hri);
    HRegion region = HBaseFsckRepair.createHDFSRegionDir(getConf(), hri, template);
    Path target = region.getRegionDir();

    // rename all the data to new region
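The range-expansion step above is plain byte-array comparison and can be exercised in isolation. A sketch with made-up row keys, using org.apache.hadoop.hbase.util.Bytes and org.apache.hadoop.hbase.util.Pair:

    // Widen a [start, end) pair until it covers every per-file row range.
    byte[][][] fileRanges = {
        { Bytes.toBytes("d"), Bytes.toBytes("m") },
        { Bytes.toBytes("a"), Bytes.toBytes("f") },
        { Bytes.toBytes("k"), Bytes.toBytes("z") },
    };
    Pair<byte[], byte[]> range = null;
    for (byte[][] r : fileRanges) {
      if (range == null) {
        range = new Pair<byte[], byte[]>(r[0], r[1]);
      } else {
        if (Bytes.compareTo(range.getFirst(), r[0]) > 0) range.setFirst(r[0]);
        if (Bytes.compareTo(range.getSecond(), r[1]) < 0) range.setSecond(r[1]);
      }
    }
    // range now spans ["a", "z")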

        // only executed once per table.
        modTInfo = new TableInfo(tableName);
        Path hbaseRoot = FSUtils.getRootDir(getConf());
        tablesInfo.put(tableName, modTInfo);
        try {
          HTableDescriptor htd =
              FSTableDescriptors.getTableDescriptor(hbaseRoot.getFileSystem(getConf()),
              hbaseRoot, tableName);
          modTInfo.htds.add(htd);
        } catch (IOException ioe) {
          if (!orphanTableDirs.containsKey(tableName)) {
            // First read failure for this table: record it as an orphan so a
            // descriptor can be fabricated later from the on-disk families.
            // ... remainder elided ...
          }
        }
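The same FSTableDescriptors helper can be called on its own to load a descriptor straight from the filesystem; the call signature is taken from the snippet above, and "demo_table" is again a hypothetical name:

    Configuration conf = HBaseConfiguration.create();
    Path hbaseRoot = FSUtils.getRootDir(conf);
    HTableDescriptor htd = FSTableDescriptors.getTableDescriptor(
        hbaseRoot.getFileSystem(conf), hbaseRoot, "demo_table");
    System.out.println(htd);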

   * @param tableName name of the table whose descriptor should be fabricated
   * @param columns column family names to add to the fabricated descriptor
   * @return true if a descriptor was created, false if no columns were given
   * @throws IOException
   */
  private boolean fabricateTableInfo(String tableName, Set<String> columns) throws IOException {
    if (columns == null || columns.isEmpty()) return false;
    HTableDescriptor htd = new HTableDescriptor(tableName);
    for (String columnFamily : columns) {
      htd.addFamily(new HColumnDescriptor(columnFamily));
    }
    FSTableDescriptors.createTableDescriptor(htd, getConf(), true);
    return true;
  }
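A hypothetical call site for fabricateTableInfo, with the family names assumed to have been recovered by scanning the table's region directories (java.util.Arrays and java.util.HashSet imports assumed):

    Set<String> recoveredFamilies = new HashSet<String>(Arrays.asList("cf1", "cf2"));
    // Returns false when no families were given, true once .tableinfo is written.
    boolean fabricated = fabricateTableInfo("orphan_table", recoveredFamilies);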

        Entry<String, Set<String>> entry = (Entry<String, Set<String>>) iter.next();
        String tableName = entry.getKey();
        LOG.info("Trying to fix orphan table error: " + tableName);
        if (j < htds.length) {
          if (tableName.equals(Bytes.toString(htds[j].getName()))) {
            HTableDescriptor htd = htds[j];
            LOG.info("fixing orphan table: " + tableName + " from cache");
            FSTableDescriptors.createTableDescriptor(
                hbaseRoot.getFileSystem(getConf()), hbaseRoot, htd, true);
            j++;
            iter.remove();
          }
        }

   */
  public HTableDescriptor deleteColumn(byte[] tableName, byte[] familyName)
      throws IOException {
    LOG.info("DeleteColumn. Table = " + Bytes.toString(tableName)
        + " family = " + Bytes.toString(familyName));
    HTableDescriptor htd = this.services.getTableDescriptors().get(tableName);
    if (htd == null) {
      // Guard against a missing descriptor, as the addColumn example below does.
      throw new InvalidFamilyOperationException("Family '" +
        Bytes.toString(familyName) + "' cannot be deleted because the table descriptor is missing");
    }
    htd.removeFamily(familyName);
    this.services.getTableDescriptors().add(htd);
    return htd;
  }

  public HTableDescriptor modifyColumn(byte[] tableName, HColumnDescriptor hcd)
      throws IOException {
    LOG.info("AddModifyColumn. Table = " + Bytes.toString(tableName)
        + " HCD = " + hcd.toString());

    HTableDescriptor htd = this.services.getTableDescriptors().get(tableName);
    byte [] familyName = hcd.getName();
    if (!htd.hasFamily(familyName)) {
      throw new InvalidFamilyOperationException("Family '" +
        Bytes.toString(familyName) + "' doesn't exist, so it cannot be modified");
    }
    htd.addFamily(hcd);
    this.services.getTableDescriptors().add(htd);
    return htd;
  }

   */
  public HTableDescriptor addColumn(byte[] tableName, HColumnDescriptor hcd)
      throws IOException {
    LOG.info("AddColumn. Table = " + Bytes.toString(tableName) + " HCD = " +
      hcd.toString());
    HTableDescriptor htd = this.services.getTableDescriptors().get(tableName);
    if (htd == null) {
      throw new InvalidFamilyOperationException("Family '" +
        hcd.getNameAsString() + "' cannot be added because the table descriptor is missing");
    }
    htd.addFamily(hcd);
    this.services.getTableDescriptors().add(htd);
    return htd;
  }
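These master-side schema methods are normally reached through HBaseAdmin. A sketch of the matching client calls, assuming "demo_table" exists and noting that schema changes in this API generation expect the table to be disabled first:

    HBaseAdmin admin = new HBaseAdmin(HBaseConfiguration.create());
    try {
      admin.disableTable("demo_table");
      admin.addColumn("demo_table", new HColumnDescriptor("cf_new"));
      HColumnDescriptor modified = new HColumnDescriptor("cf_new");
      modified.setMaxVersions(5);                  // keep up to five cell versions
      admin.modifyColumn("demo_table", modified);  // replaces the family's schema
      admin.deleteColumn("demo_table", "cf_new");
      admin.enableTable("demo_table");
    } finally {
      admin.close();
    }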
