Package org.apache.hadoop.hbase.client

Examples of org.apache.hadoop.hbase.client.Mutation
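Mutation is the abstract base class for HBase's client-side write operations; Put, Delete, Append, and Increment all extend it, so code that handles a List<Mutation> can treat heterogeneous writes uniformly through shared methods such as getRow(), the family map accessors, and the per-operation attribute map. The excerpts below, drawn from Apache Phoenix and from HBase itself, show typical patterns: decoding row keys, translating operations into concrete Puts and Deletes, and intercepting mutations in coprocessors.

As a baseline, here is a minimal, self-contained sketch of creating and submitting the two most common Mutation subclasses. It assumes the HBase 1.x+ client API (most excerpts below predate it); the table, family, and qualifier names are placeholders.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MutationExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("example_table"))) {
                byte[] row = Bytes.toBytes("row1");
                // A Put and a Delete are both Mutations on the same row.
                Put put = new Put(row);
                put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
                Delete delete = new Delete(row);
                delete.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("stale_q"));
                // The common superclass lets generic code handle both uniformly.
                for (Mutation m : new Mutation[] { put, delete }) {
                    System.out.println("mutating row: " + Bytes.toString(m.getRow()));
                }
                table.put(put);
                table.delete(delete);
            }
        }
    }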


   
    // Phoenix metadata client: createTable receives the new table's metadata as a
    // List<Mutation> and recovers tenant id, schema name, and table name by
    // decoding the row key of the header row.
    @Override
    public MetaDataMutationResult createTable(final List<Mutation> tableMetaData, byte[] physicalTableName, PTableType tableType,
            Map<String,Object> tableProps, final List<Pair<byte[],Map<String,Object>>> families, byte[][] splits) throws SQLException {
        byte[][] rowKeyMetadata = new byte[3][];
        Mutation m = MetaDataUtil.getPutOnlyTableHeaderRow(tableMetaData);
        byte[] key = m.getRow();
        // Split the composite row key into its three components.
        SchemaUtil.getVarChars(key, rowKeyMetadata);
        byte[] tenantIdBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
        byte[] schemaBytes = rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
        byte[] tableBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
        byte[] tableName = physicalTableName != null ? physicalTableName : SchemaUtil.getTableNameAsBytes(schemaBytes, tableBytes);
        // ... (remainder of method elided)


    // Phoenix metadata client: addColumn decodes the first metadata Mutation's row
    // key and rebuilds the metadata key for the table being altered.
    @Override
    public MetaDataMutationResult addColumn(final List<Mutation> tableMetaData, List<Pair<byte[],Map<String,Object>>> families, PTable table) throws SQLException {
        byte[][] rowKeyMetaData = new byte[3][];
        PTableType tableType = table.getType();

        Mutation m = tableMetaData.get(0);
        byte[] rowKey = m.getRow();
        SchemaUtil.getVarChars(rowKey, rowKeyMetaData);
        byte[] tenantIdBytes = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
        byte[] schemaBytes = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
        byte[] tableBytes = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
        byte[] tableKey = SchemaUtil.getTableKey(tenantIdBytes, schemaBytes, tableBytes);
        // ... (remainder of method elided)

        // Phoenix indexing: build a skip scan over the batch's row keys to prime
        // the block cache with the table rows being indexed, as the index
        // maintenance code does a point scan per row.
        List<KeyRange> keys = Lists.newArrayListWithExpectedSize(miniBatchOp.size());
        List<IndexMaintainer> maintainers = new ArrayList<IndexMaintainer>();
        for (int i = 0; i < miniBatchOp.size(); i++) {
            Mutation m = miniBatchOp.getOperation(i).getFirst();
            keys.add(PDataType.VARBINARY.getKeyRange(m.getRow()));
            maintainers.addAll(getCodec().getIndexMaintainers(m.getAttributesMap()));
        }
        Scan scan = IndexManagementUtil.newLocalStateScan(maintainers);
        ScanRanges scanRanges = ScanRanges.create(SchemaUtil.VAR_BINARY_SCHEMA, Collections.singletonList(keys), ScanUtil.SINGLE_COLUMN_SLOT_SPAN);
        scanRanges.setScanStartStopRow(scan);
        scan.setFilter(scanRanges.getSkipScanFilter());
        // ... (remainder elided)

  // From an index-update test: verify that the IndexUpdateManager returns the
  // pending mutations in order, comparing by reference because row-level
  // equality would treat distinct mutations on the same row as equal.
  private void validate(IndexUpdateManager manager, List<Mutation> pending) {
    for (Pair<Mutation, byte[]> entry : manager.toMap()) {
      assertEquals("Table name didn't match for stored entry!", table, entry.getSecond());
      Mutation m = pending.remove(0);
      // test with == to match the exact entries; Mutation.equals just checks the row
      assertTrue(
        "Didn't get the expected mutation! Expected: " + m + ", but got: " + entry.getFirst(),
        m == entry.getFirst());
    }
    // ... (remainder elided)
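The == check above matters because row-level comparison considers two distinct mutations on the same row equal. A small sketch of the distinction, assuming the pre-2.x Comparable<Row> contract that these excerpts target:

    Put a = new Put(Bytes.toBytes("row1"));
    Put b = new Put(Bytes.toBytes("row1"));
    // Same row key, so row-based comparison reports equality...
    assert a.compareTo(b) == 0;
    // ...but they remain distinct objects, which is what the test asserts on.
    assert a != b;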

                } else {
                    if (op == Sequence.MetaOp.CREATE_SEQUENCE) {
                        return getErrorResult(row, clientTimestamp, SQLExceptionCode.SEQUENCE_ALREADY_EXIST.getErrorCode());
                    }
                }
                // Phoenix sequence coprocessor: translate the sequence operation
                // into the concrete Mutation (a Put or a Delete) for the sequence row.
                Mutation m = null;
                switch (op) {
                case RETURN_SEQUENCE:
                    KeyValue currentValueKV = result.raw()[0];
                    long expectedValue = PDataType.LONG.getCodec().decodeLong(append.getAttribute(CURRENT_VALUE_ATTRIB), 0, SortOrder.getDefault());
                    long value = PDataType.LONG.getCodec().decodeLong(currentValueKV.getBuffer(), currentValueKV.getValueOffset(), SortOrder.getDefault());
                    // Timestamp should match exactly, or we may have the wrong sequence
                    if (expectedValue != value || currentValueKV.getTimestamp() != clientTimestamp) {
                        return new Result(Collections.singletonList(KeyValueUtil.newKeyValue(row, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, currentValueKV.getTimestamp(), ByteUtil.EMPTY_BYTE_ARRAY)));
                    }
                    m = new Put(row, currentValueKV.getTimestamp());
                    m.getFamilyMap().putAll(append.getFamilyMap());
                    break;
                case DROP_SEQUENCE:
                    m = new Delete(row, clientTimestamp, null);
                    break;
                case CREATE_SEQUENCE:
                    m = new Put(row, clientTimestamp);
                    m.getFamilyMap().putAll(append.getFamilyMap());
                    break;
                }
                if (!hadClientTimestamp) {
                    // The client sent no timestamp, so the cells still carry the
                    // LATEST_TIMESTAMP placeholder; stamp them with the server time.
                    for (List<KeyValue> kvs : m.getFamilyMap().values()) {
                        for (KeyValue kv : kvs) {
                            kv.updateLatestStamp(clientTimestampBuf);
                        }
                    }
                }
                // ... (remainder elided)
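A note on that final loop: cells created without an explicit timestamp carry HConstants.LATEST_TIMESTAMP as a placeholder, and KeyValue.updateLatestStamp only rewrites cells still holding that placeholder. A minimal sketch, assuming the pre-1.0 KeyValue API used above; row, family, and qualifier are placeholders:

    byte[] row = Bytes.toBytes("row1"), family = Bytes.toBytes("cf"), qualifier = Bytes.toBytes("q");
    long serverTs = System.currentTimeMillis();
    byte[] tsBuf = Bytes.toBytes(serverTs);
    // A KeyValue built without a timestamp carries the LATEST_TIMESTAMP placeholder.
    KeyValue kv = new KeyValue(row, family, qualifier);
    kv.updateLatestStamp(tsBuf); // rewrites only cells still at the placeholder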


      // From the covered-column index builder: register the index update and,
      // when needed, a cleanup delete for the stale index row.
      // only make the put if the index update has been set up
      if (update.isValid()) {
        byte[] table = update.getTableName();
        Mutation mutation = update.getUpdate();
        updateMap.addIndexUpdate(table, mutation);

        // only make the cleanup delete if we made a put and need cleanup
        if (needsCleanup) {
          // there is a TS for the interested columns that is greater than the one
          // in the put, so we need to issue a delete at that same timestamp
          Delete d = new Delete(mutation.getRow());
          d.setTimestamp(tracker.getTS());
          updateMap.addIndexUpdate(table, d);
        }
      }
    }
    // ... (remainder elided)

  /**
   * Fix up the current updates, given the pending mutation.
   * @param updates current updates
   * @param pendingMutation the mutation about to be applied
   */
  protected void fixUpCurrentUpdates(Collection<Mutation> updates, Mutation pendingMutation) {
    // need to check each entry to see if we have a duplicate
    Mutation toRemove = null;
    Delete pendingDelete = pendingMutation instanceof Delete ? (Delete) pendingMutation : null;
    boolean sawRowMatch = false;
    for (Mutation stored : updates) {
      // mutations compare by row key first, so this locates the matching row
      int compare = pendingMutation.compareTo(stored);
      // skip to the right row
      // ... (remainder elided)

  // From HBase's AccessController: when cell-level ACLs are enabled, re-check
  // each mutation in the batch against cell permissions before it is applied.
  public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
      MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
    if (cellFeaturesEnabled && !compatibleEarlyTermination) {
      TableName table = c.getEnvironment().getRegion().getRegionInfo().getTable();
      for (int i = 0; i < miniBatchOp.size(); i++) {
        Mutation m = miniBatchOp.getOperation(i);
        if (m.getAttribute(CHECK_COVERING_PERM) != null) {
          // The table, CF, and qualifier permission checks failed, so give the
          // cell-level permission check a chance
          OpType opType;
          if (m instanceof Put) {
            checkForReservedTagPresence(getActiveUser(), m);
            opType = OpType.PUT;
          } else {
            opType = OpType.DELETE;
          }
          AuthResult authResult = null;
          if (checkCoveringPermission(opType, c.getEnvironment(), m.getRow(), m.getFamilyCellMap(),
              m.getTimeStamp(), Action.WRITE)) {
            authResult = AuthResult.allow(opType.toString(), "Covering cell set", getActiveUser(),
                Action.WRITE, table, m.getFamilyCellMap());
          } else {
            authResult = AuthResult.deny(opType.toString(), "Covering cell set", getActiveUser(),
                Action.WRITE, table, m.getFamilyCellMap());
          }
          logResult(authResult);
          if (!authResult.isAllowed()) {
            throw new AccessDeniedException("Insufficient permissions "
                + authResult.toContextString());
            // ... (remainder elided)
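The CHECK_COVERING_PERM marker above travels in the mutation's attribute map, a general-purpose mechanism for tagging an operation with metadata that server-side code such as this coprocessor can read back. A minimal sketch; the attribute name is a placeholder:

    Put put = new Put(Bytes.toBytes("row1"));
    put.setAttribute("my.app.flag", Bytes.toBytes(true));
    // ... later, e.g. inside a server-side coprocessor hook:
    byte[] flag = put.getAttribute("my.app.flag"); // null if never set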

    // From HBase WAL replay: rebuild the Put/Delete mutations of a WAL entry
    // from its associated cells.
    int count = entry.getAssociatedCellCount();
    List<Pair<MutationType, Mutation>> mutations = new ArrayList<Pair<MutationType, Mutation>>();
    Cell previousCell = null;
    Mutation m = null;
    HLogKey key = null;
    WALEdit val = null;
    if (logEntry != null) val = new WALEdit();

    for (int i = 0; i < count; i++) {
        // ... (loop body elided)

  // From HBase's HRegion: run the coprocessor pre-hooks for a batch of
  // mutations before any row locks are taken. (Method signature elided.)
      throws IOException {
    /* Run coprocessor pre hook outside of locks to avoid deadlock */
    WALEdit walEdit = new WALEdit();
    if (coprocessorHost != null) {
      for (int i = 0; i < batchOp.operations.length; i++) {
        Mutation m = batchOp.operations[i];
        if (m instanceof Put) {
          if (coprocessorHost.prePut((Put) m, walEdit, m.getDurability())) {
            // pre hook says skip this Put:
            // mark as success and skip in doMiniBatchMutation
            batchOp.retCodeDetails[i] = OperationStatus.SUCCESS;
          }
        } else if (m instanceof Delete) {
          Delete curDel = (Delete) m;
          if (curDel.getFamilyCellMap().isEmpty()) {
            // handle the delete-a-whole-row case (empty family map)
            prepareDelete(curDel);
          }
          if (coprocessorHost.preDelete(curDel, walEdit, m.getDurability())) {
            // pre hook says skip this Delete:
            // mark as success and skip in doMiniBatchMutation
            batchOp.retCodeDetails[i] = OperationStatus.SUCCESS;
          }
        } else {
          // ... (remaining mutation types elided)
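The boolean returned by coprocessorHost.prePut above is driven by observers calling bypass() on their context. As an illustration, here is a minimal RegionObserver that skips selected Puts, assuming the 0.98/1.x observer API that matches this excerpt; the class name and attribute key are hypothetical:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
    import org.apache.hadoop.hbase.regionserver.wal.WALEdit;

    public class SkipFlaggedPutsObserver extends BaseRegionObserver {
        @Override
        public void prePut(ObserverContext<RegionCoprocessorEnvironment> e, Put put,
                WALEdit edit, Durability durability) throws IOException {
            if (put.getAttribute("my.app.skip") != null) {
                // Tells the host to skip this Put; HRegion then marks it SUCCESS.
                e.bypass();
            }
        }
    }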
