/**
* Copyright 2010 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.dappervision.hbase.mapred;

import java.io.IOException;
import java.io.UnsupportedEncodingException;

import org.apache.commons.codec.binary.Base64;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.RegexStringComparator;
import org.apache.hadoop.hbase.filter.RowFilter;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobConfigurable;
import org.apache.hadoop.util.StringUtils;

/**
* Convert HBase tabular data into a format that is consumable by Map/Reduce.
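 *
 * <p>A minimal, hypothetical wiring sketch; the table name, the column, and
 * the Commons Codec 1.4+ Base64 helper are illustrative assumptions:
 *
 * <pre>
 * JobConf job = new JobConf();
 * // the input "path" names the HBase table
 * FileInputFormat.setInputPaths(job, new Path("my_table"));
 * job.set(TypedBytesTableInputFormat.COLUMN_LIST,
 *     Base64.encodeBase64String(Bytes.toBytes("cf:qual")));
 * job.setInputFormat(TypedBytesTableInputFormat.class);
 * </pre>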
*/
@Deprecated
public class TypedBytesTableInputFormat extends TypedBytesTableInputFormatBase
    implements JobConfigurable {

  private static final Log LOG = LogFactory.getLog(TypedBytesTableInputFormat.class);

  /**
   * Space-delimited list of columns; each entry is the Base64 encoding of a
   * column name such as "family:qualifier".
   */
  public static final String COLUMN_LIST = "hbase.mapred.tablecolumnsb64";
  /** Regular expression that row keys must match to be included in the scan. */
  public static final String ROW_FILTER_REGEX = "hbase.mapred.rowfilter";
  /** Base64-encoded row key at which the table scan starts. */
  public static final String START_ROW = "hbase.mapred.startrowb64";
  /** Base64-encoded row key at which the table scan stops. */
  public static final String STOP_ROW = "hbase.mapred.stoprowb64";
  /** Record reader selection: "singlevalue", or the default "familiescolumns". */
  public static final String VALUE_FORMAT = "hbase.mapred.valueformat";
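
  // Illustrative (hypothetical) settings for the optional keys above; row keys
  // travel Base64-encoded so that arbitrary key bytes survive the string-typed
  // JobConf:
  //
  //   job.set(TypedBytesTableInputFormat.START_ROW,
  //       Base64.encodeBase64String(Bytes.toBytes("row-000")));
  //   job.set(TypedBytesTableInputFormat.STOP_ROW,
  //       Base64.encodeBase64String(Bytes.toBytes("row-999")));
  //   job.set(TypedBytesTableInputFormat.ROW_FILTER_REGEX, "^row-[0-9]+$");
  //   job.set(TypedBytesTableInputFormat.VALUE_FORMAT, "singlevalue");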

  /**
   * Configures the job: decodes the Base64-encoded column list, applies the
   * optional row-filter regex and start/stop rows, connects to the table named
   * by the job's input path, and installs the appropriate record reader.
   *
   * @see org.apache.hadoop.mapred.JobConfigurable#configure(JobConf)
   */
  public void configure(JobConf job) {
    // The job's input "path" is interpreted as the HBase table name.
    Path[] tableNames = FileInputFormat.getInputPaths(job);

    // Columns arrive space-delimited, each Base64-encoded so that arbitrary
    // column-name bytes survive the string-typed JobConf.
    String colArg = job.get(COLUMN_LIST);
    if (colArg == null) {
      throw new IllegalArgumentException(COLUMN_LIST + " is unset; expecting at least one column");
    }
    String[] colNames = colArg.split(" ");
    byte[][] m_cols = new byte[colNames.length][];
    for (int i = 0; i < m_cols.length; i++) {
      m_cols[i] = Base64.decodeBase64(Bytes.toBytes(colNames[i]));
    }
    setInputColumns(m_cols);

    if (job.get(ROW_FILTER_REGEX) != null) {
      LOG.info("Row Regex Filter[" + job.get(ROW_FILTER_REGEX) + "]");
      setRowFilter(new RowFilter(CompareFilter.CompareOp.EQUAL,
          new RegexStringComparator(job.get(ROW_FILTER_REGEX))));
    }

    if (job.get(START_ROW) != null) {
      LOG.info("Start Row[" + job.get(START_ROW) + "]");
      try {
        setStartRow(Base64.decodeBase64(job.get(START_ROW).getBytes("US-ASCII")));
      } catch (UnsupportedEncodingException e) {
        LOG.error("Start Row[" + job.get(START_ROW) + "] - Error", e);
      }
    }

    if (job.get(STOP_ROW) != null) {
      LOG.info("Stop Row[" + job.get(STOP_ROW) + "]");
      try {
        setStopRow(Base64.decodeBase64(job.get(STOP_ROW).getBytes("US-ASCII")));
      } catch (UnsupportedEncodingException e) {
        LOG.error("Stop Row[" + job.get(STOP_ROW) + "] - Error", e);
      }
    }

    try {
      setHTable(new HTable(HBaseConfiguration.create(job), tableNames[0].getName()));
    } catch (Exception e) {
      LOG.error(StringUtils.stringifyException(e));
    }

    // "singlevalue" selects the single-value record reader; anything else falls
    // back to the default families/columns reader.
    if (job.get(VALUE_FORMAT) != null && job.get(VALUE_FORMAT).equalsIgnoreCase("singlevalue")) {
      LOG.info("Value Format[" + job.get(VALUE_FORMAT) + "]");
      super.setTableRecordReader(new TypedBytesTableRecordReaderSingleValue());
    } else {
      LOG.info("Value Format[familiescolumns]");
      super.setTableRecordReader(new TypedBytesTableRecordReader());
    }
  }
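
  // For reference, a Hadoop Streaming job might select this input format along
  // these lines (jar names and paths are hypothetical):
  //
  //   hadoop jar hadoop-streaming.jar \
  //     -libjars hadoopy_hbase.jar \
  //     -D hbase.mapred.tablecolumnsb64=<base64 of "cf:qual"> \
  //     -inputformat com.dappervision.hbase.mapred.TypedBytesTableInputFormat \
  //     -io typedbytes \
  //     -input my_table -output out -mapper my_mapper.py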

  public void validateInput(JobConf job) throws IOException {
    // expecting exactly one path
    Path[] tableNames = FileInputFormat.getInputPaths(job);
    if (tableNames == null || tableNames.length > 1) {
      throw new IOException("expecting one table name");
    }

    // connected to table?
    if (getHTable() == null) {
      throw new IOException("could not connect to table '" +
          tableNames[0].getName() + "'");
    }

    // expecting at least one column
    String colArg = job.get(COLUMN_LIST);
    if (colArg == null || colArg.length() == 0) {
      throw new IOException("expecting at least one column");
    }
  }
}