/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hive.shims;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.Map;
import com.facebook.presto.hive.shaded.org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Trash;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.ClusterStatus;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.WebHCatJTShim23;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.task.JobContextImpl;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
import org.apache.hadoop.mapreduce.util.HostUtil;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.mapred.lib.TotalOrderPartitioner;
import org.apache.hadoop.security.UserGroupInformation;
/**
 * Implementation of shims against Hadoop 0.23.0.
*/
public class Hadoop23Shims extends HadoopShimsSecure {
@Override
public String getTaskAttemptLogUrl(JobConf conf,
String taskTrackerHttpAddress, String taskAttemptId)
throws MalformedURLException {
    String framework = conf.get("mapreduce.framework.name");
    if ("yarn".equals(framework)) {
      // If the cluster runs in MR2 mode, there is no TaskLogServlet, so return null.
LOG.warn("Can't fetch tasklog: TaskLogServlet is not supported in MR2 mode.");
return null;
} else {
      // If the cluster runs in MR1 mode, use HostUtil to construct the task log URL.
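      // For a tracker address like http://tt-host:50060 (hypothetical) this typically yields a URL
      // of the form http://tt-host:50060/tasklog?attemptid=<taskAttemptId>; the exact path is up to HostUtil.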
URL taskTrackerHttpURL = new URL(taskTrackerHttpAddress);
return HostUtil.getTaskLogUrl(taskTrackerHttpURL.getHost(),
Integer.toString(taskTrackerHttpURL.getPort()),
taskAttemptId);
}
}
@Override
public JobTrackerState getJobTrackerState(ClusterStatus clusterStatus) throws Exception {
switch (clusterStatus.getJobTrackerStatus()) {
case INITIALIZING:
return JobTrackerState.INITIALIZING;
case RUNNING:
return JobTrackerState.RUNNING;
default:
String errorMsg = "Unrecognized JobTracker state: " + clusterStatus.getJobTrackerStatus();
throw new Exception(errorMsg);
}
}
@Override
public org.apache.hadoop.mapreduce.TaskAttemptContext newTaskAttemptContext(Configuration conf, final Progressable progressable) {
return new TaskAttemptContextImpl(conf, new TaskAttemptID()) {
@Override
public void progress() {
progressable.progress();
}
};
}
@Override
public org.apache.hadoop.mapreduce.JobContext newJobContext(Job job) {
return new JobContextImpl(job.getConfiguration(), job.getJobID());
}
@Override
public boolean isLocalMode(Configuration conf) {
return "local".equals(conf.get("mapreduce.framework.name"));
}
@Override
public String getJobLauncherRpcAddress(Configuration conf) {
return conf.get("yarn.resourcemanager.address");
}
@Override
public void setJobLauncherRpcAddress(Configuration conf, String val) {
    if ("local".equals(val)) {
// LocalClientProtocolProvider expects both parameters to be 'local'.
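      // For example, a val of "local" leaves both keys at "local"; any other value, such as a
      // hypothetical "rm-host:8032", takes the else branch and selects the "yarn" framework.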
conf.set("mapreduce.framework.name", val);
conf.set("mapreduce.jobtracker.address", val);
}
else {
conf.set("mapreduce.framework.name", "yarn");
conf.set("yarn.resourcemanager.address", val);
}
}
@Override
public String getJobLauncherHttpAddress(Configuration conf) {
return conf.get("yarn.resourcemanager.webapp.address");
}
@Override
public long getDefaultBlockSize(FileSystem fs, Path path) {
return fs.getDefaultBlockSize(path);
}
@Override
public short getDefaultReplication(FileSystem fs, Path path) {
return fs.getDefaultReplication(path);
}
@Override
public boolean moveToAppropriateTrash(FileSystem fs, Path path, Configuration conf)
throws IOException {
return Trash.moveToAppropriateTrash(fs, path, conf);
}
@Override
  public void setTotalOrderPartitionFile(JobConf jobConf, Path partitionFile) {
TotalOrderPartitioner.setPartitionFile(jobConf, partitionFile);
}
  /**
   * Returns a shim that wraps a MiniMRCluster.
   */
  @Override
  public MiniMrShim getMiniMrCluster(Configuration conf, int numberOfTaskTrackers,
String nameNode, int numDir) throws IOException {
return new MiniMrShim(conf, numberOfTaskTrackers, nameNode, numDir);
}
/**
* Shim for MiniMrCluster
*/
public class MiniMrShim implements HadoopShims.MiniMrShim {
private final MiniMRCluster mr;
private final Configuration conf;
public MiniMrShim(Configuration conf, int numberOfTaskTrackers,
String nameNode, int numDir) throws IOException {
this.conf = conf;
JobConf jConf = new JobConf(conf);
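      // Set up a single root "default" queue at 100% capacity so the mini cluster's
      // capacity scheduler has a valid queue configuration to start with.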
jConf.set("yarn.scheduler.capacity.root.queues", "default");
jConf.set("yarn.scheduler.capacity.root.default.capacity", "100");
mr = new MiniMRCluster(numberOfTaskTrackers, nameNode, numDir, null, null, jConf);
}
@Override
public int getJobTrackerPort() throws UnsupportedOperationException {
String address = conf.get("yarn.resourcemanager.address");
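      // The port is whatever follows the last ':', e.g. "localhost:8032" yields 8032.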
address = StringUtils.substringAfterLast(address, ":");
if (StringUtils.isBlank(address)) {
throw new IllegalArgumentException("Invalid YARN resource manager port.");
}
return Integer.parseInt(address);
}
@Override
public void shutdown() throws IOException {
mr.shutdown();
}
@Override
public void setupConfiguration(Configuration conf) {
JobConf jConf = mr.createJobConf();
      // Copy every property from the MiniMR-generated JobConf into the supplied conf.
      for (Map.Entry<String, String> pair : jConf) {
        conf.set(pair.getKey(), pair.getValue());
      }
}
}
// Don't move this code to the parent class. There's a binary
// incompatibility between hadoop 1 and 2 wrt MiniDFSCluster and we
// need to have two different shim classes even though they are
// exactly the same.
  @Override
  public HadoopShims.MiniDFSShim getMiniDfs(Configuration conf,
int numDataNodes,
boolean format,
String[] racks) throws IOException {
return new MiniDFSShim(new MiniDFSCluster(conf, numDataNodes, format, racks));
}
  /**
   * Shim for MiniDFSCluster.
   */
public class MiniDFSShim implements HadoopShims.MiniDFSShim {
private final MiniDFSCluster cluster;
public MiniDFSShim(MiniDFSCluster cluster) {
this.cluster = cluster;
}
    @Override
    public FileSystem getFileSystem() throws IOException {
      return cluster.getFileSystem();
    }
    @Override
    public void shutdown() {
      cluster.shutdown();
    }
}
private volatile HCatHadoopShims hcatShimInstance;
@Override
public HCatHadoopShims getHCatShim() {
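    // Unsynchronized lazy initialization: hcatShimInstance is volatile and HCatHadoopShims23
    // is stateless, so the worst case is two racing threads creating one redundant instance.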
    if (hcatShimInstance == null) {
hcatShimInstance = new HCatHadoopShims23();
}
return hcatShimInstance;
}
private final class HCatHadoopShims23 implements HCatHadoopShims {
@Override
public TaskID createTaskID() {
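      // A synthetic placeholder id: empty jtIdentifier, job 0, map task 0.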
return new TaskID("", 0, TaskType.MAP, 0);
}
@Override
public TaskAttemptID createTaskAttemptID() {
return new TaskAttemptID("", 0, TaskType.MAP, 0, 0);
}
@Override
public org.apache.hadoop.mapreduce.TaskAttemptContext createTaskAttemptContext(Configuration conf,
org.apache.hadoop.mapreduce.TaskAttemptID taskId) {
return new org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl(
          conf instanceof JobConf ? new JobConf(conf) : conf,
taskId);
}
@Override
public org.apache.hadoop.mapred.TaskAttemptContext createTaskAttemptContext(org.apache.hadoop.mapred.JobConf conf,
org.apache.hadoop.mapred.TaskAttemptID taskId, Progressable progressable) {
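      // The matching org.apache.hadoop.mapred.TaskAttemptContextImpl constructor is not publicly
      // accessible in Hadoop 2, so it is looked up and invoked reflectively via setAccessible(true).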
org.apache.hadoop.mapred.TaskAttemptContext newContext = null;
try {
        java.lang.reflect.Constructor<org.apache.hadoop.mapred.TaskAttemptContextImpl> construct =
            org.apache.hadoop.mapred.TaskAttemptContextImpl.class.getDeclaredConstructor(
                org.apache.hadoop.mapred.JobConf.class, org.apache.hadoop.mapred.TaskAttemptID.class,
                Reporter.class);
construct.setAccessible(true);
newContext = (org.apache.hadoop.mapred.TaskAttemptContext) construct.newInstance(
new JobConf(conf), taskId, (Reporter) progressable);
} catch (Exception e) {
throw new RuntimeException(e);
}
return newContext;
}
@Override
public JobContext createJobContext(Configuration conf,
JobID jobId) {
      return new JobContextImpl(conf instanceof JobConf ? new JobConf(conf) : conf,
jobId);
}
@Override
public org.apache.hadoop.mapred.JobContext createJobContext(org.apache.hadoop.mapred.JobConf conf,
org.apache.hadoop.mapreduce.JobID jobId, Progressable progressable) {
return new org.apache.hadoop.mapred.JobContextImpl(
new JobConf(conf), jobId, (org.apache.hadoop.mapred.Reporter) progressable);
}
@Override
public void commitJob(OutputFormat outputFormat, Job job) throws IOException {
// Do nothing as this was fixed by MAPREDUCE-1447.
}
@Override
public void abortJob(OutputFormat outputFormat, Job job) throws IOException {
// Do nothing as this was fixed by MAPREDUCE-1447.
}
@Override
public InetSocketAddress getResourceManagerAddress(Configuration conf) {
String addr = conf.get("yarn.resourcemanager.address", "localhost:8032");
return NetUtils.createSocketAddr(addr);
}
@Override
public String getPropertyName(PropertyName name) {
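      // Map the shim-level names onto Hadoop 2 configuration keys; for example, CACHE_FILES
      // corresponds to MRJobConfig.CACHE_FILES ("mapreduce.job.cache.files").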
switch (name) {
case CACHE_ARCHIVES:
return MRJobConfig.CACHE_ARCHIVES;
case CACHE_FILES:
return MRJobConfig.CACHE_FILES;
case CACHE_SYMLINK:
return MRJobConfig.CACHE_SYMLINK;
}
return "";
}
@Override
public boolean isFileInHDFS(FileSystem fs, Path path) throws IOException {
      // In case of viewfs we need to look up where the file actually lives in order to
      // know which file system is in use; resolvePath is a reliable way to determine that.
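      // For example, a viewfs:// path resolving to hdfs://nn:8020/... returns true, while one
      // resolving to a non-HDFS scheme such as file:// returns false.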
return "hdfs".equals(fs.resolvePath(path).toUri().getScheme());
}
}
@Override
public WebHCatJTShim getWebHCatShim(Configuration conf, UserGroupInformation ugi) throws IOException {
    return new WebHCatJTShim23(conf, ugi); // this shim has state, so it can't be cached
}
}