Package org.apache.hadoop.mapred

Examples of org.apache.hadoop.mapred.JobTracker
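
The fragments below are excerpts from larger test and utility classes, so most of them begin and end mid-method. As a minimal self-contained sketch of the pattern they all rely on (assuming the Hadoop 1.x MiniDFSCluster/MiniMRCluster test APIs; the class name and the printed status line are illustrative only), an in-process JobTracker handle can be obtained like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.JobTracker;
import org.apache.hadoop.mapred.MiniMRCluster;

public class MiniClusterJobTrackerExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Start a single-datanode HDFS and a single-tasktracker MapReduce cluster in-process.
    MiniDFSCluster dfs = new MiniDFSCluster(conf, 1, true, null);
    MiniMRCluster mr =
        new MiniMRCluster(1, dfs.getFileSystem().getUri().toString(), 1);
    // Every snippet on this page works against a handle obtained this way.
    JobTracker jt = mr.getJobTrackerRunner().getJobTracker();
    System.out.println("Task trackers: " + jt.getClusterStatus(false).getTaskTrackers());
    mr.shutdown();
    dfs.shutdown();
  }
}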


        return myJob;
      }
    });

    // Make the job go into RUNNING state by forceful initialization.
    JobTracker jt = mr.getJobTrackerRunner().getJobTracker();
    JobInProgress jip =
        jt.getJob(org.apache.hadoop.mapred.JobID.downgrade(job.getJobID()));
    jt.initJob(jip);

    return job;
  }
View Full Code Here


    return new Token<DelegationTokenIdentifier>(dtId, dtSecretManager);
  }
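
The fragment above is the tail of a generateDelegationToken helper used by the tests that follow. A hedged reconstruction of the full helper (assuming, as in the Hadoop 1.x test code, that the JobTracker exposes its secret manager via getDelegationTokenSecretManager()):

  private Token<DelegationTokenIdentifier> generateDelegationToken(
      String owner, String renewer) {
    DelegationTokenSecretManager dtSecretManager = cluster
        .getJobTrackerRunner().getJobTracker().getDelegationTokenSecretManager();
    // The identifier carries the owner and renewer; the real user is left null.
    DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(
        new Text(owner), new Text(renewer), null);
    return new Token<DelegationTokenIdentifier>(dtId, dtSecretManager);
  }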

  @Test
  public void testDelegationToken() throws Exception {
    final JobTracker jt = cluster.getJobTrackerRunner().getJobTracker();
    final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    ugi.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
    config.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
        "kerberos");
    // Set configuration again so that job tracker finds security enabled
    UserGroupInformation.setConfiguration(config);
    ugi.doAs(new PrivilegedExceptionAction<Object>() {
      public Object run() throws Exception {
        try {
          Token<DelegationTokenIdentifier> token = jt
              .getDelegationToken(new Text(ugi.getShortUserName()));
          jt.renewDelegationToken(token);
          jt.cancelDelegationToken(token);
        } catch (IOException e) {
          e.printStackTrace();
          throw e;
        }
        return null;
View Full Code Here

    });
  }
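
The test above obtains, renews and cancels a delegation token while the caller's UserGroupInformation reports Kerberos authentication; the two tests that follow switch the authentication method to TOKEN and expect getDelegationToken and renewDelegationToken to fail with an IOException.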
 
  @Test
  public void testGetDelegationTokenWithoutKerberos() throws Exception {
    final JobTracker jt = cluster.getJobTrackerRunner().getJobTracker();
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    ugi.setAuthenticationMethod(AuthenticationMethod.TOKEN);
    config.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
        "kerberos");
    // Set configuration again so that job tracker finds security enabled
    UserGroupInformation.setConfiguration(config);
    Assert.assertTrue(UserGroupInformation.isSecurityEnabled());
    ugi.doAs(new PrivilegedExceptionAction<Object>() {
      public Object run() throws Exception {
        try {
          Token<DelegationTokenIdentifier> token = jt
              .getDelegationToken(new Text("arenewer"));
          Assert.assertTrue(token != null);
          Assert
              .fail("Delegation token should not be issued without Kerberos authentication");
        } catch (IOException e) {
View Full Code Here

    });
  }

  @Test
  public void testRenewDelegationTokenWithoutKerberos() throws Exception {
    final JobTracker jt = cluster.getJobTrackerRunner().getJobTracker();
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    ugi.setAuthenticationMethod(AuthenticationMethod.TOKEN);
    config.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
        "kerberos");
    // Set configuration again so that job tracker finds security enabled
    UserGroupInformation.setConfiguration(config);
    Assert.assertTrue(UserGroupInformation.isSecurityEnabled());
    final Token<DelegationTokenIdentifier> token = generateDelegationToken(
        "owner", ugi.getShortUserName());
    ugi.doAs(new PrivilegedExceptionAction<Object>() {
      public Object run() throws Exception {
        try {
          jt.renewDelegationToken(token);
          Assert
              .fail("Delegation token should not be renewed without Kerberos authentication");
        } catch (IOException e) {
          // success
        }
View Full Code Here

    }

    @Override
    public void shutdown() throws IOException {
      MiniMRCluster.JobTrackerRunner runner = mr.getJobTrackerRunner();
      JobTracker tracker = runner.getJobTracker();
      if (tracker != null) {
        for (JobInProgress running : tracker.getRunningJobs()) {
          try {
            running.kill();
          } catch (Exception e) {
            // ignore
          }
View Full Code Here


            NameNode.format(conf);
            final String[] emptyArgs = {};
            NameNode.createNameNode(emptyArgs, conf);
            DataNode.createDataNode(emptyArgs, conf);
            final JobTracker jobTracker = JobTracker.startTracker(new JobConf(conf));
            new Thread(new Runnable() {
                @Override
                public void run() {
                    try {
                        jobTracker.offerService();
                    } catch (Exception e) {
                        throw new RuntimeException(e);
                    }
                }
            }).start();
View Full Code Here

        for (Enumeration e = properties.keys(); e.hasMoreElements();) {
            Object key = e.nextElement();
            Object val = properties.get(key);
            conf.set( key.toString(), val.toString() );
        }
        JobTracker jobTracker = JobTracker.startTracker(conf);
        jobTracker.offerService();
        return jobTracker;
    }
View Full Code Here
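
Note that JobTracker.startTracker(conf) only constructs and initializes the tracker; offerService() then runs the service loop and blocks until the tracker shuts down, which is why the other examples on this page invoke it from a dedicated thread.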

     */
    private static Thread getJobTrackerThread() {
        Thread jobTrackerThread = new Thread(new Runnable() {

            public void run() {
                JobTracker jobTracker = null;

                while (true) {
                    try {
                        jobTracker = JobTracker.startTracker(new CassandraJobConf());
                        logger.info("Hadoop Job Tracker Started...");
                        jobTrackerStarted.countDown();
                        jobTracker.offerService();

                    } catch (Throwable t) {
                        if (t instanceof InterruptedException) {
                            try {
                                if (jobTracker != null) {
                                    jobTracker.stopTracker();
                                }
                                logger.info("Job Tracker shut down properly");
                            } catch (Exception e) {
                                logger.error("An error occurred while stopping the Job Tracker");
                            }
                        }

                        // on OOM, shut down the tracker and stop retrying
                        if (t instanceof OutOfMemoryError || t.getCause() instanceof OutOfMemoryError) {
                            try {
                                if (jobTracker != null) {
                                    jobTracker.stopTracker();
                                }
                            } catch (IOException e) {
                                // ignore; the tracker is being shut down anyway
                            }
                            logger.warn("Error starting job tracker", t);
                            break;
View Full Code Here
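
This wrapper keeps the JobTracker alive by restarting it in a loop: any failure of startTracker() or offerService() falls into the catch block, an InterruptedException triggers a clean stopTracker(), and an OutOfMemoryError (direct or as a cause) stops the tracker and breaks out of the loop instead of retrying.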


