Package org.apache.hadoop.security

Examples of org.apache.hadoop.security.Credentials
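The snippets below are collected from Hadoop and MapReduce sources. As a primer, here is a minimal sketch of the core Credentials API, assuming org.apache.hadoop.security.Credentials and the org.apache.hadoop.io buffer classes on the classpath; the alias and the someToken variable are illustrative, not taken from any snippet below.

    // Sketch only: 'someToken' stands in for a token obtained elsewhere.
    Credentials creds = new Credentials();
    creds.addToken(new Text("my-service"), someToken);      // store under an alias
    Token<? extends TokenIdentifier> t = creds.getToken(new Text("my-service"));

    // Round-trip the whole collection (tokens and secret keys) through a buffer.
    DataOutputBuffer out = new DataOutputBuffer();
    creds.writeTokenStorageToStream(out);
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    Credentials copy = new Credentials();
    copy.readTokenStorageStream(in);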


          // BACKPORTED (MAPREDUCE-873)
          job = new JobInProgress(JobTracker.this, conf,
              new JobInfo((org.apache.hadoop.mapreduce.JobID) id,
                new Text(user), new Path(getStagingAreaDirInternal(user))),
              restartCount, new Credentials() /*HACK*/);

          // 2. Check if the user has appropriate access
          // Get the user group info for the job's owner
          UserGroupInformation ugi =
            UserGroupInformation.createRemoteUser(job.getJobConf().getUser());
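In the fragment above, createRemoteUser builds a UserGroupInformation for the job owner with no credentials attached. A common follow-on pattern, sketched here with illustrative fs and path variables that are not part of the snippet, is to perform the guarded operation as that user:

    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
    // Run the action under the remote user's identity (sketch; fs/path illustrative).
    FileStatus status = ugi.doAs(new PrivilegedExceptionAction<FileStatus>() {
      public FileStatus run() throws IOException {
        return fs.getFileStatus(path);
      }
    });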


 
  @Test
  public void testGetTokensForNamenodes() throws IOException {
    FileSystem fs = dfsCluster.getFileSystem();

    Credentials credentials = new Credentials();
    TokenCache.obtainTokensForNamenodesInternal(credentials,
        new Path[] {p1, p2}, jConf);
    // this token is keyed by hostname:port key.
    String fs_addr =
      SecurityUtil.buildDTServiceName(p1.toUri(), NameNode.DEFAULT_PORT);
    Token<DelegationTokenIdentifier> nnt =
      TokenCache.getDelegationToken(credentials, fs_addr);

    assertNotNull("Token for nn is null", nnt);

    // verify the size
    Collection<Token<? extends TokenIdentifier>> tns =
      credentials.getAllTokens();
    assertEquals("number of tokens is not 1", 1, tns.size());

    boolean found = false;
    for (Token<? extends TokenIdentifier> t : tns) {
      if (t.getKind().equals(DelegationTokenIdentifier.HDFS_DELEGATION_KIND) &&
          t.getService().equals(new Text(fs_addr))) {
        found = true;
      }
    }
    assertTrue("didn't find token for " + p1, found);
  }
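obtainTokensForNamenodesInternal is test-visible plumbing; job-submission code normally goes through the public wrapper in org.apache.hadoop.mapreduce.security.TokenCache. A sketch, with illustrative input and output paths:

    Credentials creds = new Credentials();
    // Fetch HDFS delegation tokens for every path the job will touch.
    TokenCache.obtainTokensForNamenodes(creds, new Path[] {inputDir, outputDir}, conf);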

      // ... and populate the credentials in the job.
      try {
        ts = job.getCredentials();
        Path p1 = new Path("file1");
        p1 = p1.getFileSystem(job).makeQualified(p1);
        Credentials cred = new Credentials();
        TokenCache.obtainTokensForNamenodesInternal(cred, new Path[] {p1}, job);
        for (Token<? extends TokenIdentifier> t : cred.getAllTokens()) {
          ts.addToken(new Text("Hdfs"), t);
        }
      } catch (IOException e) {
        Assert.fail("Exception " + e);
      }

    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(baos);
    new DelegationTokenFetcher(dfs, out, ugi, conf).go();

    // now read the data back in and verify correct values
    Credentials ts = new Credentials();
    DataInputStream dis =
      new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));

    ts.readTokenStorageStream(dis);
    Token<? extends TokenIdentifier> newToken = ts.getToken(new Text(SHORT_NAME));

    assertEquals("Should only be one token in storage", 1, ts.numberOfTokens());
    assertEquals("Service value should have survived",
        "127.0.0.1:2005", newToken.getService().toString());
  }
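The same round trip works against files: Credentials has matching file-based helpers. A sketch, with an illustrative token-file path:

    Path tokenFile = new Path("build/test/creds.token");   // illustrative location
    ts.writeTokenStorageFile(tokenFile, conf);
    Credentials restored = Credentials.readTokenStorageFile(tokenFile, conf);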

    JVMId jvmId = new JVMId(firstTaskid.getJobID(), firstTaskid.isMap(), jvmIdInt);

    // the credentials file name is passed through the environment
    String jobTokenFile =
      System.getenv().get(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION);
    Credentials credentials =
      TokenCache.loadTokens(jobTokenFile, defaultConf);
    LOG.debug("loading token. # keys=" + credentials.numberOfSecretKeys() +
        "; from file=" + jobTokenFile);

    Token<JobTokenIdentifier> jt = TokenCache.getJobToken(credentials);
    jt.setService(new Text(address.getAddress().getHostAddress() + ":"
        + address.getPort()));
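numberOfSecretKeys counts the opaque byte[] secrets that Credentials carries alongside tokens; secret keys use the same alias scheme. A sketch with an illustrative alias and payload:

    Credentials creds = new Credentials();
    creds.addSecretKey(new Text("my.secret"), "secret-bytes".getBytes());
    byte[] key = creds.getSecretKey(new Text("my.secret"));   // null if absent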

  private void generateAndStoreTokens() throws IOException {
    Path jobDir = jobtracker.getSystemDirectoryForJob(jobId);
    Path keysFile = new Path(jobDir, TokenCache.JOB_TOKEN_HDFS_FILE);
    if (tokenStorage == null) {
      tokenStorage = new Credentials();
    }
    //create JobToken file and write token to it
    JobTokenIdentifier identifier = new JobTokenIdentifier(new Text(jobId
        .toString()));
    Token<JobTokenIdentifier> token = new Token<JobTokenIdentifier>(identifier,
        jobtracker.getJobTokenSecretManager());

      // Set up the task credentials buffer
      LOG.info("Adding #" + credentials.numberOfTokens()
          + " tokens and #" + credentials.numberOfSecretKeys()
          + " secret keys for NM use for launching container");
      Credentials taskCredentials = new Credentials(credentials);

      // LocalStorageToken is needed irrespective of whether security is enabled
      // or not.
      TokenCache.setJobToken(jobToken, taskCredentials);

      DataOutputBuffer containerTokens_dob = new DataOutputBuffer();
      LOG.info("Size of containertokens_dob is "
          + taskCredentials.numberOfTokens());
      taskCredentials.writeTokenStorageToStream(containerTokens_dob);
      taskCredentialsBuffer =
          ByteBuffer.wrap(containerTokens_dob.getData(), 0,
              containerTokens_dob.getLength());

      // Add shuffle secret key
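On the receiving side, the ByteBuffer is turned back into a Credentials object before the container is launched. A sketch of the inverse step, reusing taskCredentialsBuffer from the fragment above:

    DataInputBuffer dib = new DataInputBuffer();
    dib.reset(taskCredentialsBuffer.array(), taskCredentialsBuffer.limit());
    Credentials launchCreds = new Credentials();
    launchCreds.readTokenStorageStream(dib);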

    public StubbedJob(JobId jobId, ApplicationAttemptId applicationAttemptId,
        Configuration conf, EventHandler eventHandler, boolean newApiCommitter,
        String user, int numSplits, AppContext appContext) {
      super(jobId, applicationAttemptId, conf, eventHandler,
          null, new JobTokenSecretManager(), new Credentials(),
          new SystemClock(), Collections.<TaskId, TaskInfo> emptyMap(),
          MRAppMetrics.create(), null, newApiCommitter, user,
          System.currentTimeMillis(), null, appContext, null, null);

      initTransition = getInitTransition(numSplits);

    JobId jobId = TypeConverter.toYarn(jobID);
    MRAppMetrics mrAppMetrics = MRAppMetrics.create();
    JobImpl job =
        new JobImpl(jobId, ApplicationAttemptId.newInstance(
          ApplicationId.newInstance(0, 0), 0), conf, mock(EventHandler.class),
          null, new JobTokenSecretManager(), new Credentials(), null, null,
          mrAppMetrics, null, true, null, 0, null, null, null, null);
    InitTransition initTransition = getInitTransition(2);
    JobEvent mockJobEvent = mock(JobEvent.class);
    initTransition.transition(job, mockJobEvent);
    boolean isUber = job.isUber();

    JobId jobId = TypeConverter.toYarn(jobID);
    MRAppMetrics mrAppMetrics = MRAppMetrics.create();
    JobImpl job =
        new JobImpl(jobId, ApplicationAttemptId.newInstance(
          ApplicationId.newInstance(0, 0), 0), conf, mock(EventHandler.class),
          null, new JobTokenSecretManager(), new Credentials(), null, null,
          mrAppMetrics, null, true, null, 0, null, null, null, null);
    InitTransition initTransition = new InitTransition() {
        @Override
        protected TaskSplitMetaInfo[] createSplits(JobImpl job, JobId jobId) {
          throw new YarnRuntimeException(EXCEPTIONMSG);
        }
      };
