Package org.apache.hadoop.http

Examples of org.apache.hadoop.http.HttpServer2$SelectChannelConnectorWithSafeStartup
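The snippets below all follow the same lifecycle: build an HttpServer2 with HttpServer2.Builder, register servlets, start the server, read back the bound address, and stop it when done. As a point of reference, here is a minimal, self-contained sketch of that pattern, assembled only from calls that appear in the examples; the class name HttpServer2LifecycleSketch and the printed message are illustrative, and the sketch assumes the hadoop-common classes used by the examples are on the classpath.

  import java.net.InetSocketAddress;
  import java.net.URI;

  import org.apache.hadoop.http.HttpServer2;

  public class HttpServer2LifecycleSketch {
    public static void main(String[] args) throws Exception {
      // Illustrative sketch: class name and printed message are not from the
      // examples below. Bind to an ephemeral port on localhost;
      // setFindPort(true) lets the server probe for a free port if the
      // requested one is unavailable.
      HttpServer2 server = new HttpServer2.Builder()
          .setName("test")
          .addEndpoint(URI.create("http://localhost:0"))
          .setFindPort(true)
          .build();
      try {
        server.start();
        // The actual bound address is only known after start().
        InetSocketAddress addr = server.getConnectorAddress(0);
        System.out.println("HttpServer2 listening on port " + addr.getPort());
      } finally {
        server.stop();
      }
    }
  }

Servlets are added before start() with server.addServlet(name, pathSpec, servletClass), as the examples below do for image-transfer and job-end-notification endpoints.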


  /**
   * Test to verify the read timeout
   */
  @Test(timeout = 5000)
  public void testGetImageTimeout() throws Exception {
    HttpServer2 testServer = HttpServerFunctionalTest.createServer("hdfs");
    try {
      testServer.addServlet("ImageTransfer", ImageServlet.PATH_SPEC,
          TestImageTransferServlet.class);
      testServer.start();
      URL serverURL = HttpServerFunctionalTest.getServerURL(testServer);
      TransferFsImage.timeout = 2000;
      try {
        TransferFsImage.getFileClient(serverURL, "txid=1", null,
            null, false);
        fail("TransferImage Should fail with timeout");
      } catch (SocketTimeoutException e) {
        assertEquals("Read should timeout", "Read timed out", e.getMessage());
      }
    } finally {
      if (testServer != null) {
        testServer.stop();
      }
    }
  }


  /**
   * Test to verify the upload timeout
   */
  @Test(timeout = 10000)
  public void testImageUploadTimeout() throws Exception {
    Configuration conf = new HdfsConfiguration();
    NNStorage mockStorage = Mockito.mock(NNStorage.class);
    HttpServer2 testServer = HttpServerFunctionalTest.createServer("hdfs");
    try {
      testServer.addServlet("ImageTransfer", ImageServlet.PATH_SPEC,
          TestImageTransferServlet.class);
      testServer.start();
      URL serverURL = HttpServerFunctionalTest.getServerURL(testServer);
      // set the timeout here, otherwise it will take default.
      TransferFsImage.timeout = 2000;

      File tmpDir = new File(new FileSystemTestHelper().getTestRootDir());
      tmpDir.mkdirs();

      File mockImageFile = File.createTempFile("image", "", tmpDir);
      FileOutputStream imageFile = new FileOutputStream(mockImageFile);
      imageFile.write("data".getBytes());
      imageFile.close();
      Mockito.when(
          mockStorage.findImageFile(Mockito.any(NameNodeFile.class),
              Mockito.anyLong())).thenReturn(mockImageFile);
      Mockito.when(mockStorage.toColonSeparatedString()).thenReturn(
          "storage:info:string");
     
      try {
        TransferFsImage.uploadImageFromStorage(serverURL, conf, mockStorage,
            NameNodeFile.IMAGE, 1L);
        fail("TransferImage Should fail with timeout");
      } catch (SocketTimeoutException e) {
        assertEquals("Upload should timeout", "Read timed out", e.getMessage());
      }
    } finally {
      testServer.stop();
    }
  }

  /**
   * Test to verify the read timeout
   */
  @Test(timeout = 5000)
  public void testImageTransferTimeout() throws Exception {
    HttpServer2 testServer = HttpServerFunctionalTest.createServer("hdfs");
    try {
      testServer.addServlet("GetImage", "/getimage", TestGetImageServlet.class);
      testServer.start();
      URL serverURL = HttpServerFunctionalTest.getServerURL(testServer);
      TransferFsImage.timeout = 2000;
      try {
        TransferFsImage.getFileClient(serverURL, "txid=1", null,
            null, false);
        fail("TransferImage Should fail with timeout");
      } catch (SocketTimeoutException e) {
        assertEquals("Read should timeout", "Read timed out", e.getMessage());
      }
    } finally {
      if (testServer != null) {
        testServer.stop();
      }
    }
  }

        if (hasSpnegoConf) {
          builder.setUsernameConfKey(spnegoPrincipalKey)
              .setKeytabConfKey(spnegoKeytabKey)
              .setSecurityEnabled(UserGroupInformation.isSecurityEnabled());
        }
        HttpServer2 server = builder.build();

        for(ServletStruct struct: servlets) {
          server.addServlet(struct.name, struct.spec, struct.clazz);
        }
        for(Map.Entry<String, Object> entry : attributes.entrySet()) {
          server.setAttribute(entry.getKey(), entry.getValue());
        }
        HttpServer2.defineFilter(server.getWebAppContext(), "guice",
          GuiceFilter.class.getName(), null, new String[] { "/*" });

        webapp.setConf(conf);
        webapp.setHttpServer(server);
        server.start();
        LOG.info("Web app /"+ name +" started at "+ server.getConnectorAddress(0).getPort());
      } catch (ClassNotFoundException e) {
        throw new WebAppException("Error starting http server", e);
      } catch (IOException e) {
        throw new WebAppException("Error starting http server", e);
      }

      log.debug("log.debug1");
      log.info("log.info1");
      log.error("log.error1");
      assertTrue(!Level.ERROR.equals(log.getEffectiveLevel()));

      HttpServer2 server = new HttpServer2.Builder().setName("..")
          .addEndpoint(new URI("http://localhost:0")).setFindPort(true)
          .build();
     
      server.start();
      String authority = NetUtils.getHostPortString(server
          .getConnectorAddress(0));

      //servlet
      URL url = new URL("http://" + authority + "/logLevel?log=" + logName
          + "&level=" + Level.ERROR);


  @Test
  public void testNotificationOnLastRetryNormalShutdown() throws Exception {
    HttpServer2 server = startHttpServer();
    // Act like it is the second attempt. Default max attempts is 2
    MRApp app = spy(new MRAppWithCustomContainerAllocator(
        2, 2, true, this.getClass().getName(), true, 2, true));
    doNothing().when(app).sysexit();
    JobConf conf = new JobConf();
    conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,
        JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
    JobImpl job = (JobImpl)app.submit(conf);
    app.waitForInternalState(job, JobStateInternal.SUCCEEDED);
    // Unregistration succeeds: successfullyUnregistered is set
    app.shutDownJob();
    Assert.assertTrue(app.isLastAMRetry());
    Assert.assertEquals(1, JobEndServlet.calledTimes);
    Assert.assertEquals("jobid=" + job.getID() + "&status=SUCCEEDED",
        JobEndServlet.requestUri.getQuery());
    Assert.assertEquals(JobState.SUCCEEDED.toString(),
      JobEndServlet.foundJobState);
    server.stop();
  }


  @Test
  public void testAbsentNotificationOnNotLastRetryUnregistrationFailure()
      throws Exception {
    HttpServer2 server = startHttpServer();
    MRApp app = spy(new MRAppWithCustomContainerAllocator(2, 2, false,
        this.getClass().getName(), true, 1, false));
    doNothing().when(app).sysexit();
    JobConf conf = new JobConf();
    conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,
        JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
    JobImpl job = (JobImpl)app.submit(conf);
    app.waitForState(job, JobState.RUNNING);
    app.getContext().getEventHandler()
      .handle(new JobEvent(app.getJobId(), JobEventType.JOB_AM_REBOOT));
    app.waitForInternalState(job, JobStateInternal.REBOOT);
    // Now shutdown.
    // Unregistration fails: isLastAMRetry is recalculated; this is not the last attempt.
    app.shutDownJob();
    // Not the last AM attempt, so the user should see that the job is still running.
    app.waitForState(job, JobState.RUNNING);
    Assert.assertFalse(app.isLastAMRetry());
    Assert.assertEquals(0, JobEndServlet.calledTimes);
    Assert.assertNull(JobEndServlet.requestUri);
    Assert.assertNull(JobEndServlet.foundJobState);
    server.stop();
  }


  @Test
  public void testNotificationOnLastRetryUnregistrationFailure()
      throws Exception {
    HttpServer2 server = startHttpServer();
    MRApp app = spy(new MRAppWithCustomContainerAllocator(2, 2, false,
        this.getClass().getName(), true, 2, false));
    doNothing().when(app).sysexit();
    JobConf conf = new JobConf();
    conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,
        JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
    JobImpl job = (JobImpl)app.submit(conf);
    app.waitForState(job, JobState.RUNNING);
    app.getContext().getEventHandler()
      .handle(new JobEvent(app.getJobId(), JobEventType.JOB_AM_REBOOT));
    app.waitForInternalState(job, JobStateInternal.REBOOT);
    // Now shutdown. User should see FAILED state.
    // Unregistration fails: isLastAMRetry is recalculated; this is the last attempt.
    app.shutDownJob();
    Assert.assertTrue(app.isLastAMRetry());
    Assert.assertEquals(1, JobEndServlet.calledTimes);
    Assert.assertEquals("jobid=" + job.getID() + "&status=FAILED",
        JobEndServlet.requestUri.getQuery());
    Assert.assertEquals(JobState.FAILED.toString(),
      JobEndServlet.foundJobState);
    server.stop();
  }


  private static HttpServer2 startHttpServer() throws Exception {
    new File(System.getProperty(
        "build.webapps", "build/webapps") + "/test").mkdirs();
    HttpServer2 server = new HttpServer2.Builder().setName("test")
        .addEndpoint(URI.create("http://localhost:0"))
        .setFindPort(true).build();
    server.addServlet("jobend", "/jobend", JobEndServlet.class);
    server.start();

    JobEndServlet.calledTimes = 0;
    JobEndServlet.requestUri = null;
    JobEndServlet.baseUrl = "http://localhost:"
        + server.getConnectorAddress(0).getPort() + "/";
    JobEndServlet.foundJobState = null;
    return server;
  }
