Examples of DataNode


Examples of org.apache.cayenne.access.DataNode
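This fragment, from a Cayenne unit test, asks the node's primary key generator for a fresh ARTIST key and stages it as a replacement id, so the row's primary key is rewritten on the next commit: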

    // Ask the node's PK generator for a new ARTIST primary key and stage it
    // as the object's replacement id; it takes effect on the next commit.
    void updateId(DataContext context, ObjectId id) throws Exception {
        DbEntity entity = context.getEntityResolver().getDbEntity("ARTIST");
        DataNode node = context.getParentDataDomain().lookupDataNode(entity.getDataMap());

        Object pk = node.getAdapter().getPkGenerator().generatePk(
                node,
                entity.getPrimaryKeys().iterator().next());
        id.getReplacementIdMap().put(Artist.ARTIST_ID_PK_COLUMN, pk);
    }
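A minimal sketch of how such a helper might be driven from a test. The Artist instance and the surrounding statements are assumptions, not part of the original test; newObject() and commitChanges() are standard Cayenne 3.x DataContext API:

    // Hypothetical caller, assuming a Cayenne 3.x DataContext named "context".
    Artist artist = context.newObject(Artist.class);
    artist.setArtistName("pk-test");
    context.commitChanges();                  // assigns the generated PK

    updateId(context, artist.getObjectId());  // stage a replacement PK
    context.commitChanges();                  // PK is rewritten on this commit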

Examples of org.apache.cayenne.access.DataNode
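Here a test fixture clones the shared "testmap" DataMap through an XML round trip, then wires the copy into its own DataNode and DataDomain so that merge tests cannot modify the shared mapping: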

    protected void setUp() throws Exception {
        super.setUp();

        deleteTestData();
        createTestData("testArtists");
        DataNode orgNode = getDomain().getDataNodes().iterator().next();
       
       
        // clone the DataMap via an XML round trip, to avoid modifying the
        // shared test DataMap
        DataMap originalMap = getDomain().getDataMap("testmap");
        StringWriter out = new StringWriter();
        PrintWriter outWriter = new PrintWriter(out);
        originalMap.encodeAsXML(outWriter);
        outWriter.flush();
        StringReader in = new StringReader(out.toString());
        map = new MapLoader().loadDataMap(new InputSource(in));
       
        // the map must operate in an EntityResolver namespace...
        EntityResolver testResolver = new EntityResolver();
        testResolver.addDataMap(map);

        node = new DataNode("mergenode");
        node.setAdapter(orgNode.getAdapter());
        node.setDataSource(orgNode.getDataSource());
        node.addDataMap(map);
       
        domain = new DataDomain("mergetestdomain");
        domain.setEntitySorter(new AshwoodEntitySorter());
        domain.addNode(node);
    }

Examples of org.apache.cayenne.access.DataNode
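This excerpt from Cayenne's runtime bootstrap turns each DataNodeDescriptor of a project into a configured DataNode: it resolves a DataSource, picks a schema update strategy (falling back to a default), creates a DbAdapter, and attaches the named DataMaps before registering the node with the DataDomain: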

       
        dataDomain.getEntityResolver().applyDBLayerDefaults();
        dataDomain.getEntityResolver().applyObjectLayerDefaults();

        // Build one DataNode per node descriptor in the project.
        for (DataNodeDescriptor nodeDescriptor : descriptor.getNodeDescriptors()) {
            DataNode dataNode = new DataNode(nodeDescriptor.getName());

            dataNode.setDataSourceLocation(nodeDescriptor.getParameters());

            DataSource dataSource = dataSourceFactory.getDataSource(nodeDescriptor);

            dataNode.setDataSourceFactory(nodeDescriptor.getDataSourceFactoryType());
            dataNode.setDataSource(dataSource);

            // schema update strategy
            String schemaUpdateStrategyType = nodeDescriptor
                    .getSchemaUpdateStrategyType();

            if (schemaUpdateStrategyType == null) {
                dataNode.setSchemaUpdateStrategy(defaultSchemaUpdateStrategy);
                dataNode.setSchemaUpdateStrategyName(defaultSchemaUpdateStrategy
                        .getClass()
                        .getName());
            }
            else {
                SchemaUpdateStrategy strategy = objectFactory.newInstance(
                        SchemaUpdateStrategy.class,
                        schemaUpdateStrategyType);
                dataNode.setSchemaUpdateStrategyName(schemaUpdateStrategyType);
                dataNode.setSchemaUpdateStrategy(strategy);
            }

            // DbAdapter
            dataNode.setAdapter(adapterFactory.createAdapter(nodeDescriptor, dataSource));

            // DataMaps
            for (String dataMapName : nodeDescriptor.getDataMapNames()) {
                dataNode.addDataMap(dataDomain.getDataMap(dataMapName));
            }

            dataDomain.addNode(dataNode);
        }

Examples of org.apache.hadoop.dfs.DataNode
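A MiniDFSCluster test starts a third datanode and then reports one of its blocks as corrupt directly through the NameNode's namesystem: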

      // start a third datanode
      cluster.startDataNodes(conf, 1, true, null, null);
      ArrayList<DataNode> datanodes = cluster.getDataNodes();
      assertEquals(3, datanodes.size());
      DataNode dataNode = datanodes.get(2);
     
      // report corrupted block by the third datanode
      cluster.getNameNode().namesystem.markBlockAsCorrupt(blk,
          new DatanodeInfo(dataNode.dnRegistration));
     

Examples of org.apache.hadoop.hdfs.server.datanode.DataNode
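This lease-recovery test truncates each replica of the last block to a different random size, reopens the file for append, and has a randomly chosen primary datanode run block synchronization: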

      DataNode.LOG.info("newblocksizes = " + Arrays.asList(newblocksizes));

      //update blocks with random block sizes
      Block[] newblocks = new Block[REPLICATION_NUM];
      for(int i = 0; i < REPLICATION_NUM; i++) {
        DataNode dn = datanodes[i];
        FSDatasetTestUtil.truncateBlock(dn, lastblock, newblocksizes[i]);
        newblocks[i] = new Block(lastblock.getBlockId(), newblocksizes[i],
            lastblock.getGenerationStamp());
        checkMetaInfo(newblocks[i], idps[i]);
      }

      DataNode.LOG.info("dfs.dfs.clientName=" + dfs.dfs.clientName);
      cluster.getNameNode().append(filestr, dfs.dfs.clientName);

      //block synchronization
      final int primarydatanodeindex = AppendTestUtil.nextInt(datanodes.length);
      DataNode.LOG.info("primarydatanodeindex  =" + primarydatanodeindex);
      DataNode primary = datanodes[primarydatanodeindex];
      DataNode.LOG.info("primary.dnRegistration=" + primary.dnRegistration);
      primary.recoverBlocks(new Block[]{lastblock}, new DatanodeInfo[][]{datanodeinfos}).join();

      BlockMetaDataInfo[] updatedmetainfo = new BlockMetaDataInfo[REPLICATION_NUM];
      int minsize = min(newblocksizes);
      long currentGS = cluster.getNameNode().namesystem.getGenerationStamp();
      lastblock.setGenerationStamp(currentGS);

Examples of org.apache.hadoop.hdfs.server.datanode.DataNode
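Inside MiniDFSCluster.startDataNodes(): each DataNode is instantiated from its own Configuration, and when a rack topology is requested, the node's IP:port is registered with the StaticMapping resolver: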

      Configuration newconf = new Configuration(dnConf); // save config
      if (hosts != null) {
        NetUtils.addStaticResolution(hosts[i - curDatanodesNum], "localhost");
      }
      DataNode dn = DataNode.instantiateDataNode(dnArgs, dnConf);
      //since the HDFS does things based on IP:port, we need to add the mapping
      //for IP:port to rackId
      String ipAddr = dn.getSelfAddr().getAddress().getHostAddress();
      if (racks != null) {
        int port = dn.getSelfAddr().getPort();
        System.out.println("Adding node with IP:port : " + ipAddr + ":" + port +
                           " to rack " + racks[i - curDatanodesNum]);
        StaticMapping.addNodeToRack(ipAddr + ":" + port,
                                    racks[i - curDatanodesNum]);
      }

Examples of org.apache.hadoop.hdfs.server.datanode.DataNode
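MiniDFSCluster's accessor for the DataNodes it has started: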

  /**
   * Gets a list of the started DataNodes. May be empty.
   */
  public ArrayList<DataNode> getDataNodes() {
    ArrayList<DataNode> list = new ArrayList<DataNode>();
    for (int i = 0; i < dataNodes.size(); i++) {
      DataNode node = dataNodes.get(i).datanode;
      list.add(node);
    }
    return list;
  }

Examples of org.apache.hadoop.hdfs.server.datanode.DataNode
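The matching teardown: DataNodes are shut down in reverse start order while the NameNode keeps running: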

  /**
   * Shuts down all started DataNodes. The NameNode
   * is left running so that new DataNodes may be started.
   */
  public void shutdownDataNodes() {
    for (int i = dataNodes.size()-1; i >= 0; i--) {
      System.out.println("Shutting down DataNode " + i);
      DataNode dn = dataNodes.remove(i).datanode;
      dn.shutdown();
      numDataNodes--;
    }
  }

Examples of org.apache.hadoop.hdfs.server.datanode.DataNode
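Stopping a single DataNode by index; the returned DataNodeProperties retains enough state to restart the node later: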

  public synchronized DataNodeProperties stopDataNode(int i) {
    if (i < 0 || i >= dataNodes.size()) {
      return null;
    }
    DataNodeProperties dnprop = dataNodes.remove(i);
    DataNode dn = dnprop.datanode;
    System.out.println("MiniDFSCluster Stopping DataNode " +
                       dn.dnRegistration.getName() +
                       " from a total of " + (dataNodes.size() + 1) +
                       " datanodes.");
    dn.shutdown();
    numDataNodes--;
    return dnprop;
  }
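A minimal sketch of the stop/restart round trip this enables; the cluster variable and index 0 are assumptions, while restartDataNode(DataNodeProperties) and waitActive() are the MiniDFSCluster methods that consume the saved properties:

    // Hypothetical test fragment: take a datanode down, then revive it
    // from the properties captured by stopDataNode().
    DataNodeProperties dnprop = cluster.stopDataNode(0);
    // ... assert on replication or corruption handling while it is down ...
    cluster.restartDataNode(dnprop);  // rebuilds the node from the saved conf
    cluster.waitActive();             // wait until the cluster is healthy again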

Examples of org.apache.hadoop.hdfs.server.datanode.DataNode
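The name-based overload resolves a datanode registration name to an index and delegates to stopDataNode(int):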

  /**
   * Shutdown a datanode by name.
   */
  public synchronized DataNodeProperties stopDataNode(String name) {
    int i;
    for (i = 0; i < dataNodes.size(); i++) {
      DataNode dn = dataNodes.get(i).datanode;
      if (dn.dnRegistration.getName().equals(name)) {
        break;
      }
    }
    return stopDataNode(i);
  }