Package org.apache.falcon.entity.v0.cluster

Examples of org.apache.falcon.entity.v0.cluster.Cluster
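Nearly every snippet on this page obtains a Cluster in one of two ways: by name from the ConfigurationStore, or by unmarshalling cluster XML through EntityType.CLUSTER.getUnmarshaller(). A minimal sketch of the lookup path, using only calls that appear in the snippets below; the class name, method, and "testCluster" are illustrative, not taken from any snippet:

import org.apache.falcon.FalconException;
import org.apache.falcon.entity.store.ConfigurationStore;
import org.apache.falcon.entity.v0.EntityType;
import org.apache.falcon.entity.v0.cluster.Cluster;

public final class ClusterLookupExample {

    // Fetch a Cluster entity by name and read its colo; the entity must
    // already have been published to the store.
    public static String coloOf(String clusterName) throws FalconException {
        Cluster cluster = ConfigurationStore.get().get(EntityType.CLUSTER, clusterName);
        return cluster.getColo();
    }
}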


    // Listener callback: drop a removed cluster from the colo -> cluster-names map,
    // and drop the colo entry itself once its last cluster is gone
    // (assumes the colo entry was created when the cluster was added).
    public void onRemove(Entity entity) {
        if (entity.getEntityType() != EntityType.CLUSTER) {
            return;
        }

        Cluster cluster = (Cluster) entity;
        COLO_CLUSTER_MAP.get(cluster.getColo()).remove(cluster.getName());
        if (COLO_CLUSTER_MAP.get(cluster.getColo()).isEmpty()) {
            COLO_CLUSTER_MAP.remove(cluster.getColo());
        }
    }
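The snippet above only shows removal. A plausible counterpart that populates the map when a cluster is registered, assuming COLO_CLUSTER_MAP is a java.util.Map<String, Set<String>> from colo name to cluster names; the field type and the onAdd signature are assumptions, not shown in the snippet:

    // Assumed field (not shown in the snippet):
    // private static final Map<String, Set<String>> COLO_CLUSTER_MAP = new ConcurrentHashMap<String, Set<String>>();

    public void onAdd(Entity entity) {
        if (entity.getEntityType() != EntityType.CLUSTER) {
            return;
        }

        Cluster cluster = (Cluster) entity;
        // Create the colo's bucket on first use, then record the cluster name.
        Set<String> clusters = COLO_CLUSTER_MAP.get(cluster.getColo());
        if (clusters == null) {
            clusters = new HashSet<String>();
            COLO_CLUSTER_MAP.put(cluster.getColo(), clusters);
        }
        clusters.add(cluster.getName());
    }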


    private static final Logger LOG = Logger.getLogger(LogProvider.class);

    // Resolve the instance's cluster and open its file system to locate logs.
    public Instance populateLogUrls(Entity entity, Instance instance,
                                    String runId) throws FalconException {

        Cluster clusterObj = ConfigurationStore.get().get(
                EntityType.CLUSTER, instance.cluster);
        try {
            Configuration conf = ClusterHelper.getConfiguration(clusterObj);
            // Create the file system on behalf of the end user.
            FileSystem fs = HadoopClientFactory.get().createFileSystem(conf);
            // ... (remainder of the method truncated in the original listing)

    @Override
    public void afterDelete(Entity entity, String clusterName) throws FalconException {
        try {
            Cluster cluster = EntityUtil.getEntity(EntityType.CLUSTER, clusterName);
            Path entityPath = EntityUtil.getBaseStagingPath(cluster, entity);
            LOG.info("Deleting entity path " + entityPath + " on cluster " + clusterName);

            Configuration conf = ClusterHelper.getConfiguration(cluster);
            FileSystem fs = HadoopClientFactory.get().createFileSystem(conf);
            // ... (remainder truncated in the original listing; presumably deletes
            // entityPath and wraps any IOException in a FalconException, as the
            // sibling snippets on this page do)

    public String getFeedInstanceName(String feedName, String clusterName,
                                      String feedInstancePath) throws FalconException {
        try {
            Feed feed = ConfigurationStore.get().get(EntityType.FEED, feedName);
            Cluster cluster = ConfigurationStore.get().get(EntityType.CLUSTER, clusterName);

            // Branch on storage type: table-backed vs. plain file-system feeds.
            Storage.TYPE storageType = FeedHelper.getStorageType(feed, cluster);
            return storageType == Storage.TYPE.TABLE
                    ? getTableFeedInstanceName(feed, feedInstancePath, storageType)
                    : getFileSystemFeedInstanceName(feedInstancePath, feed, cluster);
            // ... (catch block truncated in the original listing)
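The Storage.TYPE branch is the key move here: TABLE feeds are HCatalog-backed, so the instance name comes out of getTableFeedInstanceName, while FILESYSTEM feeds derive it from the feed's path template via getFileSystemFeedInstanceName.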

        // From a retention/cleanup handler (excerpt begins mid-method).
        feed = STORE.get(EntityType.FEED, feedName);
        long retention = getRetention(feed, feed.getFrequency().getTimeUnit());
        // Note the fully-qualified feed.Cluster: it is a feed's reference to a
        // cluster, not the cluster entity itself.
        for (org.apache.falcon.entity.v0.feed.Cluster cluster : feed
                .getClusters().getClusters()) {
            Cluster currentCluster = STORE.get(EntityType.CLUSTER, cluster.getName());
            // Clean up only on clusters that live in the current colo.
            if (currentCluster.getColo().equals(getCurrentColo())) {
                LOG.info("Cleaning up logs & staged data for feed: " + feedName
                        + " in cluster: " + cluster.getName() + " with retention: " + retention);
                delete(currentCluster, feed, retention);
                deleteStagedData(currentCluster, feed, retention);
            } else {
                // ... (else branch and remainder truncated in the original listing)

    public void setUp() throws Exception {
        cleanupStore();
        ConfigurationStore store = ConfigurationStore.get();

        // Publish the same cluster XML twice under different names.
        Unmarshaller unmarshaller = EntityType.CLUSTER.getUnmarshaller();
        Cluster cluster = (Cluster) unmarshaller.unmarshal(this.getClass()
                .getResourceAsStream(CLUSTER_XML));
        cluster.setName("testCluster");
        store.publish(EntityType.CLUSTER, cluster);

        cluster = (Cluster) unmarshaller.unmarshal(this.getClass()
                .getResourceAsStream(CLUSTER_XML));
        cluster.setName("backupCluster");
        store.publish(EntityType.CLUSTER, cluster);

        modifiableFeed = parser.parseAndValidate(this.getClass()
                .getResourceAsStream(FEED_XML));
    }
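Note the fixture trick above: publishing the same CLUSTER_XML under two names means testCluster and backupCluster differ only in name, giving the feed under test two clusters without a second fixture file.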

    @Test
    public void testClusterPartitionExp() throws FalconException {
        Cluster cluster = ConfigurationStore.get().get(EntityType.CLUSTER,
                "testCluster");
        // Each ${cluster.*} token should expand against the cluster entity.
        Assert.assertEquals(FeedHelper.evaluateClusterExp(cluster,
                "/*/${cluster.colo}"), "/*/" + cluster.getColo());
        Assert.assertEquals(FeedHelper.evaluateClusterExp(cluster,
                "/*/${cluster.name}/Local"), "/*/" + cluster.getName() + "/Local");
        Assert.assertEquals(FeedHelper.evaluateClusterExp(cluster,
                "/*/${cluster.field1}/Local"), "/*/value1/Local");
    }
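The last assertion resolves ${cluster.field1} against the cluster's user-defined properties, so the fixture must carry a field1=value1 property. A sketch of attaching such a property programmatically, using the JAXB-generated Properties/Property classes from the cluster schema; the null guard and helper method are illustrative assumptions:

import org.apache.falcon.entity.v0.cluster.Cluster;
import org.apache.falcon.entity.v0.cluster.Properties;
import org.apache.falcon.entity.v0.cluster.Property;

public final class ClusterPropertyExample {

    // Attach a user-defined property so expressions like ${cluster.field1}
    // can evaluate; name/value mirror the test fixture above.
    static void addProperty(Cluster cluster, String name, String value) {
        if (cluster.getProperties() == null) {
            cluster.setProperties(new Properties());
        }
        Property property = new Property();
        property.setName(name);
        property.setValue(value);
        cluster.getProperties().getProperties().add(property);
    }
}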

    @Test(expectedExceptions = ValidationException.class)
    public void testValidateClusterHasRegistryWithNoRegistryInterface() throws Exception {
        Unmarshaller unmarshaller = EntityType.CLUSTER.getUnmarshaller();
        Cluster cluster = (Cluster) unmarshaller.unmarshal(this.getClass()
                .getResourceAsStream("/config/cluster/cluster-no-registry.xml"));
        cluster.setName("badTestCluster");
        ConfigurationStore.get().publish(EntityType.CLUSTER, cluster);

        final InputStream inputStream = getClass().getResourceAsStream("/config/feed/hive-table-feed.xml");
        Feed feedWithTable = parser.parse(inputStream);
        Validity validity = modifiableFeed.getClusters().getClusters().get(0)
                .getValidity();
        feedWithTable.getClusters().getClusters().clear();

        // Point the table-backed feed at the registry-less cluster.
        org.apache.falcon.entity.v0.feed.Cluster feedCluster =
                new org.apache.falcon.entity.v0.feed.Cluster();
        feedCluster.setName(cluster.getName());
        feedCluster.setValidity(validity);
        feedWithTable.getClusters().getClusters().add(feedCluster);

        parser.validate(feedWithTable);
        Assert.fail("An exception should have been thrown: Cluster should have registry interface defined with table");
        // (failure message and remainder truncated in the original listing)
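The intent: a feed with table storage needs the cluster's registry interface to reach the Hive metastore, so validating hive-table-feed.xml against a cluster defined without one should throw the ValidationException declared on the test.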

        // Excerpt begins mid-method: republish an entity fixture by type.
        Unmarshaller unmarshaller = type.getUnmarshaller();
        store = ConfigurationStore.get();
        store.remove(type, name);
        switch (type) {
        case CLUSTER:
            Cluster cluster = (Cluster) unmarshaller.unmarshal(this.getClass().getResource(CLUSTER_XML));
            cluster.setName(name);
            // Point the cluster's write interface at the file system under test.
            ClusterHelper.getInterface(cluster, Interfacetype.WRITE).setEndpoint(conf.get("fs.default.name"));
            store.publish(type, cluster);
            break;

        case FEED:
            // ... (FEED case and remainder truncated in the original listing)

    private void initializeStagingDirs() throws Exception {
        final InputStream inputStream = getClass().getResourceAsStream("/config/feed/hive-table-feed.xml");
        Feed tableFeed = (Feed) EntityType.FEED.getUnmarshaller().unmarshal(inputStream);
        getStore().publish(EntityType.FEED, tableFeed);

        // Derive replication staging dirs on the source cluster.
        final Cluster srcCluster = dfsCluster.getCluster();
        final CatalogStorage sourceStorage = (CatalogStorage) FeedHelper.createStorage(srcCluster, tableFeed);
        String sourceStagingDir = FeedHelper.getStagingDir(srcCluster, tableFeed, sourceStorage, Tag.REPLICATION);

        sourceStagingPath1 = new Path(sourceStagingDir + "/ds=2012092400/" + System.currentTimeMillis());
        sourceStagingPath2 = new Path(sourceStagingDir + "/ds=2012092500/" + System.currentTimeMillis());

        // Same derivation on the target cluster.
        final Cluster targetCluster = targetDfsCluster.getCluster();
        final CatalogStorage targetStorage = (CatalogStorage) FeedHelper.createStorage(targetCluster, tableFeed);
        String targetStagingDir = FeedHelper.getStagingDir(targetCluster, tableFeed, targetStorage, Tag.REPLICATION);

        targetStagingPath1 = new Path(targetStagingDir + "/ds=2012092400/" + System.currentTimeMillis());
        targetStagingPath2 = new Path(targetStagingDir + "/ds=2012092500/" + System.currentTimeMillis());
        // ... (remainder of the method truncated in the original listing)
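The ds=2012092400-style segments imitate HCatalog partition directories, and the System.currentTimeMillis() suffix keeps each run's staging paths unique across test runs.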
