Package com.alibaba.otter.shared.common.model.config.pipeline

Examples of com.alibaba.otter.shared.common.model.config.pipeline.Pipeline
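Every snippet below follows the same lookup pattern: resolve the Pipeline by id through the config client service, then read typed settings off its PipelineParameter. A minimal sketch of that pattern, assuming the ConfigClientService type behind the configClientService field used throughout (the getters are the ones the snippets actually call):

    Pipeline pipeline = configClientService.findPipeline(pipelineId);
    PipelineParameter params = pipeline.getParameters();
    String destination = params.getDestinationName(); // canal destination name
    boolean ddlSync = params.getDdlSync();            // later fed to setDdlIsolation(...)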


    public void start() {
        if (running) {
            return;
        }
        // fetch the destination/filter parameters
        Pipeline pipeline = configClientService.findPipeline(pipelineId);
        filter = makeFilterExpression(pipeline);
        destination = pipeline.getParameters().getDestinationName();
        batchSize = pipeline.getParameters().getMainstemBatchsize();
        batchTimeout = pipeline.getParameters().getBatchTimeout();
        ddlSync = pipeline.getParameters().getDdlSync();
        if (pipeline.getParameters().getDumpSelector() != null) {
            dump = pipeline.getParameters().getDumpSelector();
        }

        if (pipeline.getParameters().getDumpSelectorDetail() != null) {
            dumpDetail = pipeline.getParameters().getDumpSelectorDetail();
        }

        canalServer.setCanalInstanceGenerator(new CanalInstanceGenerator() {

            public CanalInstance generate(String destination) {
                Canal canal = canalConfigClient.findCanal(destination);
                final OtterAlarmHandler otterAlarmHandler = new OtterAlarmHandler();
                otterAlarmHandler.setPipelineId(pipelineId);
                OtterContextLocator.autowire(otterAlarmHandler); // inject Spring resources
                // offset the slaveId so repeated references under different pipelineIds never collide
                long slaveId = 10000;// default base value
                if (canal.getCanalParameter().getSlaveId() != null) {
                    slaveId = canal.getCanalParameter().getSlaveId();
                }
                canal.getCanalParameter().setSlaveId(slaveId + pipelineId);
                canal.getCanalParameter().setDdlIsolation(ddlSync);

                CanalInstanceWithManager instance = new CanalInstanceWithManager(canal, filter) {

                    protected CanalHAController initHaController() {
                        HAMode haMode = parameters.getHaMode();
                        if (haMode.isMedia()) {
                            return new MediaHAController(parameters.getMediaGroup(),
                                parameters.getDbUsername(),
                                parameters.getDbPassword(),
                                parameters.getDefaultDatabaseName());
                        } else {
                            return super.initHaController();
                        }
                    }

                    protected void startEventParserInternal(CanalEventParser parser, boolean isGroup) {
                        super.startEventParserInternal(parser, isGroup);

                        if (eventParser instanceof MysqlEventParser) {
                            MysqlEventParser mysqlEventParser = (MysqlEventParser) eventParser;
                            CanalHAController haController = mysqlEventParser.getHaController();

                            if (haController instanceof MediaHAController) {
                                if (isGroup) {
                                    throw new CanalException("not support group database use media HA");
                                }

                                ((MediaHAController) haController).setCanalHASwitchable(mysqlEventParser);
                            }

                            if (!haController.isStart()) {
                                haController.start();
                            }

                            // media-based HA: fetch the database info directly from tddl
                            if (haController instanceof MediaHAController) {
                                AuthenticationInfo authenticationInfo = ((MediaHAController) haController).getAvailableAuthenticationInfo();
                                ((MysqlEventParser) eventParser).setMasterInfo(authenticationInfo);
                            }
                        }
                    }

                };
                instance.setAlarmHandler(otterAlarmHandler);

                CanalEventSink eventSink = instance.getEventSink();
                if (eventSink instanceof AbstractCanalEventSink) {
                    handler = new OtterDownStreamHandler();
                    handler.setPipelineId(pipelineId);
                    handler.setDetectingIntervalInSeconds(canal.getCanalParameter().getDetectingIntervalInSeconds());
                    OtterContextLocator.autowire(handler); // inject Spring resources
                    ((AbstractCanalEventSink) eventSink).addHandler(handler, 0); // add at the head of the handler chain
                    handler.start();
                }

                return instance;
            }
        });
        canalServer.start(); // start the embedded canal server

        canalServer.start(destination); // then start the instance for this destination
        this.clientIdentity = new ClientIdentity(destination, pipeline.getParameters().getMainstemClientId(), filter);
        canalServer.subscribe(clientIdentity);// issue a subscription

        running = true;
    }
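A quick worked example of the slaveId derivation above, with a hypothetical pipeline id:

    long slaveId = 10000;                         // default base from the snippet
    long pipelineId = 42;                         // hypothetical pipeline id
    long effectiveSlaveId = slaveId + pipelineId; // 10042 -- unique per pipeline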


     * 3. Filters out data from the retl.xdual canal heartbeat table
     * </pre>
     */
    public List<EventData> parse(Long pipelineId, List<Entry> datas) throws SelectException {
        List<EventData> eventDatas = new ArrayList<EventData>();
        Pipeline pipeline = configClientService.findPipeline(pipelineId);
        List<Entry> transactionDataBuffer = new ArrayList<Entry>();
        // hz is the primary site: us->hz data must be looped back to us, with the loopback remedy algorithm enabled
        PipelineParameter pipelineParameter = pipeline.getParameters();
        boolean enableLoopbackRemedy = pipelineParameter.isEnableRemedy() && pipelineParameter.isHome()
                                       && pipelineParameter.getRemedyAlgorithm().isLoopback();
        boolean isLoopback = false;
        boolean needLoopback = false; // whether this is loopback data that still needs handling: only loops produced by normal otter sync qualify, since business callers may suppress sync manually to avoid loops

        long now = new Date().getTime();
        try {
            for (Entry entry : datas) {
                switch (entry.getEntryType()) {
                    case TRANSACTIONBEGIN:
                        isLoopback = false;
                        break;
                    case ROWDATA:
                        String tableName = entry.getHeader().getTableName();
                        // check whether this is the loopback mark table retl_mark
                        boolean isMarkTable = tableName.equalsIgnoreCase(pipeline.getParameters().getSystemMarkTable());
                        if (isMarkTable) {
                            RowChange rowChange = RowChange.parseFrom(entry.getStoreValue());
                            if (!rowChange.getIsDdl()) {
                                int loopback = checkLoopback(pipeline, rowChange.getRowDatas(0));
                                if (loopback == 2) {
                                    needLoopback = true; // only loops produced by normal sync need remedy
                                }

                                isLoopback |= loopback > 0;
                            }
                        }

                        // also check the otter 3.0 loopback table; its schema is fairly arbitrary, so no schema comparison is done
                        boolean isCompatibleLoopback = tableName.equalsIgnoreCase(compatibleMarkTable);

                        if (isCompatibleLoopback) {
                            RowChange rowChange = RowChange.parseFrom(entry.getStoreValue());
                            if (!rowChange.getIsDdl()) {
                                int loopback = checkCompatibleLoopback(pipeline, rowChange.getRowDatas(0));
                                if (loopback == 2) {
                                    needLoopback = true; // only loops produced by normal sync need remedy
                                }
                                isLoopback |= loopback > 0;
                            }
                        }

                        if ((!isLoopback || (enableLoopbackRemedy && needLoopback)) && !isMarkTable
                            && !isCompatibleLoopback) {
                            transactionDataBuffer.add(entry);
                        }
                        break;
                    case TRANSACTIONEND:
                        if (!isLoopback || (enableLoopbackRemedy && needLoopback)) {
                            // parse the buffered entries
                            for (Entry bufferEntry : transactionDataBuffer) {
                                List<EventData> parseDatas = internParse(pipeline, bufferEntry);
                                if (CollectionUtils.isEmpty(parseDatas)) {// may be empty: DDL entries yield null
                                    continue;
                                }

                                // roughly estimate the per-event size
                                long totalSize = bufferEntry.getHeader().getEventLength();
                                long eachSize = totalSize / parseDatas.size();
                                for (EventData eventData : parseDatas) {
                                    if (eventData == null) {
                                        continue;
                                    }

                                    eventData.setSize(eachSize);// record the size
                                    if (needLoopback) {// entries that need loopback sync
                                        // if the delay exceeds the configured threshold, mark the row for a DB lookback
                                        if (now - eventData.getExecuteTime() > 1000 * pipeline.getParameters()
                                            .getRemedyDelayThresoldForMedia()) {
                                            eventData.setSyncConsistency(SyncConsistency.MEDIA);
                                        } else {
                                            eventData.setSyncConsistency(SyncConsistency.BASE);
                                        }
                                        eventData.setRemedy(true);
                                    }
                                    eventDatas.add(eventData);
                                }
                            }
                        }

                        isLoopback = false;
                        needLoopback = false;
                        transactionDataBuffer.clear();
                        break;
                    default:
                        break;
                }
            }

            // flush the final buffer: there may be no trailing TRANSACTIONEND
            if (!isLoopback || (enableLoopbackRemedy && needLoopback)) {
                // parse the buffered entries
                for (Entry bufferEntry : transactionDataBuffer) {
                    List<EventData> parseDatas = internParse(pipeline, bufferEntry);
                    if (CollectionUtils.isEmpty(parseDatas)) {// may be empty: DDL entries yield null
                        continue;
                    }

                    // roughly estimate the per-event size
                    long totalSize = bufferEntry.getHeader().getEventLength();
                    long eachSize = totalSize / parseDatas.size();
                    for (EventData eventData : parseDatas) {
                        if (eventData == null) {
                            continue;
                        }

                        eventData.setSize(eachSize);// record the size
                        if (needLoopback) {// entries that need loopback sync
                            // if the delay exceeds the configured threshold, mark the row for a DB lookback
                            if (now - eventData.getExecuteTime() > 1000 * pipeline.getParameters()
                                .getRemedyDelayThresoldForMedia()) {
                                eventData.setSyncConsistency(SyncConsistency.MEDIA);
                            } else {
                                eventData.setSyncConsistency(SyncConsistency.BASE);
                            }
                            eventData.setRemedy(true);
                        }
                        eventDatas.add(eventData);
                    }
                }
            }
        } catch (Exception e) {
            // completion of the truncated snippet: the tail mirrors the
            // TRANSACTIONEND block above; the exception wrapping is assumed
            throw new SelectException(e);
        }

        return eventDatas;
    }
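The gating condition (!isLoopback || (enableLoopbackRemedy && needLoopback)) appears three times in parse(). A hypothetical helper, shown only to make the intent explicit:

    // Keep an entry unless it is loopback data -- except when the loopback
    // remedy is enabled and the loop was produced by normal otter sync.
    private boolean shouldKeep(boolean isLoopback, boolean enableLoopbackRemedy, boolean needLoopback) {
        return !isLoopback || (enableLoopbackRemedy && needLoopback);
    }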

        Assert.notNull(urlFormat); // tail of an init method: a urlFormat template must be configured
    }

    public String getUrl(Long pipelineId, String filePath) {
        Node node = configClientService.currentNode();
        Pipeline pipeline = configClientService.findPipeline(pipelineId);
        String ip = node.getIp();
        if (pipeline.getParameters().getUseExternalIp()) {
            ip = node.getParameters().getExternalIp();

            if (StringUtils.isEmpty(ip)) {
                throw new ArchiveException(String.format("pipelineId:%s useExternalIp by nid[%s] has no external ip",
                                                         String.valueOf(pipelineId), String.valueOf(node.getId())));
            }
        }
        // remainder truncated in the original example; presumably the method
        // formats the urlFormat template with the resolved ip and filePath

        HttpPipeKey key = new HttpPipeKey();
        key.setUrl(remoteUrlBuilder.getUrl(rowBatch.getIdentity().getPipelineId(), filename));
        key.setDataType(PipeDataType.DB_BATCH);
        key.setIdentity(rowBatch.getIdentity());
        Pipeline pipeline = configClientService.findPipeline(rowBatch.getIdentity().getPipelineId());
        if (pipeline.getParameters().getUseFileEncrypt()) {
            // encrypt the archive file
            EncryptedData encryptedData = encryptFile(file);
            key.setKey(encryptedData.getKey());
            key.setCrc(encryptedData.getCrc());
        }
        // remainder of the method truncated in the original example
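One hedged note on the flag checks above: if getUseFileEncrypt() (like getUseExternalIp() earlier) returns a boxed Boolean, an unset value would throw a NullPointerException when the if-statement unboxes it. A null-safe variant:

    if (Boolean.TRUE.equals(pipeline.getParameters().getUseFileEncrypt())) {
        // encrypt only when the flag is explicitly set to true
    }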

    }

    // process the corresponding dbBatch
    private DbBatch getDbBatch(HttpPipeKey key) {
        String dataUrl = key.getUrl();
        Pipeline pipeline = configClientService.findPipeline(key.getIdentity().getPipelineId());
        DataRetriever dataRetriever = dataRetrieverFactory.createRetriever(pipeline.getParameters().getRetriever(),
                                                                           dataUrl, downloadDir);
        File archiveFile = null;
        try {
            dataRetriever.connect();
            dataRetriever.doRetrieve();
            // remainder truncated in the original example
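The truncated tail of this try block presumably releases the retriever. A hedged sketch of the usual shape for connect/retrieve APIs like this one (the abort/disconnect names and PipeException are assumptions, not confirmed by the snippet):

    try {
        dataRetriever.connect();
        dataRetriever.doRetrieve();
    } catch (Exception e) {
        dataRetriever.abort();                        // assumed cleanup hook
        throw new PipeException("download_error", e); // assumed wrapper
    } finally {
        dataRetriever.disconnect();                   // assumed release hook
    }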

    private DbLoadContext buildContext(Identity identity) {
        DbLoadContext context = new DbLoadContext();
        context.setIdentity(identity);
        Channel channel = configClientService.findChannel(identity.getChannelId());
        Pipeline pipeline = configClientService.findPipeline(identity.getPipelineId());
        context.setChannel(channel);
        context.setPipeline(pipeline);
        return context;
    }

    }

    // adjust the thread pool size
    private void adjustPoolSize(DbLoadContext context) {
        Pipeline pipeline = context.getPipeline();
        int newPoolSize = pipeline.getParameters().getLoadPoolSize();
        if (newPoolSize != poolSize) {
            poolSize = newPoolSize;
            if (executor instanceof ThreadPoolExecutor) {
                ThreadPoolExecutor pool = (ThreadPoolExecutor) executor;
                pool.setCorePoolSize(newPoolSize);
            }
        }
    }
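A hedged variant of the resize above: recent JDKs reject a core size larger than the current maximum, so when growing past the ceiling it is safer to raise the maximum first (a sketch, not what the original does):

    if (executor instanceof ThreadPoolExecutor) {
        ThreadPoolExecutor pool = (ThreadPoolExecutor) executor;
        if (newPoolSize > pool.getMaximumPoolSize()) {
            pool.setMaximumPoolSize(newPoolSize); // grow the ceiling first
        }
        pool.setCorePoolSize(newPoolSize);
    }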

    private void adjustConfig(DbLoadContext context) {
        Pipeline pipeline = context.getPipeline();
        this.useBatch = pipeline.getParameters().isUseBatch();
    }

     * @return
     */
    private List<FileData> doFileExtract(RowBatch rowBatch) {
        List<FileData> fileDatas = new ArrayList<FileData>();
        // process the row data
        Pipeline pipeline = getPipeline(rowBatch.getIdentity().getPipelineId());
        List<EventData> eventDatas = rowBatch.getDatas();
        for (EventData eventData : eventDatas) {
            List<DataMediaPair> dataMediaPairs = ConfigHelper.findDataMediaPairByMediaId(pipeline,
                                                                                         eventData.getTableId());
            if (dataMediaPairs == null) {
                throw new ExtractException("ERROR ## the dataMediaId = " + eventData.getTableId()
                                           + " dataMediaPair is null,please check");
            }

            for (DataMediaPair dataMediaPair : dataMediaPairs) {
                if (dataMediaPair.getResolverData() == null
                    || dataMediaPair.getResolverData().getExtensionDataType() == null
                    || (dataMediaPair.getResolverData().getExtensionDataType().isClazz() && StringUtils.isBlank(dataMediaPair.getResolverData().getClazzPath()))
                    || (dataMediaPair.getResolverData().getExtensionDataType().isSource() && StringUtils.isBlank(dataMediaPair.getResolverData().getSourceText()))) {
                    continue;
                }

                FileResolver fileResolver = null;

                if (dataMediaPair.getResolverData() != null) { // always true here: nulls were filtered above
                    fileResolver = extensionFactory.getExtension(FileResolver.class, dataMediaPair.getResolverData());
                } else {
                    continue;
                }

                if (fileResolver == null) {
                    throw new ExtractException("ERROR ## the dataMediaId = " + eventData.getTableId()
                                               + " the fileResolver className  = "
                                               + dataMediaPair.getResolverData().getClazzPath()
                                               + " is null ,please check the class");
                }

                if (fileResolver instanceof RemoteDirectoryFetcherAware) {
                    RemoteDirectoryFetcherAware remoteDirectoryFetcherAware = (RemoteDirectoryFetcherAware) fileResolver;
                    remoteDirectoryFetcherAware.setRemoteDirectoryFetcher(arandaRemoteDirectoryFetcher);
                }

                List<FileData> singleRowFileDatas = getSingleRowFileInfos(dataMediaPair.getId(), fileResolver,
                                                                          eventData);
                // de-duplicate before collecting
                for (FileData data : singleRowFileDatas) {
                    if (!fileDatas.contains(data)) {
                        fileDatas.add(data);
                    }
                }
            }
        }

        // check whether duplicate-image sync detection is required
        if (pipeline.getParameters().getFileDetect()) {
            doFileDetectCollector(pipeline, fileDatas);
        }
        return fileDatas;
    }
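The de-duplication above calls List.contains once per candidate, which is O(n^2) overall. A sketch of the same result with a LinkedHashSet, assuming FileData implements hashCode consistently with the equals that List.contains already relies on:

    Set<FileData> seen = new LinkedHashSet<FileData>();
    for (FileData data : singleRowFileDatas) {
        seen.add(data); // duplicates dropped in O(1) per element
    }
    List<FileData> fileDatas = new ArrayList<FileData>(seen);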

    @Override
    public void extract(DbBatch dbBatch) throws ExtractException {
        Assert.notNull(dbBatch);
        Assert.notNull(dbBatch.getRowBatch());

        Pipeline pipeline = getPipeline(dbBatch.getRowBatch().getIdentity().getPipelineId());
        List<DataMediaPair> dataMediaPairs = pipeline.getPairs();

        /**
         * Key = tableId<br>
         * Value = the list of this table's columns that need to be synced<br>
         */
 
        // remainder truncated in the original example
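The javadoc above sketches the lookup the method builds next. A hedged guess at its shape (the value type is an assumption; the real code may carry a richer column descriptor than plain names):

    Map<Long, List<String>> needSyncColumnsByTableId = new HashMap<Long, List<String>>();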
