Examples of PhysicalPartition


Examples of com.linkedin.databus.core.data_model.PhysicalPartition

                                    ReplicationBitSetterStaticConfig replicationBitSetter)
  {
    super();
    _name = name;
    _id = id;
    _partiton = new PhysicalPartition(id, _name);
    _uri = uri;
    _source = new PhysicalSource(uri, role, resourceKey);
    _sources = sources;
    _slowSourceQueryThreshold = slowSourceQueryThreshold;
    _restartScnOffset = restartScnOffset;
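The constructor fragment above derives a PhysicalPartition from a physical source's configured id and name (_partiton = new PhysicalPartition(id, _name)). As a minimal sketch of the class on its own, assuming only the (Integer id, String name) constructor and the getId()/toSimpleString() accessors that appear in the snippets on this page (the exact toSimpleString() format is not shown):

    import com.linkedin.databus.core.data_model.PhysicalPartition;

    public class PhysicalPartitionSketch
    {
      public static void main(String[] args)
      {
        // Same (id, name) pair used in the buffer tests further down this page.
        PhysicalPartition partition = new PhysicalPartition(100, "multBufferTest1");

        System.out.println(partition.getId());           // 100
        System.out.println(partition.toSimpleString());  // compact id/name form; exact format assumed
      }
    }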

Examples of com.linkedin.databus.core.data_model.PhysicalPartition

    // generate testData
    int scn = 100;
    String srcName = "srcName";
    int srcId = 1;
    PhysicalSource pS = pStatConf1.getPhysicalSource();
    PhysicalPartition pP = pStatConf1.getPhysicalPartition();
    _events = new TestDbusEvent [20];
    LogicalPartition lP = new LogicalPartition((short)0);
    for(int i=0; i<_events.length; i++) {
        _events[i] = new TestDbusEvent(i, scn,
                                       new LogicalSource(srcId, srcName+srcId),

Examples of com.linkedin.databus.core.data_model.PhysicalPartition

     createBufMult();

     PhysicalSourceStaticConfig pStatConf1 = convertToPhysicalSourceConfig(_configSource1).build();
     PhysicalSourceStaticConfig pStatConf2 = convertToPhysicalSourceConfig(_configSource2).build();

     PhysicalPartition pP = pStatConf1.getPhysicalPartition();
     DbusEventBufferAppendable buf = _eventBufferMult.getDbusEventBufferAppendable(pP);

     buf.startEvents();
     byte [] schema = "abcdefghijklmnop".getBytes(Charset.defaultCharset());
     assertTrue(buf.appendEvent(new DbusEventKey(1), (short)100, (short)0,

Examples of com.linkedin.databus.core.data_model.PhysicalPartition

    LOG.info("Reading events from " + inputSrcIds);

    // create checkpoints
    CheckpointMult cpMult = new CheckpointMult();
    for(PhysicalSourceStaticConfig pConf : _pConfigs) {
      PhysicalPartition pPart = pConf.getPhysicalPartition();
      Checkpoint cp;
      if(cpMult.getCheckpoint(pPart) == null) { // needs a new checkpoint
        cp = new Checkpoint();
        cp.setFlexible();
        cpMult.addCheckpoint(pPart, cp);
      }
    }

    DbusEventBufferBatchReadable read =
      _eventBufferMult.getDbusEventBufferBatchReadable(srcIds, cpMult, null);

    int totalRead = 0;
    int numEventsRead = Integer.MAX_VALUE;
    int numNonControlEventsRead = 0;
    int maxIterNum = 100, iterCount = 0;
    String prevEvent = "";
    while(numEventsRead > 0) {
      if(iterCount++ > maxIterNum) {
        fail("Checkpoint doesn't work - it is a never-ending loop");
      }
      ByteArrayOutputStream jsonOut = new ByteArrayOutputStream();
      WritableByteChannel jsonOutChannel = Channels.newChannel(jsonOut);

      numEventsRead = read.streamEvents(false, batchFetchSize, jsonOutChannel,
                                        Encoding.JSON_PLAIN_VALUE,  new SourceDbusFilter(srcIds)).getNumEventsStreamed();

      totalRead += numEventsRead;
      LOG.info("read for " + inputSrcIds + ": " + numEventsRead + " events");
      byte[] jsonBytes = jsonOut.toByteArray();
      if(jsonBytes.length == 0)
        break; // nothing more to read
      String jsonString = new String(jsonBytes);
      String [] jsonStrings = jsonString.split("\n");
      assertEquals(jsonStrings.length, numEventsRead);

      ObjectMapper mapper = new ObjectMapper();

      for(int i=0; i<jsonStrings.length; i++) {
        // verify what was written
        String evtStr = jsonStrings[i];
        if (evtStr.equals(prevEvent)) {
          // It may so happen that we receive the same event twice, especially when the
          // offered buffer is small. This check gets around the issue.
          continue;
        }
        prevEvent = evtStr;
        Map<String, Object> jsonMap = mapper.readValue(evtStr,
                                                       new TypeReference<Map<String, Object>>(){});
        //assertEquals(jsonMap.size(), 10);
        Integer srcId = (Integer)jsonMap.get("srcId");
        if(!DbusEventUtils.isControlSrcId(srcId)) { // not a control message
          numNonControlEventsRead++;
          Integer physicalPartitionId = (Integer)jsonMap.get("physicalPartitionId");
          Integer logicalPartitionId = (Integer)jsonMap.get("logicalPartitionId");
          PhysicalPartition pPartition = _eventBufferMult.getPhysicalPartition(srcId,
                                         new LogicalPartition(logicalPartitionId.shortValue()));
          LOG.info("EVENT: " + jsonMap.toString());
          assertTrue( srcIds.contains(srcId), "src id " + srcId + " doesn't match to " + inputSrcIds);
          assertEquals(physicalPartitionId, pPartition.getId(), "physical partition id didn't match");
        } else {
          LOG.info("Control event: " + jsonMap.toString());
        }
      }
    }

Examples of com.linkedin.databus.core.data_model.PhysicalPartition

    createBufMult();
    addEvents();
    Integer pPartitionId = 100;

    // get buffer by partition id
    PhysicalPartition pPartition = new PhysicalPartition(pPartitionId, "multBufferTest1");
    DbusEventBufferAppendable buf = _eventBufferMult.getDbusEventBufferAppendable(pPartition);
    assertNotNull(buf, "cannot get by pPartition " + pPartition);

    // get buffer by physical partition
    buf = _eventBufferMult.getDbusEventBufferAppendable(pPartition);

Examples of com.linkedin.databus.core.data_model.PhysicalPartition

  @Test
  public void testReset() throws Exception {
    createBufMult();
    addEvents();
    Integer pPartitionId = 100;
    PhysicalPartition pPartition = new PhysicalPartition(pPartitionId, "multBufferTest1");

    _eventBufferMult.resetBuffer(pPartition, 108L);
    DbusEventBufferAppendable eventBuffer = _eventBufferMult.getDbusEventBufferAppendable(pPartition);

    Assert.assertTrue(eventBuffer.empty());
    pPartition = new PhysicalPartition(pPartitionId, "unknownBuffer");
    boolean caughtException = false;
    try
    {
      _eventBufferMult.resetBuffer(pPartition, -1L);
    }
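The testReset excerpt is cut off inside the try block around resetBuffer for a partition ("unknownBuffer") that has no backing buffer. Judging from the caughtException flag, the test presumably continues with a catch clause and an assertion; a minimal sketch of that idiom, assuming only that resetBuffer throws for an unknown partition (the concrete exception type is not visible in the excerpt):

    boolean caughtException = false;
    try
    {
      _eventBufferMult.resetBuffer(pPartition, -1L);
    }
    catch (Exception e)   // exact exception type assumed
    {
      caughtException = true;
    }
    Assert.assertTrue(caughtException, "resetBuffer should fail for a partition without a buffer");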

Examples of com.linkedin.databus.core.data_model.PhysicalPartition

  }

  /** overrides HTTP relay method */
  @Override
  public void removeOneProducer(PhysicalSourceStaticConfig pConfig) {
    PhysicalPartition pPartition = pConfig.getPhysicalPartition();

    List<EventProducer> plist = new ArrayList<EventProducer>();

    if (_producers != null && _producers.containsKey(pPartition))
      plist.add(_producers.remove(pPartition));

Examples of com.linkedin.databus.core.data_model.PhysicalPartition

      UnsupportedKeyException, SQLException, InvalidConfigException {

    // Register a command to allow start/stop/status of the relay
    List<EventProducer> plist = new ArrayList<EventProducer>();

    PhysicalPartition pPartition = pConfig.getPhysicalPartition();
    MaxSCNReaderWriter maxScnReaderWriters = _maxScnReaderWriters
        .getOrCreateHandler(pPartition);
    LOG.info("Starting server container with maxScnReaderWriter:"
        + maxScnReaderWriters);

    // Get the event buffer
    DbusEventBufferAppendable dbusEventBuffer = getEventBuffer()
        .getDbusEventBufferAppendable(pPartition);

    // Get the schema registry service
    SchemaRegistryService schemaRegistryService = getSchemaRegistryService();

    // Get a stats collector per physical source
    addPhysicalPartitionCollectors(pPartition);
    String statsCollectorName = pPartition.toSimpleString();
    /*
     * _inBoundStatsCollectors.addStatsCollector(statsCollectorName, new
     * DbusEventsStatisticsCollector(getContainerStaticConfig().getId(),
     * statsCollectorName+".inbound", true, false, getMbeanServer()));
     *
     * _outBoundStatsCollectors.addStatsCollector(statsCollectorName, new
     * DbusEventsStatisticsCollector(getContainerStaticConfig().getId(),
     * statsCollectorName+".outbound", true, false, getMbeanServer()));
     */

    // Create the event producer
    String uri = pConfig.getUri();
    if(uri == null)
      throw new DatabusException("Uri is required to start the relay");
    uri = uri.trim();
    EventProducer producer = null;
    if (uri.startsWith("jdbc:")) {
      SourceType sourceType = pConfig.getReplBitSetter().getSourceType();
          if (SourceType.TOKEN.equals(sourceType))
            throw new DatabusException("Token Source-type for Replication bit setter config cannot be set for trigger-based Databus relay !!");

      // if a buffer for this partition exists - we are overwriting it
      producer = new OracleEventProducerFactory().buildEventProducer(
          pConfig, schemaRegistryService, dbusEventBuffer,
          getMbeanServer(), _inBoundStatsCollectors
              .getStatsCollector(statsCollectorName),
          maxScnReaderWriters);
    } else if (uri.startsWith("mock")) {
      // Get all relevant pConfig attributes
      //TODO add real instantiation
      EventProducerServiceProvider mockProvider = _producersRegistry.getEventProducerServiceProvider("mock");
      if (null == mockProvider)
      {
        throw new DatabusRuntimeException("relay event producer not available: " + "mock");
      }
      producer = mockProvider.createProducer(pConfig, schemaRegistryService,
                                             dbusEventBuffer,
                                             _inBoundStatsCollectors
                                                     .getStatsCollector(statsCollectorName),
                                             maxScnReaderWriters);
    } else if (uri.startsWith("gg:")){
      producer = new GoldenGateEventProducer(pConfig,
                                             schemaRegistryService,
                                             dbusEventBuffer,
                                             _inBoundStatsCollectors
                                                 .getStatsCollector(statsCollectorName),
                                             maxScnReaderWriters);

    } else if (uri.startsWith("mysql:")){
       LOG.info("Adding OpenReplicatorEventProducer for uri :" + uri);
       final String serviceName = "or";
       EventProducerServiceProvider orProvider = _producersRegistry.getEventProducerServiceProvider(serviceName);
       if (null == orProvider)
       {
         throw new DatabusRuntimeException("relay event producer not available: " + serviceName);
       }
       producer = orProvider.createProducer(pConfig, schemaRegistryService,
                                            dbusEventBuffer,
                                            _inBoundStatsCollectors.getStatsCollector(statsCollectorName),
                                            maxScnReaderWriters);
    } else
     {
      // Get all relevant pConfig attributes and initialize the nettyThreadPool objects
      RelayEventProducer.DatabusClientNettyThreadPools nettyThreadPools =
            new RelayEventProducer.DatabusClientNettyThreadPools(0,getNetworkTimeoutTimer(),getBossExecutorService(),
                                        getIoExecutorService(), getHttpChannelGroup());
      producer = new RelayEventProducer(pConfig, dbusEventBuffer,
          _inBoundStatsCollectors
              .getStatsCollector(statsCollectorName),
          maxScnReaderWriters,nettyThreadPools);
    }

    // if a buffer for this partition exists - we are overwriting it.
    _producers.put(pPartition, producer);

    plist.add(producer);
    // append 'monitoring event producer'
    if (producer instanceof OracleEventProducer) {
      MonitoringEventProducer monitoringProducer = new MonitoringEventProducer(
          "dbMonitor." + pPartition.toSimpleString(),
          pConfig.getName(), pConfig.getUri(),
          ((OracleEventProducer) producer).getMonitoredSourceInfos(),
          getMbeanServer());
      _monitoringProducers.put(pPartition, monitoringProducer);
      plist.add(monitoringProducer);
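The addOneProducer excerpt above chooses an event-producer implementation by inspecting the URI scheme of the physical source and registers it in _producers keyed by the PhysicalPartition. A condensed sketch of just the scheme dispatch, with the Databus producer construction collapsed to descriptive strings; the class and method names, the return values, and the use of IllegalArgumentException are illustrative only (the original throws DatabusException):

    // Condensed view of the URI-scheme dispatch in the snippet above.
    class ProducerUriDispatchSketch
    {
      static String producerKindForUri(String uri)
      {
        if (uri == null)
          throw new IllegalArgumentException("Uri is required to start the relay");
        uri = uri.trim();

        if (uri.startsWith("jdbc:"))  return "oracle-trigger";    // OracleEventProducerFactory
        if (uri.startsWith("mock"))   return "mock";              // "mock" provider from _producersRegistry
        if (uri.startsWith("gg:"))    return "goldengate";        // GoldenGateEventProducer
        if (uri.startsWith("mysql:")) return "open-replicator";   // "or" provider from _producersRegistry
        return "chained-relay";                                    // RelayEventProducer over Netty
      }
    }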

Examples of com.linkedin.databus.core.data_model.PhysicalPartition

  @Override
  protected void doShutdown() {
    LOG.warn("Shutting down Relay!");
    for (Entry<PhysicalPartition, EventProducer> entry : _producers
        .entrySet()) {
      PhysicalPartition pPartition = entry.getKey();
      EventProducer producer = entry.getValue();

      if (null != producer
          && (producer.isRunning() || producer.isPaused())) {
        producer.shutdown();

Examples of com.linkedin.databus.core.data_model.PhysicalPartition

    for(int id: pSrcIds.keySet()) {
      Checkpoint cp = new Checkpoint();
      cp.setWindowOffset(pSrcIds.get(id).get(0).longValue());
      cp.setWindowScn((long)pSrcIds.get(id).get(1));
      PhysicalPartition pPart = new PhysicalPartition(id, "name");
      cpMult.addCheckpoint(pPart, cp);
    }
    return cpMult;
  }
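Here, as in the buffer and producer examples above, lookups are performed with freshly constructed PhysicalPartition instances: getDbusEventBufferAppendable and resetBuffer are handed new PhysicalPartition(100, "multBufferTest1") and still locate buffers created from configuration, and _producers, _monitoringProducers, and CheckpointMult all key their maps on the partition. That usage pattern presumes value-based equals/hashCode over (id, name); a minimal sketch of the assumption (the equality semantics are inferred from the usage, not shown in the excerpts):

    import java.util.HashMap;
    import java.util.Map;
    import com.linkedin.databus.core.data_model.PhysicalPartition;

    public class PartitionKeySketch
    {
      public static void main(String[] args)
      {
        Map<PhysicalPartition, String> buffers = new HashMap<PhysicalPartition, String>();
        buffers.put(new PhysicalPartition(100, "multBufferTest1"), "buffer-A");

        // A separately constructed key with the same (id, name) must find the entry,
        // or lookups such as cpMult.getCheckpoint(pPart) could not work as used above.
        System.out.println(buffers.get(new PhysicalPartition(100, "multBufferTest1")));
        // expected: buffer-A, assuming value-based equals/hashCode
      }
    }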