Examples of DataContainer


Examples of com.github.nmorel.gwtjackson.benchmark.client.data.DataContainer

            panel.addStyleName( colClass );
            resultRow.add( panel );
        }

        // initialize data
        final DataContainer container = DataProvider.generateData( criteria.getNbItems() );

        // prepare the test
        final List<Operation> operations = new ArrayList<>();
        for ( Mechanism mechanism : mechanisms ) {
            operations.addAll( mechanism.prepare( container, criteria ) );

Examples of com.ibm.xsp.model.DataContainer

    /* (non-Javadoc)
     * @see com.ibm.xsp.model.AbstractDataSource#save(javax.faces.context.FacesContext, boolean)
     */
    @Override
    public boolean save(FacesContext context, boolean force) throws FacesExceptionEx {
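        // unwrap the data container down to the FileServiceAccessor to reach the uploaded file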
        DataContainer container = getDataContainer();
        if(container instanceof Container){
            DataAccessor accessor = ((Container)container).getDataAccessor();
            if(accessor instanceof FileServiceAccessor){
                UploadedFile uploadedFile = (UploadedFile)((FileServiceAccessor)accessor).getUploadedFile();
                if(uploadedFile != null){

Examples of com.ovea.jetty.session.serializer.jboss.serial.objectmetamodel.DataContainer

        dataContainer.getCache().reset();
    }

    private void checkOutput() throws IOException {
        if (objectOutput == null) {
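            // first call: create the data container and make sure the target stream is a DataOutputStream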
            dataContainer = new DataContainer(null, this.getSubstitutionInterface(), checkSerializableClass, buffer, classDescriptorStrategy, objectDescriptorStrategy);
            if (output instanceof DataOutputStream) {
                dataOutput = (DataOutputStream) output;
            } else {
                dataOutput = new DataOutputStream(output);
            }

Examples of org.apache.hadoop.hive.ql.hooks.LineageInfo.DataContainer

            }
          }
        }

        // Create a data container
        DataContainer dc = null;
        if (tbd.getPartitionSpec().size() == 0) {
          dc = new DataContainer(table.getTTable());
          db.loadTable(new Path(tbd.getSourceDir()), tbd.getTable()
              .getTableName(), tbd.getReplace(), tbd.getHoldDDLTime());
          if (work.getOutputs() != null) {
            work.getOutputs().add(new WriteEntity(table, true));
          }
        } else {
          LOG.info("Partition is: " + tbd.getPartitionSpec().toString());

          // Check if the bucketing and/or sorting columns were inferred
          List<BucketCol> bucketCols = null;
          List<SortCol> sortCols = null;
          int numBuckets = -1;
          Task task = this;
          String path = tbd.getSourceDir();
          // Find the first ancestor of this MoveTask which is some form of map reduce task
          // (Either standard, local, or a merge)
          while (task.getParentTasks() != null && task.getParentTasks().size() == 1) {
            task = (Task)task.getParentTasks().get(0);
            // If it was a merge task or a local map reduce task, nothing can be inferred
            if (task instanceof BlockMergeTask || task instanceof MapredLocalTask) {
              break;
            }

            // If it's a standard map reduce task, check what, if anything, it inferred about
            // the directory this move task is moving
            if (task instanceof MapRedTask) {
              MapredWork work = (MapredWork)task.getWork();
              MapWork mapWork = work.getMapWork();
              bucketCols = mapWork.getBucketedColsByDirectory().get(path);
              sortCols = mapWork.getSortedColsByDirectory().get(path);
              if (work.getReduceWork() != null) {
                numBuckets = work.getReduceWork().getNumReduceTasks();
              }

              if (bucketCols != null || sortCols != null) {
                // This must be a final map reduce task (the task containing the file sink
                // operator that writes the final output)
                assert work.isFinalMapRed();
              }
              break;
            }

            // If it's a move task, get the path the files were moved from, this is what any
            // preceding map reduce task inferred information about, and moving does not invalidate
            // those assumptions
            // This can happen when a conditional merge is added before the final MoveTask, but the
            // condition for merging is not met, see GenMRFileSink1.
            if (task instanceof MoveTask) {
              if (((MoveTask)task).getWork().getLoadFileWork() != null) {
                path = ((MoveTask)task).getWork().getLoadFileWork().getSourceDir();
              }
            }
          }
          // deal with dynamic partitions
          DynamicPartitionCtx dpCtx = tbd.getDPCtx();
          if (dpCtx != null && dpCtx.getNumDPCols() > 0) { // dynamic partitions

            List<LinkedHashMap<String, String>> dps = Utilities.getFullDPSpecs(conf, dpCtx);

            // publish DP columns to its subscribers
            if (dps != null && dps.size() > 0) {
              pushFeed(FeedType.DYNAMIC_PARTITIONS, dps);
            }

            // load the list of DP partitions and return the list of partition specs
            // TODO: In a follow-up to HIVE-1361, we should refactor loadDynamicPartitions
            // to use Utilities.getFullDPSpecs() to get the list of full partSpecs.
            // After that check the number of DPs created to not exceed the limit and
            // iterate over it and call loadPartition() here.
            // The reason we don't do this inside HIVE-1361 is that the latter is large and we
            // want to isolate any potential issue it may introduce.
            ArrayList<LinkedHashMap<String, String>> dp =
              db.loadDynamicPartitions(
                  new Path(tbd.getSourceDir()),
                  tbd.getTable().getTableName(),
                  tbd.getPartitionSpec(),
                  tbd.getReplace(),
                  dpCtx.getNumDPCols(),
                  tbd.getHoldDDLTime(),
                  isSkewedStoredAsDirs(tbd));

            if (dp.size() == 0 && conf.getBoolVar(HiveConf.ConfVars.HIVE_ERROR_ON_EMPTY_PARTITION)) {
              throw new HiveException("This query creates no partitions." +
                  " To turn off this error, set hive.error.on.empty.partition=false.");
            }

            // for each partition spec, get the partition
            // and put it to WriteEntity for post-exec hook
            for (LinkedHashMap<String, String> partSpec: dp) {
              Partition partn = db.getPartition(table, partSpec, false);

              if (bucketCols != null || sortCols != null) {
                updatePartitionBucketSortColumns(table, partn, bucketCols, numBuckets, sortCols);
              }

              WriteEntity enty = new WriteEntity(partn, true);
              if (work.getOutputs() != null) {
                work.getOutputs().add(enty);
              }
              // Need to update the queryPlan's outputs as well so that post-exec hooks get executed.
              // This is only needed for dynamic partitioning since for SP the WriteEntity is
              // constructed at compile time and the queryPlan already contains that.
              // For DP, WriteEntity creation is deferred at this stage so we need to update
              // queryPlan here.
              if (queryPlan.getOutputs() == null) {
                queryPlan.setOutputs(new HashSet<WriteEntity>());
              }
              queryPlan.getOutputs().add(enty);

              // update columnar lineage for each partition
              dc = new DataContainer(table.getTTable(), partn.getTPartition());

              if (SessionState.get() != null) {
                SessionState.get().getLineageState().setLineage(tbd.getSourceDir(), dc,
                    table.getCols());
              }

              console.printInfo("\tLoading partition " + partSpec);
            }
            dc = null; // reset data container to prevent it being added again.
          } else { // static partitions
            List<String> partVals = MetaStoreUtils.getPvals(table.getPartCols(),
                tbd.getPartitionSpec());
            db.validatePartitionNameCharacters(partVals);
            db.loadPartition(new Path(tbd.getSourceDir()), tbd.getTable().getTableName(),
                tbd.getPartitionSpec(), tbd.getReplace(), tbd.getHoldDDLTime(),
                tbd.getInheritTableSpecs(), isSkewedStoredAsDirs(tbd));
            Partition partn = db.getPartition(table, tbd.getPartitionSpec(), false);

            if (bucketCols != null || sortCols != null) {
              updatePartitionBucketSortColumns(table, partn, bucketCols, numBuckets, sortCols);
            }

            dc = new DataContainer(table.getTTable(), partn.getTPartition());
            // add this partition to post-execution hook
            if (work.getOutputs() != null) {
              work.getOutputs().add(new WriteEntity(partn, true));
            }
          }

Examples of org.apache.olingo.odata2.ref.model.DataContainer

  private static ScenarioDataSource dataSource;
  private static ListsProcessor processor;

  @BeforeClass
  public static void init() {
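    // build the reference data container once and wire it into the data source and the processor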
    dataContainer = new DataContainer();
    dataContainer.reset();
    dataSource = new ScenarioDataSource(dataContainer);
    processor = new ListsProcessor(dataSource);
  }

Examples of org.gumtree.data.common.DataContainer

    // Instantiate the dictionary
    IPathResolver pathResolver = new DictionaryPathResolver(dictionary);

    // Instantiate the "container" that links the dataset and the dictionary
    IDataContainer container = new DataContainer(dataset);

    // Set dictionary on the container
    container.setPathSolver(pathResolver);

    // Parse the file (using the dictionary) to display data
    for (String key : dictionary.keySet())
    {
      String value;
      try
      {
        // get the value of the dataset corresponding to the current key
        value = container.get(key).toString();
        System.out.println(key + ": " + (value.length() < 1000 ? value : "Data too long to be displayed using Eclipse!") );
      }
      catch(DataModelException n)
      {
        System.out.println("ERROR on key: " + key);
        n.printStackTrace();
      }

      // Get the group node corresponding to the current key
      group = dataset.getRootGroup();
      DataItem toto = (DataItem) group.getObjectByPath(container.getPathResolver()
          .resolvePath(key));
      group = toto.getParentGroup();

      // Scan the node's attributes
      List<?> list_attr = group.getAttributes();

Examples of org.infinispan.container.DataContainer

    * called recursively so that it iterates through the whole cache.
    *
     * @throws Exception if there are any issues reading the data from the cache or pushing data to the cache store.
    */
   protected void pushState(final Cache<Object, Object> cache) throws Exception {
      DataContainer dc = cache.getAdvancedCache().getDataContainer();
      Set<Object> keys = dc.keySet();
      InternalCacheEntry entry;
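      // copy every non-null entry from the data container into the cache store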
      for (Object k : keys) if ((entry = dc.get(k)) != null) store(entry);
   }

Examples of org.infinispan.container.DataContainer

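      // mock the collaborators that are later passed to the StateConsumerImpl under test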
      CacheNotifier cacheNotifier = mock(CacheNotifier.class);
      RpcManager rpcManager = mock(RpcManager.class);
      Transport transport = mock(Transport.class);
      CommandsFactory commandsFactory = mock(CommandsFactory.class);
      CacheLoaderManager cacheLoaderManager = mock(CacheLoaderManager.class);
      DataContainer dataContainer = mock(DataContainer.class);
      TransactionTable transactionTable = mock(TransactionTable.class);
      StateTransferLock stateTransferLock = mock(StateTransferLock.class);
      InterceptorChain interceptorChain = mock(InterceptorChain.class);
      InvocationContextContainer icc = mock(InvocationContextContainer.class);

      when(commandsFactory.buildStateRequestCommand(any(StateRequestCommand.Type.class), any(Address.class), anyInt(), any(Set.class))).thenAnswer(new Answer<StateRequestCommand>() {
         @Override
         public StateRequestCommand answer(InvocationOnMock invocation) {
            return new StateRequestCommand("cache1", (StateRequestCommand.Type) invocation.getArguments()[0], (Address) invocation.getArguments()[1], (Integer) invocation.getArguments()[2], (Set) invocation.getArguments()[3]);
         }
      });

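      // make the transport report view id 1 and the local address, exposed through the rpc manager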
      when(transport.getViewId()).thenReturn(1);
      when(rpcManager.getAddress()).thenReturn(addresses[0]);
      when(rpcManager.getTransport()).thenReturn(transport);

      final Map<Address, Set<Integer>> requestedSegments = ConcurrentMapFactory.makeConcurrentMap();
      final Set<Integer> flatRequestedSegments = new ConcurrentSkipListSet<Integer>();
      when(rpcManager.invokeRemotely(any(Collection.class), any(StateRequestCommand.class), any(ResponseMode.class), anyLong())).thenAnswer(new Answer<Map<Address, Response>>() {
         @Override
         public Map<Address, Response> answer(InvocationOnMock invocation) {
            Collection<Address> recipients = (Collection<Address>) invocation.getArguments()[0];
            Address recipient = recipients.iterator().next();
            StateRequestCommand cmd = (StateRequestCommand) invocation.getArguments()[1];
            Map<Address, Response> results = new HashMap<Address, Response>(1);
            if (cmd.getType().equals(StateRequestCommand.Type.GET_TRANSACTIONS)) {
               results.put(recipient, SuccessfulResponse.create(new ArrayList<TransactionInfo>()));
               Set<Integer> segments = (Set<Integer>) cmd.getParameters()[3];
               requestedSegments.put(recipient, segments);
               flatRequestedSegments.addAll(segments);
            } else if (cmd.getType().equals(StateRequestCommand.Type.START_STATE_TRANSFER)
                  || cmd.getType().equals(StateRequestCommand.Type.CANCEL_STATE_TRANSFER)) {
                  results.put(recipient, SuccessfulResponse.SUCCESSFUL_EMPTY_RESPONSE);
            }
            return results;
         }
      });


      // create the state consumer under test
      final StateConsumerImpl stateConsumer = new StateConsumerImpl();
      stateConsumer.init(cache, pooledExecutorService, stateTransferManager, interceptorChain, icc, configuration, rpcManager, null,
            commandsFactory, cacheLoaderManager, dataContainer, transactionTable, stateTransferLock, cacheNotifier);
      stateConsumer.start();

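      // stub the data container so it iterates over two immortal cache entries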
      final List<InternalCacheEntry> cacheEntries = new ArrayList<InternalCacheEntry>();
      Object key1 = new TestKey("key1", 0, ch1);
      Object key2 = new TestKey("key2", 0, ch1);
      cacheEntries.add(new ImmortalCacheEntry(key1, "value1"));
      cacheEntries.add(new ImmortalCacheEntry(key2, "value2"));
      when(dataContainer.iterator()).thenAnswer(new Answer<Iterator<InternalCacheEntry>>() {
         @Override
         public Iterator<InternalCacheEntry> answer(InvocationOnMock invocation) {
            return cacheEntries.iterator();
         }
      });

Examples of org.infinispan.container.DataContainer

         Cache cache = manager.getCache("cache");
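         // populate the cache so both the store and the in-memory data container hold entries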
         cache.put(k(m, 1), v(m, 1));
         cache.put(k(m, 2), v(m, 2));
         cache.put(k(m, 3), v(m, 3));
         DummyInMemoryCacheStore store = getDummyStore(cache);
         DataContainer data = getDataContainer(cache);
         assert !store.isEmpty();
         assert 0 != data.size();
         manager.removeCache("cache");
         assert store.isEmpty();
         assert 0 == data.size();
         // Try removing the cache again, it should be a no-op
         manager.removeCache("cache");
         assert store.isEmpty();
         assert 0 == data.size();
      } finally {
         manager.stop();
      }
   }

Examples of org.infinispan.container.DataContainer

            cache1.put(k(m, 2), v(m, 2));
            cache1.put(k(m, 3), v(m, 3));
            cache2.put(k(m, 4), v(m, 4));
            cache2.put(k(m, 5), v(m, 5));
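            // grab each node's store and data container to verify they are cleared by removeCache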
            DummyInMemoryCacheStore store1 = getDummyStore(cache1);
            DataContainer data1 = getDataContainer(cache1);
            DummyInMemoryCacheStore store2 = getDummyStore(cache2);
            DataContainer data2 = getDataContainer(cache2);
            assert !store1.isEmpty();
            assert 5 == data1.size();
            assert !store2.isEmpty();
            assert 5 == data2.size();
            manager1.removeCache("cache");
            assert !manager1.cacheExists("cache");
            assert !manager2.cacheExists("cache");
            assert null == manager1.getCache("cache", false);
            assert null == manager2.getCache("cache", false);
            assert store1.isEmpty();
            assert 0 == data1.size();
            assert store2.isEmpty();
            assert 0 == data2.size();
         }
      });
   }