Package voldemort

Examples of voldemort.VoldemortException
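
VoldemortException is the project's unchecked base exception: the examples below throw it from methods that declare no checked exceptions, either with a plain message or by wrapping a checked cause such as an IOException. A minimal sketch of both constructor forms seen in these examples; the class and method names in the sketch are illustrative, not part of the project:

    import java.io.IOException;
    import java.io.InputStream;

    import voldemort.VoldemortException;

    public class VoldemortExceptionSketch {

        // Fail fast with a plain message (hypothetical validation helper).
        static void requirePositive(int numChunks) {
            if(numChunks < 1)
                throw new VoldemortException("num.chunks must be positive, got " + numChunks);
        }

        // Wrap a checked IOException so callers need not declare it (hypothetical helper).
        static int readOneByte(InputStream in) {
            try {
                return in.read();
            } catch(IOException e) {
                throw new VoldemortException(e);
            }
        }
    }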


        // Make sure it satisfies the partitionId_replicaType format
        List<FileStatus> fileList = Lists.newArrayList();
        for(FileStatus file: files) {
            if(!ReadOnlyUtils.isFormatCorrect(file.getPath().getName(),
                                              ReadOnlyStorageFormat.READONLY_V2)) {
                throw new VoldemortException("Incorrect data file name format for "
                                             + file.getPath().getName() + ". Unsupported by "
                                             + ReadOnlyStorageFormat.READONLY_V2);
            }
            fileList.add(file);
        }
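
The exact naming rule is enforced by ReadOnlyUtils.isFormatCorrect. The sketch below only illustrates the kind of partitionId_replicaType check the comment describes; the pattern is a guess for illustration, not the real READONLY_V2 rule:

    import java.util.regex.Pattern;

    import voldemort.VoldemortException;

    public class FileNameFormatSketch {

        // Illustrative pattern only: a numeric partition id and a numeric replica type
        // separated by an underscore, followed by anything else. The real rule lives in
        // ReadOnlyUtils.isFormatCorrect and is not reproduced here.
        private static final Pattern PARTITION_REPLICA = Pattern.compile("^\\d+_\\d+.*$");

        static void validate(String fileName) {
            if(!PARTITION_REPLICA.matcher(fileName).matches())
                throw new VoldemortException("Incorrect data file name format for " + fileName);
        }
    }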


    public String getType() {
        return TYPE_NAME;
    }

    public void update(StoreDefinition storeDef) {
        throw new VoldemortException("Storage config updates not permitted for "
                                     + this.getClass().getCanonicalName());
    }

        synchronized(condition) {
            while(paused) {
                try {
                    condition.wait();
                } catch(InterruptedException e) {
                    // Restore the interrupt status before converting to an unchecked exception
                    Thread.currentThread().interrupt();
                    throw new VoldemortException("Pausable store interrupted while paused.");
                }
            }
        }
    }
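
The loop above blocks while the store is paused; the counterpart that flips the flag and wakes the waiting threads is not shown in the excerpt. A sketch of what such pause/unpause methods typically look like, with field and method names that are assumptions rather than the project's actual code:

    public class PausableStoreSketch {

        private final Object condition = new Object();
        private boolean paused = false;

        // Block new operations until unpause() is called (the wait loop above spins on this flag).
        public void pause() {
            synchronized(condition) {
                paused = true;
            }
        }

        // Clear the flag and wake every thread parked in condition.wait().
        public void unpause() {
            synchronized(condition) {
                paused = false;
                condition.notifyAll();
            }
        }
    }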

                break;
            case 4:
                ByteUtils.writeInt(bytes, v1, 0);
                break;
            default:
                throw new VoldemortException("Key hash size " + keyHashSize + " not supported");

        }

        return bytes;
    }
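
The default branch guards against hash sizes the writer cannot serialize. A standalone sketch of the same idea, packing an int key hash into a fixed-size byte array with java.nio.ByteBuffer instead of the project's ByteUtils; the set of supported sizes here is illustrative:

    import java.nio.ByteBuffer;

    import voldemort.VoldemortException;

    public class KeyHashSketch {

        // Pack the low-order bytes of an int key hash into an array of keyHashSize bytes.
        static byte[] packHash(int hash, int keyHashSize) {
            switch(keyHashSize) {
                case 1:
                    return new byte[] { (byte) hash };
                case 2:
                    return ByteBuffer.allocate(2).putShort((short) hash).array();
                case 4:
                    return ByteBuffer.allocate(4).putInt(hash).array();
                default:
                    throw new VoldemortException("Key hash size " + keyHashSize + " not supported");
            }
        }
    }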

        }

        @Override
        public ByteBuffer next() {
            if(!hasNext())
                throw new VoldemortException("Reached the end");

            try {
                if(coalesceCollided) {

                    // Read a short
                    ByteBuffer numKeysBytes = ByteBuffer.allocate(ByteUtils.SIZE_OF_SHORT);
                    getCurrentChunk().read(numKeysBytes, getCurrentOffsetInChunk());
                    short numKeys = numKeysBytes.getShort(0);

                    // Read all the collided values
                    ByteBuffer values = ByteBuffer.allocate(numKeys * ByteUtils.SIZE_OF_INT);
                    getCurrentChunk().read(values,
                                           getCurrentOffsetInChunk() + ByteUtils.SIZE_OF_SHORT);

                    // update the offset
                    updateOffset(getCurrentOffsetInChunk() + ByteUtils.SIZE_OF_SHORT
                                 + (numKeys * ByteUtils.SIZE_OF_INT));

                    return values;
                } else {
                    // Read a value
                    ByteBuffer value = ByteBuffer.allocate(ByteUtils.SIZE_OF_INT);
                    getCurrentChunk().read(value, getCurrentOffsetInChunk());

                    updateOffset(getCurrentOffsetInChunk() + ByteUtils.SIZE_OF_INT);

                    return value;
                }
            } catch(IOException e) {
                throw new VoldemortException(e);
            }
        }
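
The two branches above imply a simple record layout: either a bare 4-byte value, or, when collided entries are coalesced, a 2-byte count followed by that many 4-byte values. A self-contained sketch that parses one coalesced record from a ByteBuffer; the layout is inferred from the reads above, not taken from a format specification:

    import java.nio.ByteBuffer;

    public class CollidedRecordSketch {

        // Parse one coalesced record: a 2-byte count followed by count 4-byte values.
        static int[] readCollidedValues(ByteBuffer chunk, int offset) {
            short numKeys = chunk.getShort(offset);
            int[] values = new int[numKeys];
            int valueOffset = offset + Short.BYTES;
            for(int i = 0; i < numKeys; i++) {
                values[i] = chunk.getInt(valueOffset + i * Integer.BYTES);
            }
            return values;
        }
    }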

        return storeDef.getName();
    }

    private final void checkNotNull(Object o) {
        if(o == null)
            throw new VoldemortException("Not configured yet!");
    }

                throw new IllegalStateException("Expected to find only a single store, but found multiple!");
            this.storeDef = storeDefs.get(0);

            this.numChunks = conf.getInt("num.chunks", -1);
            if(this.numChunks < 1)
                throw new VoldemortException("num.chunks not specified in the job conf.");
            this.saveKeys = conf.getBoolean("save.keys", false);
            this.reducerPerBucket = conf.getBoolean("reducer.per.bucket", false);
            this.conf = job;
            this.position = 0;
            this.outputDir = job.get("final.output.dir");
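
These properties have to be populated on the submitting side, or the num.chunks check above fails the job. A sketch of the corresponding setup, assuming an org.apache.hadoop.mapred.JobConf; the property names are taken from the excerpt, while the values are placeholders:

    import org.apache.hadoop.mapred.JobConf;

    public class JobConfSetupSketch {

        // Populate the properties the reader above expects; values here are placeholders.
        static void configure(JobConf conf, int numChunks, String outputDir) {
            conf.setInt("num.chunks", numChunks);         // must be at least 1
            conf.setBoolean("save.keys", true);           // whether keys are stored with the values
            conf.setBoolean("reducer.per.bucket", false); // whether one reducer handles each bucket
            conf.set("final.output.dir", outputDir);      // final location of the store files
        }
    }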

            // Multiple values for this md5 mean a collision. Unless keys are
            // being saved (where collisions are expected), throw an exception:
            // either the data itself has duplicate keys, there are trillions of
            // keys, or someone is attempting something malicious.
            if(!getSaveKeys() && numTuples > 1)
                throw new VoldemortException("Duplicate keys detected for md5 sum "
                                             + ByteUtils.toHexString(ByteUtils.copy(key.get(),
                                                                                    0,
                                                                                    key.getSize())));

        }

        if(numTuples < 0) {
            // Overflow
            throw new VoldemortException("Found too many collisions: chunk " + chunkId
                                         + " has exceeded " + Short.MAX_VALUE + " collisions.");
        } else if(numTuples > 1) {
            // Update number of collisions + max keys per collision
            reporter.incrCounter(CollisionCounter.NUM_COLLISIONS, 1);

            long numCollisions = reporter.getCounter(CollisionCounter.MAX_COLLISIONS).getCounter();
            if(numTuples > numCollisions) {
                reporter.incrCounter(CollisionCounter.MAX_COLLISIONS, numTuples - numCollisions);
            }
        }

        // Flush the value
        valueStream.flush();
        byte[] value = stream.toByteArray();

        // Start writing to the file now. If the save-keys flag is set, first
        // write the number of keys (tuples) for this entry.
        if(getSaveKeys()) {

            this.valueFileStream.writeShort(numTuples);
            this.position += ByteUtils.SIZE_OF_SHORT;

            if(this.checkSumDigestValue != null) {
                this.checkSumDigestValue.update(numTuples);
            }
        }

        this.valueFileStream.write(value);
        this.position += value.length;

        if(this.checkSumDigestValue != null) {
            this.checkSumDigestValue.update(value);
        }

        if(this.position < 0)
            throw new VoldemortException("Chunk overflow exception: chunk " + chunkId
                                         + " has exceeded " + Integer.MAX_VALUE + " bytes.");
    }
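
The final check works because position is an int: once more than Integer.MAX_VALUE bytes have been written, the additions wrap around and the value turns negative. The same overflow detection in isolation, as a tiny sketch:

    import voldemort.VoldemortException;

    public class ChunkOverflowSketch {

        private int position = 0;

        // Advance the write position; a negative result means the int wrapped around,
        // i.e. the chunk has grown past Integer.MAX_VALUE bytes.
        void advance(int bytesWritten, int chunkId) {
            position += bytesWritten;
            if(position < 0)
                throw new VoldemortException("Chunk overflow exception: chunk " + chunkId
                                             + " has exceeded " + Integer.MAX_VALUE + " bytes.");
        }
    }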

    public File fetch(String sourceFileUrl, String destinationFile, String hadoopConfigPath)
            throws IOException {
        if(this.globalThrottleLimit != null) {
            if(this.globalThrottleLimit.getSpeculativeRate() < this.minBytesPerSecond)
                throw new VoldemortException("Too many push jobs.");
            this.globalThrottleLimit.incrementNumJobs();
        }

        ObjectName jmxName = null;
        try {

            final Configuration config = new Configuration();
            FileSystem fs = null;
            config.setInt("io.socket.receive.buffer", bufferSize);
            config.set("hadoop.rpc.socket.factory.class.ClientProtocol",
                       ConfigurableSocketFactory.class.getName());
            config.set("hadoop.security.group.mapping",
                       "org.apache.hadoop.security.ShellBasedUnixGroupsMapping");

            final Path path = new Path(sourceFileUrl);

            boolean isHftpBasedFetch = sourceFileUrl.length() > 4
                                       && sourceFileUrl.substring(0, 4).equals("hftp");
            logger.info("URL : " + sourceFileUrl + " and hftp protocol enabled = "
                        + isHftpBasedFetch);
            logger.info("Hadoop path = " + hadoopConfigPath + " , keytab path = "
                        + HdfsFetcher.keytabPath + " , kerberos principal = "
                        + HdfsFetcher.kerberosPrincipal);

            if(hadoopConfigPath.length() > 0 && !isHftpBasedFetch) {

                config.addResource(new Path(hadoopConfigPath + "/core-site.xml"));
                config.addResource(new Path(hadoopConfigPath + "/hdfs-site.xml"));

                String security = config.get(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION);

                if(security == null || !security.equals("kerberos")) {
                    logger.error("Security isn't turned on in the conf: "
                                 + CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION
                                 + " = "
                                 + config.get(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION));
                    logger.error("Please make sure that the Hadoop config directory path is valid.");
                    throw new VoldemortException("Error in getting Hadoop filesystem. Invalid Hadoop config directory path.");
                } else {
                    logger.info("Security is turned on in the conf. Trying to authenticate ...");

                }
            }

            if(HdfsFetcher.keytabPath.length() > 0 && !isHftpBasedFetch) {

                /*
                 * We're seeing intermittent errors while trying to get the
                 * Hadoop filesystem in a privileged doAs block. This happens
                 * when we fetch the files over hdfs or webhdfs. This retry loop
                 * is inserted here as a temporary measure.
                 */
                for(int attempt = 0; attempt < maxAttempts; attempt++) {
                    boolean isValidFilesystem = false;

                    if(!new File(HdfsFetcher.keytabPath).exists()) {
                        logger.error("Invalid keytab file path. Please provide a valid keytab path");
                        throw new VoldemortException("Error in getting Hadoop filesystem. Invalid keytab file path.");
                    }

                    /*
                     * The Hadoop path for getting a Filesystem object in a
                     * privileged doAs block is not thread safe. This might be
                     * causing intermittent NPE exceptions. Adding a
                     * synchronized block.
                     */
                    synchronized(this) {
                        /*
                         * First login using the specified principal and keytab
                         * file
                         */
                        UserGroupInformation.setConfiguration(config);
                        UserGroupInformation.loginUserFromKeytab(HdfsFetcher.kerberosPrincipal,
                                                                 HdfsFetcher.keytabPath);

                        /*
                         * If login is successful, get the filesystem object.
                         * NOTE: Ideally we do not need a doAs block for this.
                         * Consider removing it in the future once the Hadoop
                         * jars have the corresponding patch (tracked in the
                         * Hadoop Apache project: HDFS-3367)
                         */
                        try {
                            logger.info("I've logged in and am now Doasing as "
                                        + UserGroupInformation.getCurrentUser().getUserName());
                            fs = UserGroupInformation.getCurrentUser()
                                                     .doAs(new PrivilegedExceptionAction<FileSystem>() {

                                                         @Override
                                                         public FileSystem run() throws Exception {
                                                             FileSystem fs = path.getFileSystem(config);
                                                             return fs;
                                                         }
                                                     });
                            isValidFilesystem = true;
                        } catch(InterruptedException e) {
                            logger.error(e.getMessage(), e);
                        } catch(Exception e) {
                            logger.error("Got an exception while getting the filesystem object: ");
                            logger.error("Exception class : " + e.getClass());
                            e.printStackTrace();
                            for(StackTraceElement et: e.getStackTrace()) {
                                logger.error(et.toString());
                            }
                        }
                    }

                    if(isValidFilesystem) {
                        break;
                    } else if(attempt < maxAttempts - 1) {
                        logger.error("Attempt#" + attempt
                                     + " Could not get a valid Filesystem object. Trying again in "
                                     + retryDelayMs + " ms");
                        sleepForRetryDelayMs();
                    }
                }
            } else {
                fs = path.getFileSystem(config);
            }

            CopyStats stats = new CopyStats(sourceFileUrl, sizeOfPath(fs, path));
            jmxName = JmxUtils.registerMbean("hdfs-copy-" + copyCount.getAndIncrement(), stats);
            File destination = new File(destinationFile);

            if(destination.exists()) {
                throw new VoldemortException("Version directory " + destination.getAbsolutePath()
                                             + " already exists");
            }

            logger.info("Starting fetch for : " + sourceFileUrl);
            boolean result = fetch(fs, path, destination, stats);
            logger.info("Completed fetch : " + sourceFileUrl);

            // Close the filesystem
            fs.close();

            if(result) {
                return destination;
            } else {
                return null;
            }
        } catch(Throwable te) {
            te.printStackTrace();
            logger.error("Error thrown while trying to get data from Hadoop filesystem", te);
            throw new VoldemortException("Error thrown while trying to get data from Hadoop filesystem : "
                                         + te);
        } finally {
            if(this.globalThrottleLimit != null) {
                this.globalThrottleLimit.decrementNumJobs();
            }
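
The keytab branch above wraps the filesystem lookup in a bounded retry loop: attempt the operation, and if no valid FileSystem came back, sleep for retryDelayMs and try again up to maxAttempts. The same control flow in isolation; the operation being retried is a placeholder, not the project's actual code:

    import java.util.concurrent.Callable;

    import voldemort.VoldemortException;

    public class RetrySketch {

        // Retry an unreliable operation a bounded number of times, sleeping between attempts.
        static <T> T retry(Callable<T> operation, int maxAttempts, long retryDelayMs) {
            for(int attempt = 0; attempt < maxAttempts; attempt++) {
                try {
                    T result = operation.call();
                    if(result != null)
                        return result;
                } catch(Exception e) {
                    // Swallow and fall through to the next attempt, as the fetch code does
                    // for failed filesystem lookups.
                }
                if(attempt < maxAttempts - 1) {
                    try {
                        Thread.sleep(retryDelayMs);
                    } catch(InterruptedException ie) {
                        Thread.currentThread().interrupt();
                        throw new VoldemortException("Interrupted while waiting to retry");
                    }
                }
            }
            throw new VoldemortException("Operation did not succeed after " + maxAttempts + " attempts");
        }
    }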
