Package org.apache.hadoop.hive.ql.metadata

Examples of org.apache.hadoop.hive.ql.metadata.HiveException
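The snippets below are drawn from the Apache Hive source and show the recurring ways HiveException is used: thrown with a descriptive message when validation fails, wrapped around a lower-level checked exception, or rethrown unchanged when it is already the right type.

As a condensed illustration of those idioms, here is a minimal sketch; the doWork and validate names are hypothetical placeholders, not Hive APIs.

    import org.apache.hadoop.hive.ql.metadata.HiveException;

    public class HiveExceptionIdioms {
      // Hypothetical unit of work that can fail in arbitrary ways.
      static void doWork() throws Exception {
      }

      static void validate(String tableName) throws HiveException {
        if (tableName == null || tableName.trim().isEmpty()) {
          // Idiom 1: fail fast with a human-readable message.
          throw new HiveException("Table name must not be empty");
        }
        try {
          doWork();
        } catch (HiveException e) {
          // Idiom 2: rethrow our own exception type unchanged.
          throw e;
        } catch (Exception e) {
          // Idiom 3: wrap anything else so callers see one checked type.
          throw new HiveException(e);
        }
      }
    }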


    if (partSpec != null) {
      // If a partition spec is given, build the index only for that partition.
      Partition part = db.getPartition(baseTbl, partSpec, false);
      if (part == null) {
        throw new HiveException("Partition "
            + Warehouse.makePartName(partSpec, false)
            + " does not exist in table "
            + baseTbl.getTableName());
      }
      baseTblPartitions.add(part);
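A condensed, self-contained version of the same existence check, assuming the Hive, Table, and Warehouse APIs exactly as they appear in the snippet (a partition spec is a column-name-to-value map; the helper declares throws Exception because Warehouse.makePartName can also fail):

    import java.util.Map;
    import org.apache.hadoop.hive.metastore.Warehouse;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.metadata.Partition;
    import org.apache.hadoop.hive.ql.metadata.Table;

    public class PartitionChecks {
      static Partition requirePartition(Hive db, Table tbl, Map<String, String> partSpec)
          throws Exception {
        // false: look the partition up, do not create it if missing.
        Partition part = db.getPartition(tbl, partSpec, false);
        if (part == null) {
          throw new HiveException("Partition " + Warehouse.makePartName(partSpec, false)
              + " does not exist in table " + tbl.getTableName());
        }
        return part;
      }
    }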


      for (String grantStr : grantArrayStr) {
        String[] principalListAndPrivList = grantStr.split(":");
        if (principalListAndPrivList.length != 2
            || principalListAndPrivList[0] == null
            || principalListAndPrivList[0].trim().equals("")) {
          throw new HiveException(
              "Can not understand the config privilege definition " + grantStr);
        }
        String userList = principalListAndPrivList[0];
        String privList = principalListAndPrivList[1];
        checkPrivilege(privList);

    String[] ownerGrantArray = ownerGrantsInConfig.split(",");
    // verify the config
    for (String ownerGrant : ownerGrantArray) {
      Privilege priv = PrivilegeRegistry.getPrivilege(ownerGrant);
      if (priv == null) {
        throw new HiveException("Privilege " + ownerGrant + " is not found.");
      }
    }
  }
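The two snippets above belong to the same configuration check: each grant entry has the form principals:privileges, and every privilege name must resolve in the registry. A minimal sketch of that validation, with a plain string set standing in for Hive's PrivilegeRegistry:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;
    import org.apache.hadoop.hive.ql.metadata.HiveException;

    public class GrantConfigValidator {
      // Hypothetical stand-in for PrivilegeRegistry.getPrivilege(...).
      private static final Set<String> KNOWN_PRIVILEGES =
          new HashSet<String>(Arrays.asList("SELECT", "UPDATE", "CREATE", "DROP"));

      static void validateGrant(String grantStr) throws HiveException {
        // Each entry looks like "user1,user2:Select,Update".
        String[] parts = grantStr.split(":");
        if (parts.length != 2 || parts[0].trim().equals("")) {
          throw new HiveException(
              "Can not understand the config privilege definition " + grantStr);
        }
        for (String priv : parts[1].split(",")) {
          if (!KNOWN_PRIVILEGES.contains(priv.trim().toUpperCase())) {
            throw new HiveException("Privilege " + priv + " is not found.");
          }
        }
      }
    }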

        true_parts.addAll(Hive.get().getPartitions(tab));
      }
    } catch (HiveException e) {
      throw e;
    } catch (Exception e) {
      throw new HiveException(e);
    }

    // Now return the set of partitions
    ret = new PrunedPartitionList(true_parts, unkn_parts, denied_parts);
    prunedPartitionsMap.put(key, ret);

            valueFieldsObjectInspectors);
      }
      // Serialize the value
      value = valueSerializer.serialize(values, valueObjectInspector);
    } catch (SerDeException e) {
      throw new HiveException(e);
    }
   
    try {
      if (out != null) {
        out.collect(keyWritable, value);
      }
    } catch (IOException e) {
      throw new HiveException(e);
    }
  }

              files.addAll(Arrays.asList(fs.listStatus(dirs[i].getPath())));
              // We only check one file, so exit the loop when we have at least one.
              if (files.size() > 0) break;
            }
          } catch (IOException e) {
            throw new HiveException("addFiles: filesystem error in check phase", e);
          }
          // Check if the file format of the file matches that of the table.
          if (files.size() > 0) {
            int fileId = 0;
            boolean fileIsSequenceFile = true;
            try {
              // Probe: the Reader opens only files with a SequenceFile header.
              SequenceFile.Reader reader = new SequenceFile.Reader(
                fs, files.get(fileId).getPath(), conf);
              reader.close();
            } catch (IOException e) {
              fileIsSequenceFile = false;
            }
            if (!fileIsSequenceFile && tableIsSequenceFile) {
              throw new HiveException("Cannot load text files into a table stored as SequenceFile.");
            }
            if (fileIsSequenceFile && !tableIsSequenceFile) {
              throw new HiveException("Cannot load SequenceFiles into a table stored as TextFile.");
            }
          }
        }          

        if (tbd.getPartitionSpec().size() == 0) {
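The format check above relies on a simple probe: a file that opens cleanly with a SequenceFile.Reader is treated as a SequenceFile, anything else as text. Extracted into a helper, using the same (now deprecated) Reader constructor the snippet uses:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.SequenceFile;

    public class FormatProbe {
      static boolean isSequenceFile(FileSystem fs, Path path, Configuration conf) {
        try {
          // Opening succeeds only if the file carries a SequenceFile header.
          SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf);
          reader.close();
          return true;
        } catch (IOException e) {
          return false;
        }
      }
    }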

      } else {
        filtered_count.set(filtered_count.get() + 1);
      }
    } catch (ClassCastException e) {
      e.printStackTrace();
      throw new HiveException("Non Boolean return Object type: " +
          conditionInspectableObject.o.getClass().getName());
    }
  }
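Rather than letting the cast fail and translating the ClassCastException afterwards, the same guarantee can be expressed with an explicit type check up front; a small sketch:

    import org.apache.hadoop.hive.ql.metadata.HiveException;

    public class FilterChecks {
      static boolean asBoolean(Object o) throws HiveException {
        if (!(o instanceof Boolean)) {
          throw new HiveException("Non Boolean return Object type: "
              + (o == null ? "null" : o.getClass().getName()));
        }
        return (Boolean) o;
      }
    }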

      // Add the value to the vector
      storage.get(alias).add(nr);

    } catch (Exception e) {
      e.printStackTrace();
      throw new HiveException(e);
    }
  }

              outStream.close();
            }
          };
      } else {
        // Should never get here; the DDL command should have rejected this case.
        throw new HiveException("Illegal outputformat: " + outputFormat.getClass().getName());
      }

      // In recent Hadoop versions, use deleteOnExit to clean up tmp files.
      try {
        Method deleteOnExit = FileSystem.class.getDeclaredMethod("deleteOnExit", new Class[] {Path.class});
        deleteOnExit.setAccessible(true);
        deleteOnExit.invoke(fs, outPath);
        autoDelete = true;
      } catch (Exception e) {
        // Ignore: deleteOnExit does not exist on older Hadoop releases.
      }

    } catch (HiveException e) {
      throw e;
    } catch (Exception e) {
      e.printStackTrace();
      throw new HiveException(e);
    }
  }
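The reflection dance above is a capability probe: FileSystem.deleteOnExit does not exist in older Hadoop releases, so the method is looked up and invoked at runtime, and the whole attempt is skipped when it fails. A minimal sketch of that probe in isolation:

    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class DeleteOnExitProbe {
      // Returns true if the tmp file was registered for automatic cleanup.
      static boolean tryDeleteOnExit(FileSystem fs, Path outPath) {
        try {
          Method deleteOnExit =
              FileSystem.class.getDeclaredMethod("deleteOnExit", Path.class);
          deleteOnExit.setAccessible(true);
          deleteOnExit.invoke(fs, outPath);
          return true;
        } catch (Exception e) {
          // Method missing or call failed: the caller must clean up itself.
          return false;
        }
      }
    }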

        row_count.set(row_count.get() + 1);
      }

      outWriter.write(recordValue);
    } catch (IOException e) {
      throw new HiveException(e);
    } catch (SerDeException e) {
      throw new HiveException(e);
    }
  }
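On Java 7 and later, the two identical catch blocks in the last snippet (and the matching pair in the serialize/collect snippet earlier) can be collapsed with multi-catch; a brief sketch, where the RecordWriter interface is a hypothetical stand-in for the snippet's outWriter:

    import java.io.IOException;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.serde2.SerDeException;

    public class WriteHelper {
      interface RecordWriter {
        void write(Object value) throws IOException, SerDeException;
      }

      static void writeRecord(RecordWriter outWriter, Object recordValue)
          throws HiveException {
        try {
          outWriter.write(recordValue);
        } catch (IOException | SerDeException e) {
          // One wrap for both checked types.
          throw new HiveException(e);
        }
      }
    }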
