Class org.apache.hadoop.util.DiskChecker

Examples of org.apache.hadoop.util.DiskChecker.DiskErrorException

DiskErrorException is a checked exception (it extends java.io.IOException) nested inside org.apache.hadoop.util.DiskChecker. Hadoop throws it when a local directory or DataNode volume fails a disk health check. The snippets below are taken from Hadoop's MapReduce, HDFS, and common file-system code and show the places where it is raised.

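Before the excerpts, here is a minimal, self-contained sketch of how the exception typically surfaces. It uses the public DiskChecker.checkDir(File) API; the class name and directory path are made up for illustration.

import java.io.File;
import org.apache.hadoop.util.DiskChecker;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;

public class DiskCheckDemo {
  public static void main(String[] args) {
    File dir = new File("/tmp/example-local-dir");  // illustrative path
    try {
      // Creates the directory if needed and verifies it is a readable,
      // writable directory; throws DiskErrorException otherwise.
      DiskChecker.checkDir(dir);
      System.out.println(dir + " passed the disk check");
    } catch (DiskErrorException e) {
      System.err.println("Disk check failed: " + e.getMessage());
    }
  }
}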

          it.remove();
          numFailures++;
        }
      }
      if (localDirs.isEmpty()) {
        throw new DiskErrorException(
            "No mapred local directories are writable");
      }
    }
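The fragment above starts mid-loop, so here is a minimal sketch (not the original Hadoop method; the class, method, and variable names are illustrative) of the full pattern it belongs to: each configured local directory is probed with DiskChecker.checkDir, failing directories are dropped from the list, and DiskErrorException is raised only when no usable directory remains.

import java.io.File;
import java.util.Iterator;
import java.util.List;

import org.apache.hadoop.util.DiskChecker;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;

public final class LocalDirCheckSketch {

  // Probe every configured local directory; drop the ones that fail.
  static void checkLocalDirs(List<String> localDirs) throws DiskErrorException {
    int numFailures = 0;
    for (Iterator<String> it = localDirs.iterator(); it.hasNext();) {
      File dir = new File(it.next());
      try {
        DiskChecker.checkDir(dir);
      } catch (DiskErrorException e) {
        // Unusable directory: remove it and keep going with the rest.
        it.remove();
        numFailures++;
      }
    }
    if (localDirs.isEmpty()) {
      throw new DiskErrorException("No mapred local directories are writable"
          + " (" + numFailures + " failed the disk check)");
    }
  }
}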


    // How many of the configured volumes failed to come up.
    int volsFailed = volsConfigured - storage.getNumStorageDirs();

    if (volsFailed < 0 || volsFailed > volFailuresTolerated) {
      throw new DiskErrorException("Invalid value for volsFailed : "
          + volsFailed + " , Volumes tolerated : " + volFailuresTolerated);
    }

    this.validVolsRequired = volsConfigured - volFailuresTolerated;

    if (validVolsRequired < 1
        || validVolsRequired > storage.getNumStorageDirs()) {
      throw new DiskErrorException("Invalid value for validVolsRequired : "
          + validVolsRequired + " , Current valid volumes: "
          + storage.getNumStorageDirs());
    }

    FSVolume[] volArray = new FSVolume[storage.getNumStorageDirs()];
    for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {

    StringBuilder sb = new StringBuilder();
    for (FSVolume fv : failed_vols) {
      sb.append(fv.dataDir.dir.getAbsolutePath() + ";");
    }

    throw new DiskErrorException("DataNode failed volumes:" + sb);
  }

                }
            }
        }

        if (!writable)
            throw new DiskErrorException(
                    "all local directories are not writable");
    }

      if (returnPath != null) {
        return returnPath;
      }
     
      //no path found
      throw new DiskErrorException("Could not find any valid local " +
          "directory for " + pathStr);
    }

        }
        numDirsSearched++;
      }

      //no path found
      throw new DiskErrorException("Could not find " + pathStr + " in any of"
          + " the configured local directories");
    }
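The two fragments above come from the local-directory allocator that throws DiskErrorException when it runs out of candidates. A short usage sketch follows, assuming the standard org.apache.hadoop.fs.LocalDirAllocator API; the context key ("mapred.local.dir"), file name, and class name are illustrative, and the configured directories are assumed to exist.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;

public class LocalDirAllocatorSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // The context string names the configuration property that lists
    // the candidate local directories.
    LocalDirAllocator allocator = new LocalDirAllocator("mapred.local.dir");
    try {
      // Chooses a writable directory with enough space for the new file.
      Path out = allocator.getLocalPathForWrite("spill/output0.out", conf);
      System.out.println("Write path: " + out);

      // Looks the file up again across the configured directories.
      Path in = allocator.getLocalPathToRead("spill/output0.out", conf);
      System.out.println("Read path: " + in);
    } catch (DiskErrorException e) {
      // Thrown when no configured local directory can satisfy the request.
      System.err.println("No valid local directory: " + e.getMessage());
    } catch (IOException e) {
      System.err.println("Lookup failed: " + e.getMessage());
    }
  }
}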

    int volsConfigured = (dataDirs == null) ? 0 : dataDirs.length;
    int volsFailed = volsConfigured - storage.getNumStorageDirs();
    this.validVolsRequired = volsConfigured - volFailuresTolerated;

    if (volFailuresTolerated < 0 || volFailuresTolerated >= volsConfigured) {
      throw new DiskErrorException("Invalid volume failure "
          + " config value: " + volFailuresTolerated);
    }
    if (volsFailed > volFailuresTolerated) {
      throw new DiskErrorException("Too many failed volumes - "
          + "current valid volumes: " + storage.getNumStorageDirs()
          + ", volumes configured: " + volsConfigured
          + ", volumes failed: " + volsFailed
          + ", volume failures tolerated: " + volFailuresTolerated);
    }
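To make the check above concrete (the numbers are illustrative): with volsConfigured = 4 and volFailuresTolerated = 1, the first test passes because 0 <= 1 < 4. If only two storage directories load successfully, volsFailed = 4 - 2 = 2, which exceeds the tolerated 1, so the DataNode aborts startup with a DiskErrorException carrying the counts in its message; validVolsRequired works out to 4 - 1 = 3.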

    StringBuilder sb = new StringBuilder();
    for (FSVolume fv : failedVols) {
      sb.append(fv.currentDir.getAbsolutePath() + ";");
    }

    throw new DiskErrorException("DataNode failed volumes:" + sb);
  }

        }
      }
    }

    if (!writable)
      throw new DiskErrorException(
                                   "all local directories are not writable");
  }

        }
      }
    }

    if (!writable)
      throw new DiskErrorException("all local directories are not writable");
  }
