//Lucene tokenizers are really low level ...
try {
tokenizer.reset(); //starting with Solr4 reset MUST BE called before using
while(tokenizer.incrementToken()){
//only interested in the start/end indexes of tokens
OffsetAttribute offset = tokenizer.addAttribute(OffsetAttribute.class);
if(lastAdded < 0){ //reset with this token
lastAdded = offset.startOffset();
}
if(foundWildcard){ //wildcard present in the current token
//two cases: "wildcar? at the end", "wild?ard within the word"