hardworker - 2015-10-29

I am trying to build an application that uses pocketsphinx to test how well the user has memorized some words.
The user is asked to speak, and the decoder returns a result.
The result is compared against a reference text, and feedback is displayed to the user.
If the user makes a mistake, the application plays a knock-knock sound the first time; if the user makes another mistake, an audio file is played.
The problem is that when I want to play the audio file I have to stop the recognizer, which hurts the flow of the user experience.

https://github.com/cmusphinx/pocketsphinx-android-demo

This is my code:

import static edu.cmu.pocketsphinx.SpeechRecognizerSetup.defaultSetup;

import java.io.File;
import java.io.IOException;

import android.os.AsyncTask;
import android.os.Environment;
import android.util.Log;

import edu.cmu.pocketsphinx.Hypothesis;
import edu.cmu.pocketsphinx.RecognitionListener;

public class LocalRecognizer implements RecognitionListener {

    // True while the recognizer should be actively listening to the mic.
    private boolean isRecording = false;
    // Name of the n-gram search to decode with (one search per page of text).
    private String pageIndex;
    private SpeechRecognizer recognizer;
    // Full hypothesis accumulated so far; used to report only the new words
    // decoded since the previous utterance.
    private String tmpResult = "";

    /**
     * Creates the wrapper and starts asynchronous recognizer setup.
     *
     * @param obj       callback that receives recognizer life-cycle events
     * @param pageIndex name of the n-gram search created for this page
     */
    public LocalRecognizer(RecognitionStatus obj, String pageIndex) {
        this.pageIndex = pageIndex;
        LoaderRecognitionStatus(obj);
        initialRecognizer();
    }

    @Override
    public void onPartialResult(Hypothesis hypothesis) {
        // Unused: partial text is read from the decoder in onEndOfSpeech.
    }

    @Override
    public void onResult(Hypothesis hypothesis) {
        // Unused: final text is read from the decoder in onEndOfSpeech.
    }

    @Override
    public void onBeginningOfSpeech() {
        Log.e("@onBeginningOfSpeech", "---onBeginningOfSpeech");
        listener.onBeginningOfSpeech();
    }

    @Override
    public void onEndOfSpeech() {
        // Read the hypothesis directly from the decoder so feedback can be
        // given at every speech pause without stopping the recognizer.
        Hypothesis hypothesis = this.recognizer.getDecoder().hyp();
        if (hypothesis != null) {
            String text = hypothesis.getHypstr();
            // Null-check BEFORE trim(): the original trimmed first, so its
            // later "text == null" checks were dead and a null hypothesis
            // string would have thrown a NullPointerException here.
            text = (text == null) ? "" : text.trim();

            // Strip the part of the hypothesis that was already reported.
            String newText = text.replace(this.tmpResult, "");
            listener.onPartialResult(newText);
            listener.onEndOfSpeech();
            Log.e("@onEndOfSpeech", newText);

            if (!text.equals(""))
                this.tmpResult = text;
        }
    }

    @Override
    public void onError(Exception paramException) {
        listener.onErrorRecognizer(paramException.toString());
    }

    @Override
    public void onTimeout() {
        listener.onTimeoutRecognizer();
    }

    /** Builds the recognizer off the UI thread and reports success/failure. */
    private void initialRecognizer() {
        listener.onInitialRecognizer(false);

        new AsyncTask<Void, Void, Exception>() {
            @Override
            protected Exception doInBackground(Void... params) {
                Log.e("@doInBackground", "---doInBackground");
                try {
                    File assetDir = new File(Environment
                            .getExternalStorageDirectory().getAbsolutePath()
                            + SysConstants.APP_DIR);
                    setupRecognizer(assetDir);
                } catch (Exception e) {
                    // Returned to onPostExecute. The original caught and
                    // logged inside setupRecognizer and always returned null,
                    // so the error branch in onPostExecute never ran.
                    return e;
                }
                return null;
            }

            @Override
            protected void onPostExecute(Exception result) {
                Log.e("@onPostExecute", "---onPostExecute");
                if (result != null) {
                    listener.onErrorRecognizer(result.toString());
                } else {
                    listener.onInitialRecognizer(true);
                }
            }
        }.execute();
    }

    /** Restarts the given search while recording; otherwise just stops. */
    private void switchSearch(String searchName) {
        if (isRecording) {
            Log.e("@switchSearch", "---searchName: " + searchName);
            recognizer.stop();
            recognizer.startListening(searchName);
        } else if (recognizer != null) {
            recognizer.stop();
        }
    }

    /**
     * Creates the decoder from models stored under {@code assetsDir}/model and
     * registers the n-gram search named {@link #pageIndex}.
     *
     * @throws IOException if the recognizer cannot be created (errors now
     *                     propagate to the caller instead of being swallowed)
     */
    private void setupRecognizer(File assetsDir) throws IOException {
        File modelsDir = new File(assetsDir, "model");
        recognizer = defaultSetup()
                .setAcousticModel(new File(modelsDir, "hmm"))
                .setDictionary(new File(modelsDir, "dic/an4.dic"))
                .setRawLogDir(new File(modelsDir, "raw"))
                .setSampleRate(8000).setBackTrace(true)
                .getRecognizer();
        recognizer.addListener(this);

        // Language-model search for this page's reference text.
        File verseLM1 = new File(modelsDir, "lm/an4.DMP");
        recognizer.addNgramSearch(pageIndex, verseLM1);
    }

    public void startRecording() {
        try {
            // Clear logcat so only output from this session is visible.
            Runtime.getRuntime().exec(new String[] { "logcat", "-c" });
            isRecording = true;
            switchSearch(pageIndex);
            listener.onStartRecognizer();
        } catch (Exception e) {
            // getMessage() may be null; the original called toString() on it
            // and could throw an NPE inside the error handler itself.
            Log.e("@startRecording", "Exception: " + e.getMessage());
            e.printStackTrace();
        }
    }

    public void stopRecording() {
        if (recognizer != null)
            recognizer.stop();

        isRecording = false;
        listener.onStopRecognizer();
    }

    /** Cancels any in-flight utterance and releases the microphone. */
    public void killRecording() {
        if (recognizer != null) {
            recognizer.cancel();
            recognizer.shutdown();
        }
        listener.onStopRecognizer();
    }

    public void pause() {
        if (recognizer != null)
            recognizer.stop();

        isRecording = false;
        listener.onPauseRecognizer();
    }

    public void resume() {
        try {
            // Clear logcat so only output from this session is visible.
            Runtime.getRuntime().exec(new String[] { "logcat", "-c" });
            isRecording = true;
            switchSearch(pageIndex);
            listener.onResumeRecognizer();
        } catch (Exception e) {
            Log.e("@startRecording", "Exception: " + e.getMessage());
            e.printStackTrace();
        }
    }

    // //////////////////////////////////
    private RecognitionStatus listener;

    /** Callbacks delivered to the UI layer. */
    public interface RecognitionStatus {

        void onInitialRecognizer(boolean isInitial);

        void onStartRecognizer();

        void onBeginningOfSpeech();

        void onEndOfSpeech();

        void onPauseRecognizer();

        void onResumeRecognizer();

        void onStopRecognizer();

        void onErrorRecognizer(String error);

        void onPartialResult(String result);

        // NOTE(review): never invoked from this class — confirm a caller
        // elsewhere uses it before removing.
        void onFinalResult(String result);

        void onTimeoutRecognizer();
    }

    /** Registers the callback target (name kept for caller compatibility). */
    public void LoaderRecognitionStatus(RecognitionStatus callback) {
        listener = callback;
    }
}

//////////////////**//////////////////

import android.media.AudioRecord;
import android.os.Handler;
import android.os.Looper;
import edu.cmu.pocketsphinx.Config;
import edu.cmu.pocketsphinx.Decoder;
import edu.cmu.pocketsphinx.FsgModel;
import edu.cmu.pocketsphinx.Hypothesis;
import edu.cmu.pocketsphinx.RecognitionListener;

public class SpeechRecognizer {
    protected static final String TAG = SpeechRecognizer.class.getSimpleName();

    // Intended capture-buffer length in seconds. NOTE(review): currently
    // unused because bufferSize is hard-coded to 2048 samples below.
    private static final float BUFFER_SIZE_SECONDS = 0.4F;

    private final Decoder decoder;
    private final int sampleRate;
    private int bufferSize;
    private final AudioRecord recorder;
    private Thread recognizerThread;
    // All listener events are posted to the main (UI) thread.
    private final Handler mainHandler = new Handler(Looper.getMainLooper());

    // Guarded by synchronized(listeners): mutated from client threads and
    // snapshotted by RecognitionEvent.run() on the main thread.
    private final Collection<RecognitionListener> listeners =
            new HashSet<RecognitionListener>();

    /**
     * Creates the decoder and opens the microphone.
     *
     * @param config pocketsphinx configuration; must define "-samprate"
     * @throws IOException if the AudioRecord fails to initialize (e.g. the
     *                     microphone is already in use)
     */
    public SpeechRecognizer(Config config) throws IOException {
        this.decoder = new Decoder(config);
        this.sampleRate = (int) this.decoder.getConfig().getFloat("-samprate");
        this.bufferSize = 2048; // Math.round(this.sampleRate * BUFFER_SIZE_SECONDS);
        // Raw values kept from the original: 6 = VOICE_RECOGNITION source,
        // 16 = CHANNEL_IN_MONO, 2 = ENCODING_PCM_16BIT, 8192-byte buffer.
        this.recorder = new AudioRecord(6, this.sampleRate, 16, 2, 8192);
        // 0 = AudioRecord.STATE_UNINITIALIZED
        if (this.recorder.getState() == 0) {
            this.recorder.release();
            throw new IOException(
                    "Failed to initialize recorder. Microphone might be already in use.");
        }
    }

    public void addListener(RecognitionListener listener) {
        synchronized (this.listeners) {
            this.listeners.add(listener);
        }
    }

    public void removeListener(RecognitionListener listener) {
        synchronized (this.listeners) {
            this.listeners.remove(listener);
        }
    }

    /**
     * Starts decoding with the named search. No-op (returns false) if a
     * recognition thread is already running.
     */
    public boolean startListening(String searchName) {
        return startListening(searchName, RecognizerThread.NO_TIMEOUT);
    }

    /**
     * Starts decoding with the named search and a timeout.
     *
     * @param timeout milliseconds of silence after which onTimeout fires,
     *                or {@code -1} for no timeout
     */
    public boolean startListening(String searchName, int timeout) {
        if (null != this.recognizerThread) {
            return false;
        }
        Log.i(TAG, String.format("Start recognition \"%s\"",
                new Object[] { searchName }));
        this.decoder.setSearch(searchName);
        this.recognizerThread = new RecognizerThread(timeout);
        this.recognizerThread.start();
        return true;
    }

    /** Interrupts and joins the capture thread; false if none was running. */
    private boolean stopRecognizerThread() {
        Log.e(SpeechRecognizer.TAG,
                "------------      STOP  1   stopRecognizerThread");

        if (null == this.recognizerThread)
            return false;
        try {
            this.recognizerThread.interrupt();
            this.recognizerThread.join();
        } catch (InterruptedException e) {
            // Preserve interrupt status for our own caller.
            Thread.currentThread().interrupt();
        }

        this.recognizerThread = null;
        Log.e(SpeechRecognizer.TAG,
                "------------      STOP  2   stopRecognizerThread");

        return true;
    }

    /** Stops listening and delivers the final hypothesis via onResult. */
    public boolean stop() {
        boolean result = stopRecognizerThread();
        if (result) {
            Log.i(TAG, "Stop recognition");
            Hypothesis hypothesis = this.decoder.hyp();
            this.mainHandler.post(new ResultEvent(hypothesis, true));
        }
        return result;
    }

    /** Stops listening without delivering a final result. */
    public boolean cancel() {
        boolean result = stopRecognizerThread();
        if (result) {
            Log.i(TAG, "Cancel recognition");
        }
        return result;
    }

    /** Releases the microphone; the recognizer cannot be reused afterwards. */
    public void shutdown() {
        // Make sure the capture thread is gone first, otherwise it could
        // still be read()ing from a released AudioRecord.
        stopRecognizerThread();
        this.recorder.release();
    }

    public Decoder getDecoder() {
        return this.decoder;
    }

    public String getSearchName() {
        return this.decoder.getSearch();
    }

    public void addFsgSearch(String searchName, FsgModel fsgModel) {
        this.decoder.setFsg(searchName, fsgModel);
    }

    public void addGrammarSearch(String name, File file) {
        Log.i(TAG, String.format("Load JSGF %s", new Object[] { file }));
        this.decoder.setJsgfFile(name, file.getPath());
    }

    public void addNgramSearch(String name, File file) {
        Log.i(TAG,
                String.format("Load N-gram model %s", new Object[] { file }));
        this.decoder.setLmFile(name, file.getPath());
    }

    public void addKeyphraseSearch(String name, String phrase) {
        this.decoder.setKeyphrase(name, phrase);
    }

    public void addKeywordSearch(String name, File file) {
        this.decoder.setKws(name, file.getPath());
    }

    public void addAllphoneSearch(String name, File file) {
        this.decoder.setAllphoneFile(name, file.getPath());
    }

    /** Posted when the silence timeout elapses. */
    private class TimeoutEvent extends SpeechRecognizer.RecognitionEvent {
        private TimeoutEvent() {
            super();
        }

        protected void execute(RecognitionListener listener) {
            listener.onTimeout();
        }
    }

    /** Posted when recording or decoding fails. */
    private class OnErrorEvent extends SpeechRecognizer.RecognitionEvent {
        private final Exception exception;

        OnErrorEvent(Exception exception) {
            super();
            this.exception = exception;
        }

        protected void execute(RecognitionListener listener) {
            listener.onError(this.exception);
        }
    }

    /** Posted with a partial (in-progress) or final hypothesis. */
    private class ResultEvent extends SpeechRecognizer.RecognitionEvent {
        protected final Hypothesis hypothesis;
        private final boolean finalResult;

        ResultEvent(Hypothesis hypothesis, boolean finalResult) {
            super();
            this.hypothesis = hypothesis;
            this.finalResult = finalResult;
        }

        protected void execute(RecognitionListener listener) {
            if (this.finalResult)
                listener.onResult(this.hypothesis);
            else
                listener.onPartialResult(this.hypothesis);
        }
    }

    /** Posted when the voice-activity state flips. */
    private class InSpeechChangeEvent extends SpeechRecognizer.RecognitionEvent {
        private final boolean state;

        InSpeechChangeEvent(boolean state) {
            super();
            this.state = state;
        }

        protected void execute(RecognitionListener listener) {
            if (this.state)
                listener.onBeginningOfSpeech();
            else
                listener.onEndOfSpeech();
        }
    }

    /** Base class for listener callbacks; runs on the main thread. */
    private abstract class RecognitionEvent implements Runnable {
        private RecognitionEvent() {
        }

        public void run() {
            // Snapshot under the same lock used by add/removeListener; the
            // original iterated the HashSet without it.
            RecognitionListener[] snapshot;
            synchronized (SpeechRecognizer.this.listeners) {
                snapshot = SpeechRecognizer.this.listeners
                        .toArray(new RecognitionListener[0]);
            }
            for (RecognitionListener listener : snapshot)
                execute(listener);
        }

        protected abstract void execute(
                RecognitionListener paramRecognitionListener);
    }

    /** Reads microphone audio and feeds it to the decoder until interrupted. */
    private final class RecognizerThread extends Thread {
        private int remainingSamples;
        private int timeoutSamples;
        static final int NO_TIMEOUT = -1;

        public RecognizerThread(int timeout) {
            if (timeout != NO_TIMEOUT)
                this.timeoutSamples = (timeout
                        * SpeechRecognizer.this.sampleRate / 1000);
            else
                this.timeoutSamples = NO_TIMEOUT;
            this.remainingSamples = this.timeoutSamples;
        }

        public RecognizerThread() {
            this(NO_TIMEOUT);
        }

        public void run() {
            SpeechRecognizer.this.recorder.startRecording();
            // 1 = AudioRecord.RECORDSTATE_STOPPED: startRecording failed.
            if (SpeechRecognizer.this.recorder.getRecordingState() == 1) {
                SpeechRecognizer.this.recorder.stop();
                IOException ioe = new IOException(
                        "Failed to start recording. Microphone might be already in use.");

                SpeechRecognizer.this.mainHandler
                        .post(new SpeechRecognizer.OnErrorEvent(ioe));
                return;
            }

            Log.d(SpeechRecognizer.TAG, "Starting decoding");

            try {
                SpeechRecognizer.this.decoder.startUtt();
            } catch (java.lang.RuntimeException e) {
                // Do not swallow silently: a failure here usually means a
                // previous utterance was never closed.
                Log.e(SpeechRecognizer.TAG, "startUtt failed", e);
            }

            short[] buffer = new short[SpeechRecognizer.this.bufferSize];
            boolean inSpeech = SpeechRecognizer.this.decoder.getInSpeech();

            // Warm-up read: discard the first (possibly stale) buffer.
            SpeechRecognizer.this.recorder.read(buffer, 0, buffer.length);

            while ((!interrupted())
                    && ((this.timeoutSamples == NO_TIMEOUT) || (this.remainingSamples > 0))) {
                int nread = SpeechRecognizer.this.recorder.read(buffer, 0,
                        buffer.length);

                if (-1 == nread) {
                    // Report and fall through to the cleanup below, instead of
                    // killing the thread with an unhandled RuntimeException
                    // that skipped recorder.stop() and endUtt().
                    SpeechRecognizer.this.mainHandler
                            .post(new SpeechRecognizer.OnErrorEvent(
                                    new IOException("error reading audio buffer")));
                    break;
                }
                if (nread > 0) {
                    SpeechRecognizer.this.decoder.processRaw(buffer, nread,
                            false, false);

                    if (SpeechRecognizer.this.decoder.getInSpeech() != inSpeech) {
                        inSpeech = SpeechRecognizer.this.decoder.getInSpeech();
                        SpeechRecognizer.this.mainHandler
                                .post(new SpeechRecognizer.InSpeechChangeEvent(
                                        inSpeech));
                    }

                    // Any detected speech resets the silence timeout.
                    if (inSpeech) {
                        this.remainingSamples = this.timeoutSamples;
                    }
                    Hypothesis hypothesis = SpeechRecognizer.this.decoder.hyp();
                    SpeechRecognizer.this.mainHandler
                            .post(new SpeechRecognizer.ResultEvent(hypothesis,
                                    false));
                }

                if (this.timeoutSamples != NO_TIMEOUT) {
                    this.remainingSamples -= nread;
                }
            }
            SpeechRecognizer.this.recorder.stop();
            try {
                // Close the utterance. This call was commented out in the
                // original; leaving the utterance open and calling startUtt()
                // again corrupts the decoder's native state and is the likely
                // cause of the SIGSEGV seen in the attached log.
                SpeechRecognizer.this.decoder.endUtt();
            } catch (java.lang.RuntimeException e) {
                Log.e(SpeechRecognizer.TAG, "endUtt failed", e);
            }
            // Drop stale partial-result events still queued for the UI thread.
            SpeechRecognizer.this.mainHandler.removeCallbacksAndMessages(null);
            if ((this.timeoutSamples != NO_TIMEOUT) && (this.remainingSamples <= 0))
                SpeechRecognizer.this.mainHandler
                        .post(new SpeechRecognizer.TimeoutEvent());
        }
    }

}

This is the log output:

10-29 14:46:14.648: E/@onBeginningOfSpeech(26199): ---onBeginningOfSpeech
10-29 14:46:17.531: I/cmusphinx(26199): INFO: cmn_prior.c(99): cmn_prior_update: from <
10-29 14:46:17.531: I/cmusphinx(26199): 39.67
10-29 14:46:17.531: I/cmusphinx(26199): 5.70
10-29 14:46:17.531: I/cmusphinx(26199): 11.20
10-29 14:46:17.531: I/cmusphinx(26199): 8.09
10-29 14:46:17.531: I/cmusphinx(26199): 1.80
10-29 14:46:17.531: I/cmusphinx(26199): -0.53
10-29 14:46:17.531: I/cmusphinx(26199): -4.15
10-29 14:46:17.531: I/cmusphinx(26199): -0.46
10-29 14:46:17.531: I/cmusphinx(26199): 2.15
10-29 14:46:17.531: I/cmusphinx(26199): 0.37
10-29 14:46:17.531: I/cmusphinx(26199): 2.55
10-29 14:46:17.531: I/cmusphinx(26199): -3.37
10-29 14:46:17.531: I/cmusphinx(26199): 0.76
10-29 14:46:17.531: I/cmusphinx(26199): >
10-29 14:46:17.531: I/cmusphinx(26199): INFO: cmn_prior.c(116): cmn_prior_update: to <
10-29 14:46:17.531: I/cmusphinx(26199): 42.06
10-29 14:46:17.531: I/cmusphinx(26199): 6.03
10-29 14:46:17.531: I/cmusphinx(26199): 10.39
10-29 14:46:17.531: I/cmusphinx(26199): 6.88
10-29 14:46:17.531: I/cmusphinx(26199): 1.63
10-29 14:46:17.531: I/cmusphinx(26199): -1.73
10-29 14:46:17.531: I/cmusphinx(26199): -6.19
10-29 14:46:17.531: I/cmusphinx(26199): 0.79
10-29 14:46:17.531: I/cmusphinx(26199): 1.07
10-29 14:46:17.531: I/cmusphinx(26199): 1.67
10-29 14:46:17.531: I/cmusphinx(26199): 3.15
10-29 14:46:17.531: I/cmusphinx(26199): -4.24
10-29 14:46:17.531: I/cmusphinx(26199): 1.68
10-29 14:46:17.531: I/cmusphinx(26199): >
10-29 14:46:18.567: V/AudioRecord(26199): Overrun user: 29800, server: 2a800, flags 0000
10-29 14:46:18.794: E/@onPartialResult(26199): -------------------- أولائك الذين اشتروا فى ولهم بالهدىا
10-29 14:46:18.794: D/dalvikvm(26199): create interp thread : stack size=128KB
10-29 14:46:18.794: D/dalvikvm(26199): create new thread
10-29 14:46:18.794: D/dalvikvm(26199): new thread created
10-29 14:46:18.794: D/dalvikvm(26199): update thread list
10-29 14:46:18.794: D/dalvikvm(26199): threadid=21: interp stack at 0x5f5ff000
10-29 14:46:18.794: D/dalvikvm(26199): threadid=21: created from interp
10-29 14:46:18.794: D/dalvikvm(26199): start new thread
10-29 14:46:18.795: E/@onEndOfSpeech(26199): أولائك الذين اشتروا فى ولهم بالهدىا
10-29 14:46:18.795: E/@TimeOut(26199): ----1
10-29 14:46:18.795: D/dalvikvm(26199): threadid=21: notify debugger
10-29 14:46:18.795: D/dalvikvm(26199): threadid=21 (Thread-4637): calling run()
10-29 14:46:18.799: E/@(26199): أولائك الذين اشتروا فى ولهم بالهدىا
10-29 14:46:18.800: E/@(26199): ????????????????????????? latestIndex: 116 T: 5282 countFail: 0
10-29 14:46:18.800: E/@Diff(26199): result: أولائك الذين اشتروا فى ولهم بالهدىا
10-29 14:46:18.800: E/@000(26199): oToken: أولائك lIndex: 116 == oIndex: 116 index: 0 == tmpIndex: -1
10-29 14:46:18.800: E/@1(26199): ??
10-29 14:46:18.800: E/@000(26199): oToken: الذين lIndex: 117 == oIndex: 117 index: 1 == tmpIndex: 0
10-29 14:46:18.800: E/@1(26199): ??
10-29 14:46:18.800: E/@000(26199): oToken: اشتروا lIndex: 118 == oIndex: 118 index: 2 == tmpIndex: 1
10-29 14:46:18.800: E/@1(26199): ??
10-29 14:46:18.800: E/@(26199): ========================== latestIndex: 119 T: 5282 countFail: 0
10-29 14:46:18.800: D/dalvikvm(26199): create interp thread : stack size=128KB
10-29 14:46:18.800: D/dalvikvm(26199): create new thread
10-29 14:46:18.801: D/dalvikvm(26199): new thread created
10-29 14:46:18.801: D/dalvikvm(26199): update thread list
10-29 14:46:18.801: D/dalvikvm(26199): threadid=22: interp stack at 0x60cdb000
10-29 14:46:18.801: D/dalvikvm(26199): threadid=22: created from interp
10-29 14:46:18.801: D/dalvikvm(26199): start new thread
10-29 14:46:18.801: D/dalvikvm(26199): threadid=21: exiting
10-29 14:46:18.801: D/dalvikvm(26199): threadid=21: bye!
10-29 14:46:18.801: D/dalvikvm(26199): threadid=22: notify debugger
10-29 14:46:18.801: D/dalvikvm(26199): threadid=22 (Thread-4638): calling run()
10-29 14:46:18.863: I/SurfaceTextureClient(26199): [STC::queueBuffer] (this:0x5eb35250) fps:1.70, dur:5292.52, max:5030.57, min:16.75
10-29 14:46:18.863: I/SurfaceTextureClient(26199): [STC::queueBuffer] this:0x5eb35250, api:1, last queue time elapsed:5030.57
10-29 14:46:18.913: D/dalvikvm(26199): threadid=22: exiting
10-29 14:46:18.913: D/dalvikvm(26199): threadid=22: bye!
10-29 14:46:19.795: E/@TimeOut(26199): ----2
10-29 14:46:20.796: E/@TimeOut(26199): ----3
10-29 14:46:21.156: E/@onBeginningOfSpeech(26199): ---onBeginningOfSpeech
10-29 14:46:22.663: I/cmusphinx(26199): INFO: cmn_prior.c(99): cmn_prior_update: from <
10-29 14:46:22.663: I/cmusphinx(26199): 42.06
10-29 14:46:22.663: I/cmusphinx(26199): 6.03
10-29 14:46:22.663: I/cmusphinx(26199): 10.39
10-29 14:46:22.663: I/cmusphinx(26199): 6.88
10-29 14:46:22.663: I/cmusphinx(26199): 1.63
10-29 14:46:22.663: I/cmusphinx(26199): -1.73
10-29 14:46:22.663: I/cmusphinx(26199): -6.19
10-29 14:46:22.663: I/cmusphinx(26199): 0.79
10-29 14:46:22.663: I/cmusphinx(26199): 1.07
10-29 14:46:22.663: I/cmusphinx(26199): 1.67
10-29 14:46:22.663: I/cmusphinx(26199): 3.15
10-29 14:46:22.663: I/cmusphinx(26199): -4.24
10-29 14:46:22.663: I/cmusphinx(26199): 1.68
10-29 14:46:22.663: I/cmusphinx(26199): >
10-29 14:46:22.663: I/cmusphinx(26199): INFO: cmn_prior.c(116): cmn_prior_update: to <
10-29 14:46:22.663: I/cmusphinx(26199): 43.01
10-29 14:46:22.663: I/cmusphinx(26199): 6.76
10-29 14:46:22.663: I/cmusphinx(26199): 8.93
10-29 14:46:22.663: I/cmusphinx(26199): 6.59
10-29 14:46:22.663: I/cmusphinx(26199): 2.11
10-29 14:46:22.663: I/cmusphinx(26199): -3.37
10-29 14:46:22.663: I/cmusphinx(26199): -7.41
10-29 14:46:22.663: I/cmusphinx(26199): 0.12
10-29 14:46:22.663: I/cmusphinx(26199): 1.30
10-29 14:46:22.663: I/cmusphinx(26199): 2.02
10-29 14:46:22.663: I/cmusphinx(26199): 4.09
10-29 14:46:22.663: I/cmusphinx(26199): -3.65
10-29 14:46:22.663: I/cmusphinx(26199): 0.98
10-29 14:46:22.663: I/cmusphinx(26199): >
10-29 14:46:25.724: V/AudioRecord(26199): Overrun user: 37800, server: 38800, flags 0000
10-29 14:46:25.734: V/AudioRecord(26199): Overrun user: 37800, server: 38800, flags 0001
10-29 14:46:25.744: V/AudioRecord(26199): Overrun user: 37800, server: 38800, flags 0001
10-29 14:46:25.754: V/AudioRecord(26199): Overrun user: 37800, server: 38800, flags 0001
10-29 14:46:26.018: V/AudioRecord(26199): Overrun user: 38000, server: 39000, flags 0000
10-29 14:46:26.028: V/AudioRecord(26199): Overrun user: 38000, server: 39000, flags 0001
10-29 14:46:26.031: I/cmusphinx(26199): INFO: cmn_prior.c(99): cmn_prior_update: from <
10-29 14:46:26.031: I/cmusphinx(26199): 43.01
10-29 14:46:26.031: I/cmusphinx(26199): 6.76
10-29 14:46:26.031: I/cmusphinx(26199): 8.93
10-29 14:46:26.031: I/cmusphinx(26199): 6.59
10-29 14:46:26.031: I/cmusphinx(26199): 2.11
10-29 14:46:26.031: I/cmusphinx(26199): -3.37
10-29 14:46:26.031: I/cmusphinx(26199): -7.41
10-29 14:46:26.031: I/cmusphinx(26199): 0.12
10-29 14:46:26.031: I/cmusphinx(26199): 1.30
10-29 14:46:26.031: I/cmusphinx(26199): 2.02
10-29 14:46:26.031: I/cmusphinx(26199): 4.09
10-29 14:46:26.031: I/cmusphinx(26199): -3.65
10-29 14:46:26.031: I/cmusphinx(26199): 0.98
10-29 14:46:26.031: I/cmusphinx(26199): >
10-29 14:46:26.031: I/cmusphinx(26199): INFO: cmn_prior.c(116): cmn_prior_update: to <
10-29 14:46:26.031: I/cmusphinx(26199): 41.83
10-29 14:46:26.031: I/cmusphinx(26199): 6.59
10-29 14:46:26.031: I/cmusphinx(26199): 10.78
10-29 14:46:26.031: I/cmusphinx(26199): 8.08
10-29 14:46:26.031: I/cmusphinx(26199): 2.44
10-29 14:46:26.031: I/cmusphinx(26199): -2.15
10-29 14:46:26.031: I/cmusphinx(26199): -4.06
10-29 14:46:26.031: I/cmusphinx(26199): -0.39
10-29 14:46:26.031: I/cmusphinx(26199): -0.74
10-29 14:46:26.031: I/cmusphinx(26199): 4.05
10-29 14:46:26.031: I/cmusphinx(26199): 3.37
10-29 14:46:26.031: I/cmusphinx(26199): -2.10
10-29 14:46:26.031: I/cmusphinx(26199): 1.29
10-29 14:46:26.031: I/cmusphinx(26199): >
10-29 14:46:26.264: V/AudioRecord(26199): Overrun user: 38800, server: 39800, flags 0000
10-29 14:46:26.274: V/AudioRecord(26199): Overrun user: 38800, server: 39800, flags 0001
10-29 14:46:26.284: V/AudioRecord(26199): Overrun user: 38800, server: 39800, flags 0001
10-29 14:46:26.517: V/AudioRecord(26199): Overrun user: 39000, server: 3a000, flags 0000
10-29 14:46:26.527: V/AudioRecord(26199): Overrun user: 39000, server: 3a000, flags 0001
10-29 14:46:26.537: V/AudioRecord(26199): Overrun user: 39000, server: 3a000, flags 0001
10-29 14:46:26.547: V/AudioRecord(26199): Overrun user: 39000, server: 3a000, flags 0001
10-29 14:46:26.565: V/AudioRecord(26199): Overrun user: 39000, server: 3a000, flags 0001
10-29 14:46:26.647: I/mtk_dlmalloc_debug(26199): [DEBUG_INFO]FUNCTION tmalloc_large Line 5824 address 61930f68 function 19 action 1554466480 structure type 2 error_member 400 mstate 1 DEBUG be9aa418
10-29 14:46:26.647: A/libc(26199): Fatal signal 11 (SIGSEGV) at 0xdeadbaad (code=1), thread 26199 (android.moallem)
10-29 14:46:26.648: A/libc(26199): Send stop signal to pid:26199 in debugger_signal_handler