diff --git a/README.md b/README.md index 7ae32456d..b2706b09c 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ # **RoboTutor** -Welcome to RoboTutor: +Welcome to RoboTutor: ## **Setup and Configuration:** @@ -12,7 +12,7 @@ Welcome to RoboTutor: [Install GitHub Desktop](https://desktop.github.com/)
-RoboTutor uses a large volume of external assets at runtime. To successfully run RoboTutor you must first install these assets on your target device. The [RTAsset_Publisher](https://github.com/synaptek/RTAsset_Publisher) is the tool you can use to push the Rt assets to your device. Once you have cloned and run the associated tools to push the data assets to your device you can proceed with building RoboTutor. +RoboTutor uses a large volume of external assets at runtime. To successfully run RoboTutor you must first install these assets on your target device. The [RTAsset_Publisher](https://github.com/RoboTutorLLC/RTAsset_Publisher) is the tool you can use to push the RT assets to your device. Once you have cloned and run the associated tools to push the data assets to your device, you can proceed with building RoboTutor. ## **Building RoboTutor:** @@ -21,12 +21,12 @@ RoboTutor uses a large volume of external assets at runtime. To successfully ru 2. **Import** the RoboTutor project into Android Studio. -3. You may need to install different versions of the build tools and android SDKs. - -4. There are a number of build variants you can select to generate versions that support static language selections and also vesions that permit dynamic language selection at runtime. In order to generate any flavor that depends on the key signature, you must generate your own keystore (see next steps). Note that the version used in the XPrize code drop 1 submission usees flavor *release_sw*, which depends on a signed APK. - - -5. If you do not already have one, follow the steps [here](https://stackoverflow.com/questions/3997748/how-can-i-create-a-keystore) to generate a keystore. +3. You may need to install different versions of the build tools and Android SDKs. + +4. There are a number of build variants you can select to generate versions that support static language selection and also versions that permit dynamic language selection at runtime. In order to generate any flavor that depends on the key signature, you must generate your own keystore (see next steps). Note that the version used in the XPrize code drop 1 submission uses flavor *release_sw*, which depends on a signed APK. + + +5. If you do not already have one, follow the steps [here](https://stackoverflow.com/questions/3997748/how-can-i-create-a-keystore) to generate a keystore. 6. Add a file named "keystore.properties" to your root project directory, and give it the following contents. The values should be based on the values you used to generate the keystore. ``` @@ -35,7 +35,7 @@ keyPassword= keyAlias= storeFile= ``` - + 7. Use Android Studio or gradlew to generate a signed APK with the flavor *release_sw*. This will generate the file *robotutor.release_sw.1.8.8.1.apk*. This APK should be transferred to the apk in your local SystemBuild directory.
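As a concrete illustration of steps 6 and 7, here is a minimal sketch of a filled-in keystore.properties plus a command-line build. All values are placeholders, and the exact gradlew task name is an assumption based on standard Android variant naming; check the project's build.gradle for the actual flavor and build-type names.

```
# keystore.properties -- placeholder values, substitute the ones used to create your keystore
storePassword=yourStorePassword
keyPassword=yourKeyPassword
keyAlias=yourKeyAlias
storeFile=/absolute/path/to/your.keystore

# Build the signed release_sw variant from the command line
# (task name assumed from Gradle variant naming conventions)
./gradlew assembleRelease_sw
```

The same APK can also be produced through Android Studio's Generate Signed APK workflow by selecting the *release_sw* variant.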
@@ -45,12 +45,12 @@ storeFile= ## **XPrize Submission:** The following repositories are part of the Team-RoboTutor entry: - * XPRIZE/GLEXP-Team-RoboTutor-RoboTutor + * XPRIZE/GLEXP-Team-RoboTutor-RoboTutor * XPRIZE/GLEXP-Team-RoboTutor-SystemBuild * XPRIZE/GLEXP-Team-RoboTutor-RTAsset_Publisher * XPRIZE/GLEXP-Team-RoboTutor-CodeDrop1-Assets - * XPRIZE/GLEXP-Team-RoboTutor-RoboLauncher - * XPRIZE/GLEXP-Team-RoboTutor-RoboTransfer + * XPRIZE/GLEXP-Team-RoboTutor-RoboLauncher + * XPRIZE/GLEXP-Team-RoboTutor-RoboTransfer diff --git a/app/src/README.md b/app/src/README.md index fe2d6edc5..8762e4cf0 100644 --- a/app/src/README.md +++ b/app/src/README.md @@ -74,7 +74,7 @@ "tutorVariants" supporting [encfolder] encodings ---------------------------------------------------------- - "story.echo", "story.hear", "story.read" + "story.echo", "story.hear", "story.read", "story.parrot", "story.hide", "story.reveal" Tutors with large numbers of datasources use the [encfolder] encoding scheme. With this encoding the tutor descriptor is combined with the datasource descriptor in the following manner. diff --git a/app/src/main/assets/tutors/engine_descriptor.json b/app/src/main/assets/tutors/engine_descriptor.json index 16e310843..89b49388c 100644 --- a/app/src/main/assets/tutors/engine_descriptor.json +++ b/app/src/main/assets/tutors/engine_descriptor.json @@ -259,6 +259,11 @@ "tutorName": "story_reading", "features": "FTR_USER_PARROT" }, + "story.prompt": { + "type": "TUTORVAR_MAP", + "tutorName": "story_reading", + "features": "FTR_USER_PROMPT" + }, "story.gen.hide": { "type": "TUTORVAR_MAP", "tutorName": "story_questions", diff --git a/app/src/main/assets/tutors/story_reading/animator_graph.json b/app/src/main/assets/tutors/story_reading/animator_graph.json index 1b3acab10..553b8ba00 100644 --- a/app/src/main/assets/tutors/story_reading/animator_graph.json +++ b/app/src/main/assets/tutors/story_reading/animator_graph.json @@ -21,8 +21,7 @@ {"name": "LOG_EVENT", "type": "QUEUEDCOMMAND", "id": "SstoryReading", "method": "logState", "parms": "type#prompt,value#SPEAK_WORD_BEHAVIOR:String", "features": ""}, {"type": "QUEUEDAUDIO", "command": "PLAY", "soundsource": "{{SstoryReading.currentWord}}.mp3", "soundpackage": "words", "mode": "flow", "features": ""}, - {"type": "QUEUEDCOMMAND", "id": "SstoryReading", "method": "post", "parms": "NEXT_WORD:String" , "features": ""} - + {"type": "QUEUEDCOMMAND", "id": "SstoryReading", "method": "post", "parms": "NEXT_WORD:String", "features": ""} ], "preexit": [], "edges": [] @@ -38,9 +37,9 @@ // NOTE: the button must be in a valid state before showing // TODO: put check in to catch invalid behaviors - {"type": "QUEUEDCOMMAND", "id": "SstoryReading", "method": "setStickyBehavior", "parms": "SPEAK_CLICK:String|SPEAK_WORD_BEHAVIOR:String" , "features": ""}, - {"type": "QUEUEDCOMMAND", "id": "SstoryReading", "method": "setSpeakButton", "parms": "ENABLE:String", "features": ""}, - {"type": "QUEUEDCOMMAND", "id": "SstoryReading", "method": "setSpeakButton", "parms": "SHOW:String", "features": ""} + {"type": "QUEUEDCOMMAND", "id": "SstoryReading", "method": "setStickyBehavior", "parms": "SPEAK_CLICK:String|SPEAK_WORD_BEHAVIOR:String", "features": ""}, + {"type": "QUEUEDCOMMAND", "id": "SstoryReading", "method": "setSpeakButton", "parms": "ENABLE:String", "features": ""}, + {"type": "QUEUEDCOMMAND", "id": "SstoryReading", "method": "setSpeakButton", "parms": "SHOW:String", "features": ""} ] }, @@ -53,7 +52,7 @@ "tracks": [ {"name": "LOG_EVENT", "type": "QUEUEDCOMMAND", "id": 
"SstoryReading", "method": "logState", "parms": "type#behavior,value#NARRATE_SENTENCE_BEHAVIOR:String", "features": ""}, - {"type": "QUEUEDCOMMAND", "id": "SstoryReading", "method": "post", "parms": "START_NARRATION:String|1000:long" , "features": ""} + {"type": "QUEUEDCOMMAND", "id": "SstoryReading", "method": "post", "parms": "START_NARRATION:String|1000:long", "features": ""} ], "preexit": [], "edges": [] @@ -67,7 +66,8 @@ "tracks": [ {"name": "LOG_EVENT", "type": "QUEUEDCOMMAND", "id": "SstoryReading", "method": "logState", "parms": "type#behavior,value#SPEAK_UTTERANCE_BEHAVIOR:String", "features": ""}, - {"type": "QUEUEDAUDIO", "command": "PLAY", "soundsource": "{{SstoryReading.utterance}}.mp3", "listeners": "SstoryReading", "oncomplete": "TRACK_SEGMENT" , "soundpackage": "story", "mode": "flow", "features": ""} + {"type": "QUEUEDAUDIO", "command": "PLAY", "soundsource": "{{SstoryReading.utterance}}.mp3", "listeners": "SstoryReading", "oncomplete": "TRACK_SEGMENT", "mode": "flow", "features": "FTR_PROMPT"}, + {"type": "QUEUEDAUDIO", "command": "PLAY", "soundsource": "{{SstoryReading.utterance}}.mp3", "listeners": "SstoryReading", "oncomplete": "TRACK_SEGMENT", "soundpackage": "story", "mode": "flow", "features": "!FTR_PROMPT"} ], "preexit": [], "edges": [] @@ -81,13 +81,13 @@ "tracks": [ {"name": "LOG_EVENT", "type": "QUEUEDCOMMAND", "id": "SstoryReading", "method": "logState", "parms": "type#behavior,value#SPEAK_SENTENCE_BEHAVIOR:String", "features": ""}, - {"type": "QUEUEDAUDIO", "command": "PLAY", "soundsource": "{{SstoryReading.sentence}}.mp3", "listeners": "SstoryReading", "oncomplete": "TRACK_SEGMENT", "soundpackage": "story", "mode": "flow", "features": ""}, - {"type": "QUEUEDCOMMAND", "id": "SstoryReading", "method": "nextSentence" , "features": ""} + {"type": "QUEUEDAUDIO", "command": "PLAY", "soundsource": "{{SstoryReading.sentence}}.mp3", "listeners": "SstoryReading", "oncomplete": "TRACK_SEGMENT", "mode": "flow", "features": "FTR_PROMPT"}, + {"type": "QUEUEDAUDIO", "command": "PLAY", "soundsource": "{{SstoryReading.sentence}}.mp3", "listeners": "SstoryReading", "oncomplete": "TRACK_SEGMENT", "soundpackage": "story", "mode": "flow", "features": "!FTR_PROMPT"}, + {"type": "QUEUEDCOMMAND", "id": "SstoryReading", "method": "nextSentence", "features": ""} ], "preexit": [], "edges": [] } - }, "nodeMap": { @@ -99,7 +99,7 @@ "COMMENT": "Intro Clip", "preenter": ["SET_VERSION"], "maptype": "moduleMap", - "mapname": "PLAYINTRO", + "mapname": "PLAY_INTRO", "preexit": [], "edges": [ {"constraint": "", "edge": "NEXT_STEP"} @@ -132,6 +132,18 @@ "mapname": "START_STORY", "preenter": ["SET_NARRATOR", "SET_UTTERANCE"], "preexit": [], + "edges": [ + {"constraint": "", "edge": "PLAY_PROMPT_NODE"} + ] + }, + + "PLAY_PROMPT_NODE": { + "type": "NODE", + "COMMENT": "Play mode prompt", + "preenter": [], + "maptype": "moduleMap", + "mapname": "PLAY_PROMPT", + "preexit": [], "edges": [ {"constraint": "", "edge": "LISTEN"} ] @@ -202,7 +214,7 @@ "PARROT_LINE_NODE": { "type": "NODE", "COMMENT": "When in parrot mode - we listen to the student repeat the last line", - "maptype": "moduleMap", + "maptype": "actionMap", "mapname": "PARROT_LINE", "preenter": [], "preexit": [], @@ -213,7 +225,7 @@ "NEXT_WORD_NODE": { "type": "NODE", - "COMMENT": "When module is complete - move to next scene in the scenegraph", + "COMMENT": "When word is complete - move to next word in the scenegraph", "maptype": "moduleMap", "mapname": "PREP_NEXT_WORD", "preenter": [], @@ -225,43 +237,43 @@ "NEXT_LINE_NODE": { "type": 
"NODE", - "COMMENT": "When module is complete - move to next scene in the scenegraph", + "COMMENT": "When line is complete - move to next line in the scenegraph", "maptype": "actionMap", "mapname": "NEXT_LINE", "preenter": [], "preexit": [], "edges": [ - {"constraint": "", "edge": "LISTEN"} + {"constraint": "", "edge": "PLAY_PROMPT_NODE"} ] }, "NEXT_PARA_NODE": { "type": "NODE", - "COMMENT": "When module is complete - move to next scene in the scenegraph", + "COMMENT": "When paragraph is complete - move to next paragraph in the scenegraph", "maptype": "actionMap", "mapname": "NEXT_PARA", "preenter": [], "preexit": [], "edges": [ - {"constraint": "", "edge": "LISTEN"} + {"constraint": "", "edge": "PLAY_PROMPT_NODE"} ] }, "NEXT_PAGE_NODE": { "type": "NODE", - "COMMENT": "When module is complete - move to next scene in the scenegraph", - "maptype": "moduleMap", - "mapname": "PAGEFLIP", + "COMMENT": "When page is complete - move to next page in the scenegraph", + "maptype": "actionMap", + "mapname": "NEXT_PAGE", "preenter": [], "preexit": [], "edges": [ - {"constraint": "", "edge": "LISTEN"} + {"constraint": "", "edge": "PLAY_PROMPT_NODE"} ] }, "NEXT_SCENE": { "type": "NODE", - "COMMENT": "When module is complete - move to next scene in the scenegraph", + "COMMENT": "When scene is complete - move to next scene in the scenegraph", "maptype": "moduleMap", "mapname": "SCENEFLIP", "preenter": [], @@ -274,54 +286,45 @@ "moduleMap": { "COMMENT": "@@@@@ CModules @@@@@", - "PLAYINTRO": { + + "PLAY_INTRO": { "type": "MODULE", "reuse": true, "COMMENT": "TBD", "tracks": [ - {"name": "LOG_EVENT", "type": "QUEUEDCOMMAND", "id": "SstoryReading", "method": "logState", "parms": "type#node,value#PLAYINTRO:String", "features": ""}, - - // Note that the sw actually says "please read the story aloud' - // - {"type": "AUDIO", "command": "PLAY", "soundsource": "{{SstoryReading.prompt}}", "mode": "flow", "features": "FTR_PROMPT"}, - {"type": "AUDIO", "command": "PLAY", "soundsource": "Please read aloud.mp3", "mode": "flow", "features": "!FTR_PROMPT&FTR_USER_READ|!FTR_PROMPT&FTR_USER_ECHO|!FTR_PROMPT&FTR_USER_REVEAL"}, - //{"type": "AUDIO", "command": "PLAY", "soundsource": "Now lets listen to a story.mp3", "mode": "flow", "features": "!FTR_PROMPT&FTR_USER_HEAR|!FTR_PROMPT&FTR_USER_HIDE"}, - {"type": "AUDIO", "command": "PLAY", "soundsource": "Listen carefully.mp3", "mode": "flow", "features": "!FTR_PROMPT&FTR_USER_HEAR|!FTR_PROMPT&FTR_USER_HIDE"}, - {"type": "AUDIO", "command": "PLAY", "soundsource": "Please listen and repeat after me.mp3", "mode": "flow", "features": "!FTR_PROMPT&FTR_USER_PARROT"} + {"name": "LOG_EVENT", "type": "QUEUEDCOMMAND", "id": "SstoryReading", "method": "logState", "parms": "type#node,value#PLAYINTRO:String", "features": ""} ] }, - "LISTENING": { + "PLAY_PROMPT": { "type": "MODULE", "reuse": true, - "COMMENT": "Listening Module", + "COMMENT": "TBD", "tracks": [ - {"name": "LOG_EVENT", "type": "QUEUEDCOMMAND", "id": "SstoryReading", "method": "logState", "parms": "type#node,value#LISTENING:String", "features": ""}, + {"name": "LOG_EVENT", "type": "QUEUEDCOMMAND", "id": "SstoryReading", "method": "logState", "parms": "type#node,value#PLAY_PROMPT:String", "features": ""}, - {"type": "COMMAND", "cmd": "WAIT"} + {"type": "AUDIO", "command": "PLAY", "soundsource": "{{SstoryReading.prompt}}", "mode": "flow", "features": "FTR_PLAY_PROMPT"} ] }, - "PLAYCORRECT": { + "LISTENING": { "type": "MODULE", "reuse": true, - "COMMENT": "TBD", + "COMMENT": "Listening Module", "tracks": [ - {"name": 
"LOG_EVENT", "type": "QUEUEDCOMMAND", "id": "SstoryReading", "method": "logState", "parms": "type#node,value#PLAYCORRECT:String", "features": ""} + {"name": "LOG_EVENT", "type": "QUEUEDCOMMAND", "id": "SstoryReading", "method": "logState", "parms": "type#node,value#LISTENING:String", "features": ""}, + {"type": "COMMAND", "id": "SstoryReading", "method": "continueListening", "features": ""}, + {"type": "COMMAND", "cmd": "WAIT"} ] }, - "PARROT_LINE": { + "PLAYCORRECT": { "type": "MODULE", "reuse": true, "COMMENT": "TBD", "tracks": [ - {"name": "LOG_EVENT", "type": "QUEUEDCOMMAND", "id": "SstoryReading", "method": "logState", "parms": "type#node,value#PARROT_LINE:String", "features": ""}, - - {"type": "AUDIO", "command": "PLAY", "soundsource": "{{SstoryReading.page_prompt}}", "mode": "flow", "features": "FTR_PAGE_PROMPT"}, - {"type": "AUDIO", "command": "PLAY", "soundsource": "Repeat after me.mp3", "mode": "flow", "features": "!FTR_PAGE_PROMPT"}, - {"type": "COMMAND", "id": "SstoryReading", "method": "parrotLine" , "features": ""} + {"name": "LOG_EVENT", "type": "QUEUEDCOMMAND", "id": "SstoryReading", "method": "logState", "parms": "type#node,value#PLAYCORRECT:String", "features": ""} ] }, @@ -343,7 +346,7 @@ "tracks": [ {"name": "LOG_EVENT", "type": "QUEUEDCOMMAND", "id": "SstoryReading", "method": "logState", "parms": "type#node,value#CHECK_COMPLETE:String", "features": ""}, - {"type": "COMMAND", "id": "SstoryReading", "method": "setSpeakButton", "parms": "HIDE:String", "features": ""} + {"type": "COMMAND", "id": "SstoryReading", "method": "setSpeakButton", "parms": "HIDE:String", "features": ""} ] }, @@ -363,8 +366,8 @@ "tracks": [ {"name": "LOG_EVENT", "type": "QUEUEDCOMMAND", "id": "SstoryReading", "method": "logState", "parms": "type#node,value#PREP_NEXT_WORD:String", "features": ""}, - {"type": "COMMAND", "id": "SstoryReading", "method": "setSpeakButton", "parms": "HIDE:String", "features": ""}, - {"type": "COMMAND", "id": "SstoryReading", "method": "setPageFlipButton", "parms": "DISABLE:String", "features": "FTR_MANUAL"} + {"type": "COMMAND", "id": "SstoryReading", "method": "setSpeakButton", "parms": "HIDE:String", "features": ""}, + {"type": "COMMAND", "id": "SstoryReading", "method": "setPageFlipButton", "parms": "DISABLE:String", "features": "FTR_MANUAL"} ] }, @@ -375,15 +378,15 @@ "tracks": [ {"name": "LOG_EVENT", "type": "QUEUEDCOMMAND", "id": "SstoryReading", "method": "logState", "parms": "type#node,value#PAGEFLIP:String", "features": ""}, - {"type": "COMMAND", "id": "SstoryReading", "method": "setPageFlipButton", "parms": "ENABLE:String", "features": "FTR_MANUAL"}, - {"type": "COMMAND", "id": "SstoryReading", "method": "setVolatileBehavior", "parms": "ON_PAGE_FLIP:String|NEXT_NODE:String" , "features": "FTR_MANUAL"}, + {"type": "COMMAND", "id": "SstoryReading", "method": "setPageFlipButton", "parms": "ENABLE:String", "features": "FTR_MANUAL"}, + {"type": "COMMAND", "id": "SstoryReading", "method": "setVolatileBehavior", "parms": "ON_PAGE_FLIP:String|NEXT_NODE:String", "features": "FTR_MANUAL"}, {"type": "COMMAND", "cmd": "WAIT", "features": "FTR_MANUAL"}, - {"type": "COMMAND", "id": "SstoryReading", "method": "setPageFlipButton", "parms": "DISABLE:String", "features": "FTR_MANUAL"}, + {"type": "COMMAND", "id": "SstoryReading", "method": "setPageFlipButton", "parms": "DISABLE:String", "features": "FTR_MANUAL"}, - {"type": "COMMAND", "id": "SstoryReading", "method": "post", "parms": "NEXT_PAGE:String|500:long" , "features": "!FTR_MANUAL"}, - {"type": "COMMAND", "id": 
"SstoryReading", "method": "post", "parms": "NEXT_PAGE:String" , "features": "FTR_MANUAL"} + {"type": "COMMAND", "id": "SstoryReading", "method": "post", "parms": "NEXT_PAGE:String|500:long", "features": "!FTR_MANUAL"}, + {"type": "COMMAND", "id": "SstoryReading", "method": "post", "parms": "NEXT_PAGE:String", "features": "FTR_MANUAL"} ] }, @@ -394,15 +397,15 @@ "tracks": [ {"name": "LOG_EVENT", "type": "QUEUEDCOMMAND", "id": "SstoryReading", "method": "logState", "parms": "type#node,value#SCENEFLIP:String", "features": ""}, - {"name": "ENABLE_PAGEFLIP", "type": "COMMAND", "id": "SstoryReading", "method": "setPageFlipButton", "parms": "ENABLE:String", "features": "FTR_MANUAL"}, - {"name": "SET_FLIP_BEHAVIOR", "type": "COMMAND", "id": "SstoryReading", "method": "setVolatileBehavior", "parms": "ON_PAGE_FLIP:String|NEXT_NODE:String" , "features": "FTR_MANUAL"}, + {"name": "ENABLE_PAGEFLIP", "type": "COMMAND", "id": "SstoryReading", "method": "setPageFlipButton", "parms": "ENABLE:String", "features": "FTR_MANUAL"}, + {"name": "SET_FLIP_BEHAVIOR", "type": "COMMAND", "id": "SstoryReading", "method": "setVolatileBehavior", "parms": "ON_PAGE_FLIP:String|NEXT_NODE:String", "features": "FTR_MANUAL"}, - {"name": "WAIT_FOR_BUTTON", "type": "COMMAND", "cmd": "WAIT", "features": "FTR_MANUAL"}, + {"name": "WAIT_FOR_BUTTON", "type": "COMMAND", "cmd": "WAIT", "features": "FTR_MANUAL"}, - {"name": "DISABLE_PAGEFLIP", "type": "COMMAND", "id": "SstoryReading", "method": "setPageFlipButton", "parms": "DISABLE:String", "features": "FTR_MANUAL"}, + {"name": "DISABLE_PAGEFLIP", "type": "COMMAND", "id": "SstoryReading", "method": "setPageFlipButton", "parms": "DISABLE:String", "features": "FTR_MANUAL"}, - {"name": "POST-NEXT_SCENE delayed", "type": "COMMAND", "id": "SstoryReading", "method": "post", "parms": "NEXT_SCENE:String|2000:long" , "features": "!FTR_MANUAL"}, - {"name": "POST-NEXT_SCENE", "type": "COMMAND", "id": "SstoryReading", "method": "post", "parms": "NEXT_SCENE:String" , "features": "FTR_MANUAL"}, + {"name": "POST-NEXT_SCENE delayed", "type": "COMMAND", "id": "SstoryReading", "method": "post", "parms": "NEXT_SCENE:String|2000:long", "features": "!FTR_MANUAL"}, + {"name": "POST-NEXT_SCENE", "type": "COMMAND", "id": "SstoryReading", "method": "post", "parms": "NEXT_SCENE:String", "features": "FTR_MANUAL"}, {"name": "WAIT_POSTED_EXIT", "type": "COMMAND", "cmd": "WAIT", "features": ""} ] @@ -416,7 +419,7 @@ {"name": "LOG_EVENT", "type": "QUEUEDCOMMAND", "id": "SstoryReading", "method": "logState", "parms": "type#node,value#PLAYWRONG:String", "features": ""}, {"type": "COMMAND", "id": "SstoryReading", "method": "setHighLight", "parms": "red:String"}, - {"type": "COMMAND", "id": "SstoryReading", "method": "continueListening" , "features": ""} + {"type": "COMMAND", "id": "SstoryReading", "method": "continueListening", "features": ""} ] } }, @@ -427,68 +430,69 @@ "SET_VERSION": {"type": "COMMAND", "id": "Sbanner", "method": "setVersionID", "parms": "v.0.0.1:String", "features": ""}, - "START_STORY": {"type": "COMMAND", "id": "SstoryReading", "method": "startStory", "features": ""}, + "START_STORY": {"type": "COMMAND", "id": "SstoryReading", "method": "startStory", "features": ""}, "INIT_LISTENER": {"type": "COMMAND", "id": "SstoryReading", "method": "initListener", "features": "FTR_USER_PARROT"}, - "HESTITATION_TIMER": {"type": "COMMAND", "id": "SstoryReading", "method": "setVolatileBehavior", "parms": "ASR_TIMED_START_EVENT:String|HESITATION_PROMPT:String|5000:Integer" , "features": ""}, - 
"HESTITATION_RESET": {"type": "COMMAND", "id": "SstoryReading", "method": "setVolatileBehavior", "parms": "ASR_TIMED_START_EVENT:String|NULL:String|0:Integer" , "features": ""}, + "HESITATION_TIMER": {"type": "COMMAND", "id": "SstoryReading", "method": "setVolatileBehavior", "parms": "ASR_TIMED_START_EVENT:String|HESITATION_PROMPT:String|5000:Integer", "features": ""}, + "HESITATION_RESET": {"type": "COMMAND", "id": "SstoryReading", "method": "setVolatileBehavior", "parms": "ASR_TIMED_START_EVENT:String|NULL:String|0:Integer", "features": ""}, - "SET_WORD_EVENT": {"type": "COMMAND", "id": "SstoryReading", "method": "setVolatileBehavior", "parms": "ASR_WORD_EVENT:String|HESTITATION_RESET:String" , "features": ""}, - "RESET_WORD_EVENT": {"type": "COMMAND", "id": "SstoryReading", "method": "setVolatileBehavior", "parms": "ASR_WORD_EVENT:String|NULL:String" , "features": ""}, + "SET_WORD_EVENT": {"type": "COMMAND", "id": "SstoryReading", "method": "setVolatileBehavior", "parms": "ASR_WORD_EVENT:String|HESITATION_RESET:String", "features": ""}, + "RESET_WORD_EVENT": {"type": "COMMAND", "id": "SstoryReading", "method": "setVolatileBehavior", "parms": "ASR_WORD_EVENT:String|NULL:String", "features": ""}, - "SET_UTTERANCE_BEHAVIOR": {"type": "COMMAND", "id": "SstoryReading", "method": "setStickyBehavior", "parms": "UTTERANCE_COMPLETE_EVENT:String|NEXT_NODE:String" , "features": ""}, - "SET_HYPOTHESIS_BEHAVIOR": {"type": "COMMAND", "id": "SstoryReading", "method": "setVolatileBehavior", "parms": "ASR_RECOGNITION_EVENT:String|NEXT_NODE:String" , "features": ""}, - "CLR_HYPOTHESIS_BEHAVIOR": {"type": "COMMAND", "id": "SstoryReading", "method": "setVolatileBehavior", "parms": "ASR_RECOGNITION_EVENT:String|NULL:String" , "features": ""}, + "SET_UTTERANCE_BEHAVIOR": {"type": "COMMAND", "id": "SstoryReading", "method": "setStickyBehavior", "parms": "UTTERANCE_COMPLETE_EVENT:String|NEXT_NODE:String", "features": ""}, + "SET_HYPOTHESIS_BEHAVIOR": {"type": "COMMAND", "id": "SstoryReading", "method": "setVolatileBehavior", "parms": "ASR_RECOGNITION_EVENT:String|NEXT_NODE:String", "features": ""}, + "CLR_HYPOTHESIS_BEHAVIOR": {"type": "COMMAND", "id": "SstoryReading", "method": "setVolatileBehavior", "parms": "ASR_RECOGNITION_EVENT:String|NULL:String", "features": ""}, "HIGHLIGHT_ERROR": {"type": "COMMAND", "id": "SstoryReading", "method": "setHighLight", "parms": "red:String"}, - "CONTINUE": {"type": "COMMAND", "id": "SstoryReading", "method": "continueListening" , "features": ""}, + "CONTINUE": {"type": "COMMAND", "id": "SstoryReading", "method": "continueListening" , "features": ""}, "NEXT_WORD": {"type": "COMMAND", "id": "SstoryReading", "method": "nextWord" , "features": ""}, - "ECHO_LINE": {"type": "COMMAND", "id": "SstoryReading", "method": "echoLine" , "features": ""}, "NEXT_LINE": {"type": "COMMAND", "id": "SstoryReading", "method": "nextLine" , "features": ""}, "NEXT_PARA": {"type": "COMMAND", "id": "SstoryReading", "method": "nextPara" , "features": ""}, "NEXT_PAGE": {"type": "COMMAND", "id": "SstoryReading", "method": "nextPage" , "features": ""}, + "ECHO_LINE": {"type": "COMMAND", "id": "SstoryReading", "method": "echoLine" , "features": ""}, + "PARROT_LINE": {"type": "COMMAND", "id": "SstoryReading", "method": "parrotLine", "features": ""}, "SET_ONCLICK_SPEAK_WORD": {"type": "COMMAND", "id": "SstoryReading", "method": "setVolatileBehavior", "parms": "ON_CLICK:String|SPEAK_WORD_BEHAVIOR:String" , "features": "FTR_USER_READ|FTR_USER_READING"}, - "CLR_ONCLICK": {"type": "COMMAND", "id": 
"SstoryReading", "method": "setVolatileBehavior", "parms": "ON_CLICK:String|NULL:String" , "features": ""}, + "CLR_ONCLICK": {"type": "COMMAND", "id": "SstoryReading", "method": "setVolatileBehavior", "parms": "ON_CLICK:String|NULL:String" , "features": ""}, - "TIMER_HINT_WORD": {"type": "TIMER", "id": "HintWordTimer", "startdelay": "0", "period": "3000", "repeat": "false", "action": "CREATEANDSTART", "ontimer": "SPEAK_WORD_BEHAVIOR", "features": "FTR_USER_READ|FTR_USER_READING"}, + "TIMER_HINT_WORD": {"type": "TIMER", "id": "HintWordTimer", "startdelay": "0", "period": "3000", "repeat": "false", "action": "CREATEANDSTART", "ontimer": "SPEAK_WORD_BEHAVIOR", "features": "FTR_USER_READ|FTR_USER_READING"}, "TIMER_CANCEL_HINT_WORD": {"type": "TIMER", "id": "HintWordTimer", "action": "CANCEL", "features": ""}, - "TIMER_HINT_BUTTON": {"type": "TIMER", "id": "HintWordTimer", "startdelay": "0", "period": "3500", "repeat": "false", "action": "CREATEANDSTART", "ontimer": "CONFIG_SPEAK_BUTTON", "features": "FTR_USER_READ|FTR_USER_READING"}, + "TIMER_HINT_BUTTON": {"type": "TIMER", "id": "HintWordTimer", "startdelay": "0", "period": "3500", "repeat": "false", "action": "CREATEANDSTART", "ontimer": "CONFIG_SPEAK_BUTTON", "features": "FTR_USER_READ|FTR_USER_READING"}, "TIMER_CANCEL_HINT_BUTTON": {"type": "TIMER", "id": "HintWordTimer", "action": "CANCEL", "features": ""}, - "FLIP_BUTTON_ENABLE": {"type": "COMMAND", "id": "SstoryReading", "method": "setPageFlipButton", "parms": "ENABLE:String", "features": ""}, - "FLIP_BUTTON_DISABLE": {"type": "COMMAND", "id": "SstoryReading", "method": "setPageFlipButton", "parms": "DISABLE:String", "features": ""}, - "FLIP_BUTTON_SHOW": {"type": "COMMAND", "id": "SstoryReading", "method": "setPageFlipButton", "parms": "SHOW:String", "features": ""}, - "FLIP_BUTTON_HIDE": {"type": "COMMAND", "id": "SstoryReading", "method": "setPageFlipButton", "parms": "HIDE:String", "features": ""}, + "FLIP_BUTTON_ENABLE": {"type": "COMMAND", "id": "SstoryReading", "method": "setPageFlipButton", "parms": "ENABLE:String", "features": ""}, + "FLIP_BUTTON_DISABLE": {"type": "COMMAND", "id": "SstoryReading", "method": "setPageFlipButton", "parms": "DISABLE:String", "features": ""}, + "FLIP_BUTTON_SHOW": {"type": "COMMAND", "id": "SstoryReading", "method": "setPageFlipButton", "parms": "SHOW:String", "features": ""}, + "FLIP_BUTTON_HIDE": {"type": "COMMAND", "id": "SstoryReading", "method": "setPageFlipButton", "parms": "HIDE:String", "features": ""}, - "SPEAK_BUTTON_ENABLE": {"type": "COMMAND", "id": "SstoryReading", "method": "setSpeakButton", "parms": "ENABLE:String", "features": ""}, - "SPEAK_BUTTON_DISABLE": {"type": "COMMAND", "id": "SstoryReading", "method": "setSpeakButton", "parms": "DISABLE:String", "features": ""}, - "SPEAK_BUTTON_SHOW": {"type": "COMMAND", "id": "SstoryReading", "method": "setSpeakButton", "parms": "SHOW:String", "features": ""}, - "SPEAK_BUTTON_HIDE": {"type": "COMMAND", "id": "SstoryReading", "method": "setSpeakButton", "parms": "HIDE:String", "features": ""}, + "SPEAK_BUTTON_ENABLE": {"type": "COMMAND", "id": "SstoryReading", "method": "setSpeakButton", "parms": "ENABLE:String", "features": ""}, + "SPEAK_BUTTON_DISABLE": {"type": "COMMAND", "id": "SstoryReading", "method": "setSpeakButton", "parms": "DISABLE:String", "features": ""}, + "SPEAK_BUTTON_SHOW": {"type": "COMMAND", "id": "SstoryReading", "method": "setSpeakButton", "parms": "SHOW:String", "features": ""}, + "SPEAK_BUTTON_HIDE": {"type": "COMMAND", "id": "SstoryReading", "method": 
"setSpeakButton", "parms": "HIDE:String", "features": ""}, - "SET_FLIP_NEXTPAGE": {"type": "COMMAND", "id": "SstoryReading", "method": "setVolatileBehavior", "parms": "PAGE_FLIP_CLICK:String|NEXT_PAGE:String" , "features": ""}, - "RESET_FLIP_ONCLICK": {"type": "COMMAND", "id": "SstoryReading", "method": "setVolatileBehavior", "parms": "PAGE_FLIP_CLICK:String|NULL:String" , "features": ""}, + "SET_FLIP_NEXTPAGE": {"type": "COMMAND", "id": "SstoryReading", "method": "setVolatileBehavior", "parms": "PAGE_FLIP_CLICK:String|NEXT_PAGE:String", "features": ""}, + "RESET_FLIP_ONCLICK": {"type": "COMMAND", "id": "SstoryReading", "method": "setVolatileBehavior", "parms": "PAGE_FLIP_CLICK:String|NULL:String", "features": ""}, - "SET_SPEAK_EVENT": {"type": "COMMAND", "id": "SstoryReading", "method": "setVolatileBehavior", "parms": "SPEAK_EVENT:String|SPEAK_WORD_BEHAVIOR:String" , "features": ""}, - "CLR_SPEAK_EVENT": {"type": "COMMAND", "id": "SstoryReading", "method": "setVolatileBehavior", "parms": "SPEAK_EVENT:String|NULL:String" , "features": ""}, + "SET_SPEAK_EVENT": {"type": "COMMAND", "id": "SstoryReading", "method": "setVolatileBehavior", "parms": "SPEAK_EVENT:String|SPEAK_WORD_BEHAVIOR:String", "features": ""}, + "CLR_SPEAK_EVENT": {"type": "COMMAND", "id": "SstoryReading", "method": "setVolatileBehavior", "parms": "SPEAK_EVENT:String|NULL:String", "features": ""}, - "SET_SPEAK_WORD": {"type": "COMMAND", "id": "SstoryReading", "method": "setStickyBehavior", "parms": "SPEAK_CLICK:String|SPEAK_WORD_BEHAVIOR:String" , "features": ""}, - "SET_SPEAK_SENTENCE": {"type": "COMMAND", "id": "SstoryReading", "method": "setVolatileBehavior", "parms": "SPEAK_CLICK:String|SPEAK_SENTENCE_BEHAVIOR:String" , "features": ""}, - "RESET_SPEAK_ONCLICK": {"type": "COMMAND", "id": "SstoryReading", "method": "setVolatileBehavior", "parms": "SPEAK_CLICK:String|NULL:String" , "features": ""}, + "SET_SPEAK_WORD": {"type": "COMMAND", "id": "SstoryReading", "method": "setStickyBehavior", "parms": "SPEAK_CLICK:String|SPEAK_WORD_BEHAVIOR:String", "features": ""}, + "SET_SPEAK_SENTENCE": {"type": "COMMAND", "id": "SstoryReading", "method": "setVolatileBehavior", "parms": "SPEAK_CLICK:String|SPEAK_SENTENCE_BEHAVIOR:String", "features": ""}, + "RESET_SPEAK_ONCLICK": {"type": "COMMAND", "id": "SstoryReading", "method": "setVolatileBehavior", "parms": "SPEAK_CLICK:String|NULL:String", "features": ""}, - "SET_NARRATOR": {"type": "COMMAND", "id": "SstoryReading", "method": "setStickyBehavior", "parms": "NARRATE_STORY:String|NARRATE_SENTENCE_BEHAVIOR:String" , "features": ""}, - "SET_UTTERANCE": {"type": "COMMAND", "id": "SstoryReading", "method": "setStickyBehavior", "parms": "SPEAK_UTTERANCE:String|SPEAK_UTTERANCE_BEHAVIOR:String" , "features": ""}, + "SET_NARRATOR": {"type": "COMMAND", "id": "SstoryReading", "method": "setStickyBehavior", "parms": "NARRATE_STORY:String|NARRATE_SENTENCE_BEHAVIOR:String", "features": ""}, + "SET_UTTERANCE": {"type": "COMMAND", "id": "SstoryReading", "method": "setStickyBehavior", "parms": "SPEAK_UTTERANCE:String|SPEAK_UTTERANCE_BEHAVIOR:String", "features": ""}, - "EMPTY_ACTION": {"type": "COMMAND", "cmd": "NOOP"}, - "GOTONEXTSCENE": {"type": "COMMAND", "cmd": "NEXTSCENE"}, - "RETURN_AND_GO": {"type": "COMMAND", "cmd": "SUBGRAPH_RETURN_AND_GO"}, + "EMPTY_ACTION": {"type": "COMMAND", "cmd": "NOOP"}, + "GOTONEXTSCENE": {"type": "COMMAND", "cmd": "NEXTSCENE"}, + "RETURN_AND_GO": {"type": "COMMAND", "cmd": "SUBGRAPH_RETURN_AND_GO"}, "RETURN_AND_WAIT": {"type": "COMMAND", "cmd": 
"SUBGRAPH_RETURN_AND_WAIT"}, "CANCEL_FEEDBACK": {"type": "COMMAND", "cmd": "CANCEL_NODE"}, - "NEXT_NODE": {"type": "COMMAND", "cmd": "NEXT" }, - "WAIT": {"type": "COMMAND", "cmd": "WAIT" }, - "PAUSE": {"type": "COMMAND", "cmd": "WAIT"} + "NEXT_NODE": {"type": "COMMAND", "cmd": "NEXT"}, + "WAIT": {"type": "COMMAND", "cmd": "WAIT"}, + "PAUSE": {"type": "COMMAND", "cmd": "WAIT"} }, "constraintMap": { @@ -509,7 +513,7 @@ "Else": "false" }, - // Note that we don't have operator precidence so we must use ( ) to enforce precidence + // Note that we don't have operator precedence so we must use ( ) to enforce precedence // "STORY_COMPLETE": { "type": "CONDITION", @@ -556,7 +560,7 @@ "STORY_STARTING": { "type": "CONDITION", "test": "FTR_STORY_STARTING" - }, + }, "FTR_RIGHT": { "type": "CONDITION", diff --git a/app/src/main/java/cmu/xprize/robotutor/RoboTutor.java b/app/src/main/java/cmu/xprize/robotutor/RoboTutor.java index c57a82cd4..80f160f26 100644 --- a/app/src/main/java/cmu/xprize/robotutor/RoboTutor.java +++ b/app/src/main/java/cmu/xprize/robotutor/RoboTutor.java @@ -173,7 +173,7 @@ public class RoboTutor extends Activity implements IReadyListener, IRoboTutor { private Thread audioLogThread; // TODO move to config file - private boolean RECORD_AUDIO = false; + private boolean RECORD_AUDIO = true; @Override protected void onCreate(Bundle savedInstanceState) { @@ -429,8 +429,7 @@ protected Boolean doInBackground(Void... unused) { try { // TODO: Don't do this in production // At the moment we always reinstall the tutor spec data - for - - + if(CacheSource.equals(TCONST.EXTERN)) { tutorAssetManager.installAssets(TCONST.TUTORROOT); logManager.postEvent_V(TAG, "INFO:Tutor Assets installed"); @@ -482,7 +481,7 @@ protected Boolean doInBackground(Void... unused) { return result; } - @Override + @Override protected void onPostExecute(Boolean result) { isReady = result; @@ -836,9 +835,9 @@ protected void onDestroy() { // after logging, transfer logs to READY folder logManager.transferHotLogs(hotLogPath, readyLogPath); logManager.transferHotLogs(hotLogPathPerf, readyLogPathPerf); - } + private int getNextLogSequenceId() { SharedPreferences prefs = getPreferences(MODE_PRIVATE); @@ -896,9 +895,7 @@ public static String getPromotionMode(String matrix) { placement = false; } - return placement ? 
"PLACEMENT" : "PROMOTION"; - } } diff --git a/app/src/main/java/cmu/xprize/robotutor/tutorengine/CTutor.java b/app/src/main/java/cmu/xprize/robotutor/tutorengine/CTutor.java index 4f10e5a84..2ddd19ca0 100644 --- a/app/src/main/java/cmu/xprize/robotutor/tutorengine/CTutor.java +++ b/app/src/main/java/cmu/xprize/robotutor/tutorengine/CTutor.java @@ -38,8 +38,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; -import java.util.Iterator; -import java.util.List; import java.util.Map; import java.util.UUID; @@ -81,7 +79,6 @@ public class CTutor implements ILoadableObject2, IEventSource { private HashMap mObjects = new HashMap(); private ArrayList fFeatures = new ArrayList(); - private ArrayList fDefaults = new ArrayList(); public Context mContext; public ILogManager mTutorLogManager; @@ -92,6 +89,7 @@ public class CTutor implements ILoadableObject2, IEventSource { public String mTutorName = ""; public String mTutorId = ""; + public String mTutorVariant = ""; public AssetManager mAssetManager; public boolean mTutorActive = false; @@ -128,13 +126,12 @@ public class CTutor implements ILoadableObject2, IEventSource { static private final boolean DEBUG = false; - - public CTutor(Context context, String name, String tutorId, ITutorManager tutorContainer, ILogManager logManager, TScope rootScope, String tarLanguage, String featSet) { - + public CTutor(Context context, String name, String tutorId, String tutorVariant, ITutorManager tutorContainer, ILogManager logManager, TScope rootScope, String tarLanguage, String featSet) { mTutorScope = new TScope(this, name, rootScope); mContext = context; mTutorName = name; mTutorId = tutorId; + mTutorVariant = tutorVariant; mTutorContainer = tutorContainer; mTutorLogManager = logManager; @@ -145,7 +142,7 @@ public CTutor(Context context, String name, String tutorId, ITutorManager tutorC uuid = UUID.randomUUID(); - setTutorFeatures(featSet); + setTutorFeatures(tutorVariant, featSet); // Update the unique instance string for the tutor // @@ -168,7 +165,6 @@ public CTutor(Context context, String name, String tutorId, ITutorManager tutorC * accessed by other classes */ private void monitorBattery() { - IntentFilter iFilter = new IntentFilter(Intent.ACTION_BATTERY_CHANGED); Intent batteryStatus = mContext.registerReceiver(null, iFilter); @@ -190,14 +186,11 @@ private void monitorBattery() { //Log.wtf("BATTERY", String.format("status=%d isCharging=%s percent=%f", status, isCharging ? 
"YES": "NO", batteryPct)); RoboTutor.logManager.postBattery(TCONST.BATTERY_MSG, String.valueOf(batteryPct), chargeType); - } - private void inflateTutor() { - // Load the "tutor_descriptor.json" file - - // TODO : Ultimately this is meant to hold the scene layout data - + // TODO: Ultimately this is meant to hold the scene layout data - loadTutorFactory(); // Load the tutor graph (scene sequence script data) for the tutor @@ -208,17 +201,16 @@ private void inflateTutor() { loadSceneGraph(); } - @Override public String getEventSourceName() { return mTutorName; } + @Override public String getEventSourceType() { return TCONST.TYPE_CTUTOR; } - public ITutorManager getTutorContainer() { return mTutorContainer; } @@ -227,8 +219,7 @@ public ITutorManager getTutorContainer() { * Load the tutorGraph - tutor scene sequence script for this tutor */ private void loadTutorGraph() { - - switch(navigatorType) { + switch (navigatorType) { case TCONST.SIMPLENAV: mTutorGraph = new CTutorGraph(this, mTutorName, mTutorContainer, mTutorScope); break; @@ -239,20 +230,16 @@ private void loadTutorGraph() { } } - /** * Load the scenegraph - scene animation scripts for this tutor * Push the scenegraph into the tutorgraph for scripting purposes * */ private void loadSceneGraph() { - mSceneGraph = new CSceneGraph(this, mTutorScope, mTutorGraph); - mTutorGraph.setSceneGraph(mSceneGraph); } - /** * Update the current scene container view * @@ -262,7 +249,6 @@ public void setSceneContainer(ViewGroup container) { mSceneContainer = container; } - /** * This is where the tutor gets kick started * Note we pass the extDataSource if available - otherwise we use the local descriptor @@ -270,18 +256,15 @@ public void setSceneContainer(ViewGroup container) { * dynamically by the scenegraph. */ public void launchTutor(defdata_tutor extDataSource) { - mTutorActive = true; - mTutorGraph.setDefDataSource((extDataSource != null)? extDataSource:dataSource); + mTutorGraph.setDefDataSource((extDataSource != null) ? extDataSource : dataSource); mTutorGraph.post(this, TCONST.FIRST_SCENE); } - /** * */ public void onDestroy() { - // Release the Tutor resources scenedata = null; language = null; @@ -291,7 +274,6 @@ public void onDestroy() { // Release the scene graph first so the scene data is still intact during destruction // TODO: don't know if this sequencing is required mSceneGraph.onDestroy(); - mTutorGraph.onDestroy(); } @@ -335,7 +317,6 @@ public void setTotalQuestions(int totalQuestions) { this.totalQuestions = totalQuestions; } - public UUID getUuid() { return uuid; } @@ -349,7 +330,6 @@ public Queue(String command) { } private void cleanUpTutor() { - // GRAY_SCREEN_BUG tutor might be cleaned up here Log.d(TCONST.DEBUG_GRAY_SCREEN_TAG, "r1: Cleaning up tutor " + mTutorName); CMediaController.destroyMediaManager(mTutorName); @@ -364,22 +344,18 @@ private void cleanUpTutor() { mTutorActive = false; } - @Override public void run() { - try { queueMap.remove(this); switch (_command) { - // This is how you kill a running tutor externally - // When the engine wants to kill a tutor and start another. // killDeadTutor just cleans up the now unused tutor. 
What happens // after is the responsability of the poster of the event // case TCONST.KILLTUTOR: - Log.d(TCONST.DEBUG_GRAY_SCREEN_TAG, "r2: In Queue: " + _command); cleanUpTutor(); @@ -391,7 +367,6 @@ public void run() { // of some sort of session manager of exit the app completely // case TCONST.ENDTUTOR: - // don't do end of tutor assessment when we're ending the default tutor (activity selector) if (!mTutorName.equals(CTutorEngine.defTutor)) { // assess student performance after tutor is completed @@ -406,20 +381,17 @@ public void run() { CTutorEngine.destroyCurrentTutor(); break; - // This is how a tutor stops itself - // DestroyCurrentTutor should remove the tutor and manage the launch // of some sort of session manager of exit the app completely // case TCONST.FINISH: - Log.d(TCONST.DEBUG_GRAY_SCREEN_TAG, "r2: In Queue: " + _command); cleanUpTutor(); CTutorEngine.destroyCurrentTutor(); RoboTutor.ACTIVITY.finish(); break; - } } catch(Exception e) { @@ -428,43 +400,31 @@ public void run() { } } - /** * Disable the input queues permenantly in prep for destruction * walks the queue chain to diaable scene queue * */ private void terminateQueue() { - // disable the input queue permenantly in prep for destruction // mDisabled = true; flushQueue(); } - /** * Remove any pending scenegraph commands. * */ private void flushQueue() { - try { - Iterator tObjects = queueMap.entrySet().iterator(); - - while (tObjects.hasNext()) { - Map.Entry entry = (Map.Entry) tObjects.next(); - - mainHandler.removeCallbacks((Queue) (entry.getValue())); - } - } - catch(Exception e) { + for (Object entry : queueMap.entrySet()) mainHandler.removeCallbacks((Queue)((Map.Entry)entry).getValue()); + } catch(Exception e) { Log.d(TAG, "flushQueue Error: " + e); } } - /** * Keep a mapping of pending messages so we can flush the queue if we want to terminate * the tutor before it finishes naturally. 
@@ -472,30 +432,23 @@ private void flushQueue() { * @param qCommand */ private void enQueue(Queue qCommand) { - RoboTutor.logManager.postEvent_V(TAG, "Processing POST to tutorGraph: " + qCommand._command ); - if(!mDisabled) { + if (!mDisabled) { queueMap.put(qCommand, qCommand); - mainHandler.post(qCommand); } } - /** * Post a command to the tutorgraph queue * * @param command */ public void post(String command) { - enQueue(new Queue(command)); } - - - /** * Return the view within the current scene container * @@ -503,46 +456,36 @@ public void post(String command) { * @return */ public View getViewByName(String findme) { - - HashMap map = mTutorGraph.getChildMap(); - - return (View)map.get(findme); + return (View)mTutorGraph.getChildMap().get(findme); } - - public ITutorObject getViewById(int findme, ViewGroup container) { ITutorObject foundView = null; - if(container == null) - container = (ViewGroup)mSceneContainer; + if (container == null) container = (ViewGroup)mSceneContainer; try { for (int i = 0; (foundView == null) && (i < container.getChildCount()); ++i) { - ITutorObject nextChild = (ITutorObject) container.getChildAt(i); + ITutorObject nextChild = (ITutorObject)container.getChildAt(i); - if (((View) nextChild).getId() == findme) { + if (((View)nextChild).getId() == findme) { foundView = nextChild; break; } else { - if (nextChild instanceof ViewGroup) - foundView = getViewById(findme, (ViewGroup) nextChild); + if (nextChild instanceof ViewGroup) foundView = getViewById(findme, (ViewGroup) nextChild); } } - } - catch (Exception e) { + } catch (Exception e) { Log.i(TAG, "View walk error: " + e); } return foundView; } - public TScope getScope() { return mTutorScope; } - public ITutorGraph getTutorGraph() { return mTutorGraph; } @@ -562,17 +505,13 @@ public void updateLanguageFeature(String langFtr) { // Remove any active language - Only want one language feature active delFeature(mMediaManager.getLanguageFeature(this)); - addFeature(langFtr); } public String getLanguageFeature() { - return mMediaManager.getLanguageFeature(this); } - - // Language management //************************************************************************** @@ -589,22 +528,17 @@ public InputStream openAsset(String path) throws IOException { return mAssetManager.open(path); } - // framendx is a simple counter used it uniquely id a scene instance for logging // public void incFrameNdx() { _framendx++; } - public void add(String Id, ITutorObject obj) { - mObjects.put(Id, obj); } - public ITutorObject get(String Id) { - return mObjects.get(Id); } @@ -616,7 +550,6 @@ public ITutorObject get(String Id) { * @return */ public View instantiateScene(scene_descriptor scenedata) { - int i1; View tarScene; View subScene; @@ -628,28 +561,25 @@ public View instantiateScene(scene_descriptor scenedata) { int id = mContext.getResources().getIdentifier(scenedata.id, "layout", mContext.getPackageName()); - LayoutInflater inflater = (LayoutInflater)mContext.getSystemService - (Context.LAYOUT_INFLATER_SERVICE); + LayoutInflater inflater = (LayoutInflater)mContext.getSystemService(Context.LAYOUT_INFLATER_SERVICE); tarScene = inflater.inflate(id, null ); - if(traceMode) Log.d(TAG, "Creating Scene : " + scenedata.id); + if (traceMode) Log.d(TAG, "Creating Scene: " + scenedata.id); tarScene.setVisibility(View.VISIBLE); // Generate the automation hooks - automateScene((ITutorSceneImpl) tarScene, scenedata); + automateScene((ITutorSceneImpl)tarScene, scenedata); // Parse the JSON spec data for onCreate Commands onCreate(scenedata); - 
return (View) tarScene; + return (View)tarScene; } - private void automateScene(ITutorSceneImpl tutorContainer, scene_descriptor scenedata) { - - // Propogate to children + // Propagate to children // HashMap childMap = new HashMap(); @@ -666,40 +596,30 @@ private void automateScene(ITutorSceneImpl tutorContainer, scene_descriptor scen mapChildren(tutorContainer, childMap); try { - Iterator tObjects = childMap.entrySet().iterator(); - // post create / inflate / init / map - here everything is created including the // view map to permit findViewByName // - while (tObjects.hasNext()) { - Map.Entry entry = (Map.Entry) tObjects.next(); - - ((ITutorObject) (entry.getValue())).onCreate(); - } - } - catch(Exception e) { + for (Object entry : childMap.entrySet()) ((ITutorObject)((Map.Entry)entry).getValue()).onCreate(); + } catch(Exception e) { Log.d(TAG, "automateScene Error: " + e); } } - private void mapChildren(ITutorSceneImpl tutorContainer, HashMap childMap) { - ITutorObject child; // Add the container as well so we can find it in a getViewByName search // childMap.put(tutorContainer.name(), tutorContainer); - int count = ((ViewGroup) tutorContainer).getChildCount(); + int count = ((ViewGroup)tutorContainer).getChildCount(); // Iterate through all children for (int i = 0; i < count; i++) { try { - child = (ITutorObject) ((ViewGroup) tutorContainer).getChildAt(i); - - if(childMap.containsKey(child.name())) { + child = (ITutorObject) ((ViewGroup)tutorContainer).getChildAt(i); + if (childMap.containsKey(child.name())) { CErrorManager.logEvent(TAG, "ERROR: Duplicate child view in:" + tutorContainer.name() + " - Duplicate of: " + child.name(), new Exception("no-exception"), false); } @@ -710,157 +630,107 @@ private void mapChildren(ITutorSceneImpl tutorContainer, HashMap childMap) { child.setNavigator(mTutorGraph); child.setLogManager(mTutorLogManager); - if(child instanceof ITutorSceneImpl) { - mapChildren((ITutorSceneImpl)child, childMap); - } - + if (child instanceof ITutorSceneImpl) mapChildren((ITutorSceneImpl)child, childMap); } catch (ClassCastException e) { - CErrorManager.logEvent(TAG, "ERROR: Non-ITutor child view in:" + tutorContainer.name(), e, false); } } } - private void onCreate(scene_descriptor scenedata) { - // Parse the oncreate command set - type_action[] createCmds = _sceneMap.get(scenedata.id).oncreate; + type_action[] createCmds = _sceneMap.get(scenedata.id).oncreate; // Can have an empty JSON array - so filter that out // - if(createCmds != null) { - + if (createCmds != null) { for (type_action cmd : createCmds) { - - if(cmd.testFeatures()) { - cmd.applyNode(); - } - + if (cmd.testFeatures()) cmd.applyNode(); } } } - /** - * generate the working feature set for this tutor instance + * get the feature set for this tutor variant from tutorVariants * - * @param featSet + * @param variant */ - public void setTutorFeatures(String featSet) { + public String getTutorFeatures(String variant) { + return CTutorEngine.tutorVariants.get(variant).features; + } + /** + * set the working feature set for this tutor instance + * + * @param featureSet + */ + public void setTutorFeatures(String variant, String featureSet) { // Ignore "null" feature sets which may come during a tutor launch if there is no // features data in the session_manager dataset // - if(!featSet.toUpperCase().equals("NULL")) { - - List featArray = new ArrayList(); - - if (featSet != null && featSet.length() > 0) - featArray = Arrays.asList(featSet.split(":")); - - fFeatures = new ArrayList(); - - // Add default features 
+ if (variant != null) setFeatures(getTutorFeatures(variant)); + else if (!featureSet.toUpperCase().equals("NULL")) setFeatures(featureSet); - for (String feature : fDefaults) { - fFeatures.add(feature); - } - - // Add instance feature - - for (String feature : featArray) { - fFeatures.add(feature); - } - } + Log.d(TAG, "setTutorFeatures: features = " + getFeatures()); } - /** - * get : delimited string of features + * get: delimited string of features * ## Mod Oct 16 2012 - logging support * */ public String getFeatures() { StringBuilder builder = new StringBuilder(); - for(String feature: fFeatures) { - builder.append(feature).append(':'); - } + for (String feature : fFeatures) builder.append(feature).append(':'); builder.deleteCharAt(builder.length() - 1); return builder.toString(); } - /** - * set : delimited string of features + * set: delimited string of features * ## Mod Dec 03 2013 - DB state support * - * @param ftrSet + * @param featureSet */ - public void setFeatures(String ftrSet) { + public void setFeatures(String featureSet) { // Add new features - no duplicates - List featArray = Arrays.asList(ftrSet.split(",")); - fFeatures.clear(); - - for (String feature : featArray) { - fFeatures.add(feature); - } + for (String feature : Arrays.asList(featureSet.split(":"))) fFeatures.add(feature); } - - // udpate the working feature set for this instance + // update the working feature set for this instance // - public void addFeature(String feature) - { + public void addFeature(String feature) { // Add new features - no duplicates - - if(fFeatures.indexOf(feature) == -1) - { - fFeatures.add(feature); - } + if (fFeatures.indexOf(feature) == -1) fFeatures.add(feature); } - - // udpate the working feature set for this instance + // update the working feature set for this instance // public void delFeature(String feature) { + // Remove features - no duplicates int fIndex; - - // remove features - no duplicates - - if((fIndex = fFeatures.indexOf(feature)) != -1) - { - fFeatures.remove(fIndex); - } + if ((fIndex = fFeatures.indexOf(feature)) != -1) fFeatures.remove(fIndex); } - - //## Mod Jul 01 2012 - Support for NOT operation on features. + // Support for NOT operation on features. // - // public boolean testFeature(String element) { - if(element.charAt(0) == '!') - { - return (fFeatures.indexOf(element.substring(1)) != -1)? false : true; - } - else { - return (fFeatures.indexOf(element) != -1) ? true : false; - } + if (element.charAt(0) == '!') return (fFeatures.indexOf(element.substring(1)) == -1); + else return (fFeatures.indexOf(element) != -1); } public String testFeatureHelper(String element) { - if(element.charAt(0) == '!') { - if(element.substring(1).equals("true")) return "false"; - if(element.substring(1).equals("false")) return "true"; + if (element.charAt(0) == '!') { + if (element.substring(1).equals("true")) return "false"; + if (element.substring(1).equals("false")) return "true"; return (fFeatures.indexOf(element.substring(1)) != -1)? "false" : "true"; - } - else { - if(element.equals("true")) return "true"; - if(element.equals("false")) return "false"; + } else { + if (element.equals("true")) return "true"; + if (element.equals("false")) return "false"; return (fFeatures.indexOf(element) != -1) ? 
"true" : "false"; } } @@ -869,8 +739,7 @@ public String testFeatureHelper(String element) { // TODO: Enhance with fsm // Doesn't allow inner paren matching public boolean testFeatureSet(String featSet) { - String result = testFeatureSetHelper(featSet); - return result.equals("true") ? true : false; + return testFeatureSetHelper(featSet).equals("true"); } public String testFeatureSetHelper(String featSet) { @@ -879,19 +748,17 @@ public String testFeatureSetHelper(String featSet) { StringBuffer featSetBuffer = new StringBuffer(featSet); - while(featSetBuffer.indexOf("(") != -1) { - for(int i = 0; i < featSetBuffer.length(); i++) { - String curString = featSetBuffer.substring(i, i+1); + while (featSetBuffer.indexOf("(") != -1) { + for (int i = 0; i < featSetBuffer.length(); i++) { + String curString = featSetBuffer.substring(i, i + 1); - if(curString.equals("(")) { + if (curString.equals("(")) { curParenCount += 1; - if(leftMostOpenParen == -1) { - leftMostOpenParen = i; - } + if (leftMostOpenParen == -1) leftMostOpenParen = i; } - if(curString.equals(")")) { + if (curString.equals(")")) { curParenCount -= 1; - if(curParenCount == 0) { + if (curParenCount == 0) { String withParen = featSetBuffer.substring(leftMostOpenParen + 1, i); featSetBuffer.replace(leftMostOpenParen, i+1, testFeatureSetHelper(withParen)); leftMostOpenParen = -1; @@ -900,38 +767,27 @@ public String testFeatureSetHelper(String featSet) { } } } + return testNonParenFeatureSet(featSetBuffer.toString()); } private String testNonParenFeatureSet(String featSet) { - - String result = "false"; - - List disjFeat = Arrays.asList(featSet.split("\\|")); // | Disjunctive features - List conjFeat; // & Conjunctive features + String result = "false"; // match a null set - i.e. empty string means the object is not feature constrained + if (featSet.equals("")) return "true"; - if(featSet.equals("")) - return "true"; - - // Check all disjunctive featuresets - one in each element of disjFeat + // Check all disjunctive feature sets // As long as one is true we pass - - for (String dfeature : disjFeat) - { - conjFeat = Arrays.asList(dfeature.split("\\&")); + for (String dfeature : Arrays.asList(featSet.split("\\|"))) { result = "true"; // Check that all conjunctive features are set in fFeatures - - for (String cfeature : conjFeat) { - if(!(testFeatureHelper(cfeature) == "true")) - result = "false"; + for (String cfeature : Arrays.asList(dfeature.split("\\&"))) { + if (!(testFeatureHelper(cfeature) == "true")) result = "false"; } - if(result == "true") - break; + if (result == "true") break; } return result; @@ -941,10 +797,14 @@ private String testNonParenFeatureSet(String featSet) { public String getTutorName() { return mTutorName; } + public String getTutorId() { return mTutorId; } + public String getTutorVariant() { + return mTutorVariant; + } public AssetManager getAssetManager() { return mAssetManager; @@ -962,10 +822,8 @@ public void gotoNode(String nodeID) { } - //************ Serialization - /** * Load the Tutor specification from JSON file data * from assets/tutors//tutor_descriptor.json @@ -974,23 +832,18 @@ public void gotoNode(String nodeID) { * and completely define view layout in TDESC */ private void loadTutorFactory() { - try { loadJSON(new JSONObject(JSON_Helper.cacheData(TCONST.TUTORROOT + "/" + mTutorName + "/" + TCONST.TDESC)), (IScope2)mTutorScope); - } catch (JSONException e) { Log.d(TAG, "error"); } } - public void loadJSON(JSONObject jsonObj, IScope2 scope) { - JSON_Helper.parseSelf(jsonObj, this, CClassMap2.classMap, 
scope); // Use updateLanguageFeature to properly override the Engine language feature - if(language != null) - mMediaManager.setLanguageFeature(this, language); + if (language != null) mMediaManager.setLanguageFeature(this, language); // push the soundMap into the MediaManager - // @@ -998,14 +851,12 @@ public void loadJSON(JSONObject jsonObj, IScope2 scope) { // Create a associative cache for the initialization data // - for(scene_initializer scene : scenedata) { - _sceneMap.put(scene.id, scene); - } + for (scene_initializer scene : scenedata) _sceneMap.put(scene.id, scene); } + @Override public void loadJSON(JSONObject jsonObj, IScope scope) { // Log.d(TAG, "Loader iteration"); loadJSON(jsonObj, (IScope2) scope); } - } diff --git a/app/src/main/java/cmu/xprize/robotutor/tutorengine/CTutorEngine.java b/app/src/main/java/cmu/xprize/robotutor/tutorengine/CTutorEngine.java index 83f937448..4cba9468d 100644 --- a/app/src/main/java/cmu/xprize/robotutor/tutorengine/CTutorEngine.java +++ b/app/src/main/java/cmu/xprize/robotutor/tutorengine/CTutorEngine.java @@ -33,7 +33,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; -import java.util.Iterator; import java.util.Map; import cmu.xprize.comp_logging.CLogManager; @@ -92,13 +91,12 @@ public class CTutorEngine implements ILoadableObject2 { private String EXPECTED_VERSION = "1.0"; // json loadable - static public String descr_version; // + static public String desc_version; // static public String defTutor; // defined in engine_descriptor.json static public HashMap tutorVariants; static public HashMap bindingPatterns; static public String language; // Accessed from a static context - final static private String TAG = "CTutorEngine"; @@ -113,7 +111,6 @@ public class CTutorEngine implements ILoadableObject2 { * @param context */ private CTutorEngine(RoboTutor context) { - mRootScope = new TScope(null, "root", null); Activity = context; @@ -132,15 +129,11 @@ private CTutorEngine(RoboTutor context) { * @return */ static public CTutorEngine getTutorEngine(RoboTutor context) { - - if(singletonTutorEngine == null) { - singletonTutorEngine = new CTutorEngine(context); - } + if (singletonTutorEngine == null) singletonTutorEngine = new CTutorEngine(context); return singletonTutorEngine; } - /** * This is primarily intended as a development API to allow updating the working language * at runtime. @@ -154,7 +147,6 @@ static public void setDefaultLanguage(String newLang) { promotionMechanism = new PromotionMechanism(studentModel, matrix); } - /** * This is primarily intended as a development API to allow updating the working language * at runtime. 
@@ -163,36 +155,24 @@ static public String getDefaultLanguage() { return language; } - static public TScope getScope() { - return mRootScope; } - static public Activity getActivity() { return Activity; } - static public void pauseTutor() { - } - /** * Used to destroy all tutors when the system calls onDestroy for the app * */ static public void killAllTutors() { - - while(tutorMap.size() > 0) { - - Iterator tutorObjects = tutorMap.entrySet().iterator(); - - Map.Entry entry = (Map.Entry) tutorObjects.next(); - - CTutor tutor = ((CTutor) (entry.getValue())); + for (Object entry : tutorMap.entrySet()) { + CTutor tutor = (CTutor)((Map.Entry)entry).getValue(); // Note the endTutor call will invalidate this iterator so recreate it // on each pass @@ -200,18 +180,13 @@ static public void killAllTutors() { //tutor.terminateQueue(); //tutor.endTutor(); } - singletonTutorEngine = null; } - static public void startSessionManager() { - defdata_tutor tutorBindings = null; - if(bindingPatterns != null) { - tutorBindings = bindingPatterns.get(defTutor); - } + if (bindingPatterns != null) tutorBindings = bindingPatterns.get(defTutor); // These features are based on the current tutor selection model // When no tutor has been selected it should run the tutor select @@ -235,7 +210,7 @@ static public void startSessionManager() { } } - createAndLaunchTutor(defTutor, RoboTutor.SELECTOR_MODE, null, tutorBindings); // where Activity Selector is launched + createAndLaunchTutor(defTutor, null, RoboTutor.SELECTOR_MODE, null, tutorBindings); // where Activity Selector is launched } /** @@ -243,14 +218,12 @@ static public void startSessionManager() { * This launches a new tutor immediately at startup. Used for quick debugging. */ static public void quickLaunch(String tutorVariant, String tutorId, String tutorFile) { + for (String name : tutorVariants.keySet()){ - for (String name: tutorVariants.keySet()){ - - String key =name.toString(); + String key = name.toString(); String value = tutorVariants.get(name).tutorName; String feats = tutorVariants.get(name).features; System.out.println(key + " tutorName: " + value + " Features: " + feats); - } String value = tutorVariants.get(tutorVariant).tutorName; String feats = tutorVariants.get(tutorVariant).features; @@ -260,12 +233,12 @@ static public void quickLaunch(String tutorVariant, String tutorId, String tutor initializeBindingPattern(tutorBinding, tutorFile); - createAndLaunchTutor(tutorDescriptor.tutorName , tutorDescriptor.features, tutorId, tutorBinding); + createAndLaunchTutor(tutorDescriptor.tutorName, tutorVariant, tutorDescriptor.features, tutorId, tutorBinding); } /** * Here a tutor is destroying itself - so we need to manage the follow-on process - - * i.e. start some other activity / tutor or session mamagement task. + * i.e. start some other activity / tutor or session management task. */ static public void destroyCurrentTutor() { @@ -289,12 +262,10 @@ static public void destroyCurrentTutor() { deadTutor = null; } - /** * Here a tutor has been killed off externally and need to be cleaned up. 
*/ static public void killDeadTutor() { - Log.d(TAG, "killDeadTutor: " + deadTutor.getTutorName()); // Get the tutor being killed and do a depth first destruction to allow @@ -304,17 +275,13 @@ static public void killDeadTutor() { deadTutor = null; } - /** * Here a tutor is being destroying externally */ static public void killActiveTutor() { - // GRAY_SCREEN_BUG - if(activeTutor != null) { - + if (activeTutor != null) { deadTutor = activeTutor; - activeTutor = null; Log.d(TAG, "Killing Tutor: " + deadTutor.getTutorName()); @@ -327,15 +294,13 @@ static public void killActiveTutor() { } } - /** * Create a tutor by name - if a tutor is running already then kill it off first * * @param tutorName * @param features */ - static private void createAndLaunchTutor(String tutorName, String features, String tutorId, defdata_tutor dataSource) { - + static private void createAndLaunchTutor(String tutorName, String tutorVariant, String features, String tutorId, defdata_tutor dataSource) { Log.d(TCONST.DEBUG_GRAY_SCREEN_TAG, "r4: killActiveTutor called from createAndLaunchTutor(" + tutorName + ")"); killActiveTutor(); @@ -353,29 +318,24 @@ static private void createAndLaunchTutor(String tutorName, String features, Stri // GRAY_SCREEN_BUG CTutor created --> Media Manager created --> added to map Log.d(TCONST.DEBUG_GRAY_SCREEN_TAG, "p2: Initializing tutor: " + tutorName); - activeTutor = new CTutor(Activity, tutorName, tutorId, (ITutorManager)tutorContainer, TutorLogManager, mRootScope, language, features); + activeTutor = new CTutor(Activity, tutorName, tutorId, tutorVariant, (ITutorManager)tutorContainer, TutorLogManager, mRootScope, language, features); activeTutor.launchTutor(dataSource); } - static private defdata_scenes parseSceneData(defdata_tutor dataPattern, String[] componentSet) { - defdata_scenes sceneData = new defdata_scenes(); ArrayList bindings = new ArrayList<>(); String compData = null; String compName = null; - for(String component : componentSet) { - + for (String component : componentSet) { String[] dataSet = component.split(":"); if (dataSet.length == 1) { - compName = "*"; compData = dataSet[0]; - } else { compName = dataSet[0]; compData = dataSet[1]; @@ -384,35 +344,27 @@ static private defdata_scenes parseSceneData(defdata_tutor dataPattern, String[] bindings.add(new databinding(compName, compData)); } - sceneData.databindings = (databinding[]) bindings.toArray(new databinding[bindings.size()]); + sceneData.databindings = bindings.toArray(new databinding[bindings.size()]); return sceneData; } - static private defdata_tutor parseDataSpec(String dataSpec) { - defdata_tutor dataPattern = new defdata_tutor(); defdata_scenes sceneData = null; String sceneName = null; - String[] sceneSet = dataSpec.split(";"); - - for(String scene : sceneSet) { - + for (String scene : dataSpec.split(";")) { String[] sceneElements = scene.split("\\|"); // If there is only 1 element then there is only one scene and its name is implied // - if(sceneElements.length == 1) { - + if (sceneElements.length == 1) { sceneName = "*"; sceneData = parseSceneData(dataPattern, sceneElements); - } - else { - sceneName = sceneElements[0]; + } else { + sceneName = sceneElements[0]; sceneElements = Arrays.copyOfRange(sceneElements, 1, sceneElements.length); - sceneData = parseSceneData(dataPattern, sceneElements); } @@ -422,61 +374,38 @@ static private defdata_tutor parseDataSpec(String dataSpec) { return dataPattern; } - - static private void initComponentBindings(databinding[] targetbindings, databinding[] databindings) { 
- - for(databinding binding : databindings) { - - if(binding.name.equals("*")) { - if(targetbindings.length == 1) { - targetbindings[0].datasource = binding.datasource; - } - else { - Log.e(TAG, "ERROR: Incompatible datasource"); - } - } - else { - for(databinding tbinding : targetbindings) { - if(tbinding.name.equals(binding.name)) { + static private void initComponentBindings(databinding[] targetbindings, databinding[] databindings) { + for (databinding binding : databindings) { + if (binding.name.equals("*")) { + if (targetbindings.length == 1) targetbindings[0].datasource = binding.datasource; + else Log.e(TAG, "ERROR: Incompatible datasource"); + } else { + for (databinding tbinding : targetbindings) { + if (tbinding.name.equals(binding.name)) { tbinding.datasource = binding.datasource; break; } } } - } } - - static private void initSceneBindings(defdata_tutor bindingPattern, String sceneName, databinding[] databindings) { - - if(sceneName.equals("*")) { - System.out.println(bindingPattern.scene_bindings.isEmpty()); - if(bindingPattern.scene_bindings.size() == 1) { - - Iterator scenes = bindingPattern.scene_bindings.entrySet().iterator(); - while(scenes.hasNext() ) { - - Map.Entry scene = (Map.Entry) scenes.next(); - - databinding[] scenebindings = ((defdata_scenes)scene.getValue()).databindings; - + static private void initSceneBindings(defdata_tutor bindingPattern, String sceneName, databinding[] databindings) { + if (sceneName.equals("*")) { + if (bindingPattern.scene_bindings.size() == 1) { + for (Object scene : bindingPattern.scene_bindings.entrySet()) { + databinding[] scenebindings = ((defdata_scenes)((Map.Entry)scene).getValue()).databindings; initComponentBindings(scenebindings, databindings); } - } - else { + } else { Log.e(TAG, "ERROR: Incompatible datasource"); } - } - else { + } else { defdata_scenes compData = bindingPattern.scene_bindings.get(sceneName); - initComponentBindings(compData.databindings, databindings); } - } - /** * The data spec is encoded as: * @@ -485,8 +414,8 @@ static private void initSceneBindings(defdata_tutor bindingPattern, String scen * = component:datasource * * e.g. - * tutor_scene1|sceme_compD:[dataencoding]datasource|sceme_compM:[dataencoding]datasource; - * tutor_scene2|sceme_compQ:[dataencoding]datasource; ... + * tutor_scene1|scene_compD:[dataencoding]datasource|scene_compM:[dataencoding]datasource; + * tutor_scene2|scene_compQ:[dataencoding]datasource; ... 
* * * @@ -494,24 +423,12 @@ static private void initSceneBindings(defdata_tutor bindingPattern, String scen * @param dataSpec */ static private void initializeBindingPattern(defdata_tutor bindingPattern, String dataSpec) { - System.out.print("dataSpec: "); - System.out.println(dataSpec); - defdata_tutor dataBindings = parseDataSpec(dataSpec); - - Iterator scenes = dataBindings.scene_bindings.entrySet().iterator(); - - while(scenes.hasNext() ) { - - Map.Entry scene = (Map.Entry) scenes.next(); - - String sceneName = (String)scene.getKey(); - databinding[] databindings = ((defdata_scenes)scene.getValue()).databindings; - - initSceneBindings(bindingPattern, sceneName, databindings); + for (Object scene : parseDataSpec(dataSpec).scene_bindings.entrySet()) { + databinding[] databindings = ((defdata_scenes)((Map.Entry)scene).getValue()).databindings; + initSceneBindings(bindingPattern, (String)((Map.Entry)scene).getKey(), databindings); } } - /** * Scriptable Launch command * @@ -519,7 +436,6 @@ static private void initializeBindingPattern(defdata_tutor bindingPattern, Strin * @param intentType */ static public void launch(String intentType, String tutorVariant, String dataSource, String tutorId) { - Log.d(TAG, "launch: tutorId=" + tutorId); Intent extIntent = new Intent(); @@ -534,28 +450,22 @@ static public void launch(String intentType, String tutorVariant, String dataSou // initializeBindingPattern(tutorBinding, dataSource); - switch(intentType) { - + switch (intentType) { // Create a native tutor with the given base features // These features are used to determine basic tutor functionality when // multiple tutors share a single scenegraph // case "native": - Log.d(TCONST.DEBUG_GRAY_SCREEN_TAG, "p3b: Creating Tutor in 'CTutor.launch': " + tutorDescriptor.tutorName); - createAndLaunchTutor(tutorDescriptor.tutorName, tutorDescriptor.features, tutorId, tutorBinding); + createAndLaunchTutor(tutorDescriptor.tutorName, tutorVariant, tutorDescriptor.features, tutorId, tutorBinding); break; case "browser": - extIntent = new Intent(Intent.ACTION_VIEW, Uri.parse("file:///" + tutorVariant)); - getActivity().startActivity(extIntent); - break; default: - // This a special allowance for MARi which placed their activities in a different // package from their app - so we check for intent of the form ":" // @@ -563,13 +473,12 @@ static public void launch(String intentType, String tutorVariant, String dataSou // If it is ":" // - if(intentParts.length > 1) { + if (intentParts.length > 1) { extPackage = intentParts[0]; - tutorVariant = intentParts[1]; - } - // Otherwise we expect the activities to be right off the package. - // - else { + tutorVariant = intentParts[1]; + } else { + // Otherwise we expect the activities to be right off the package. 
+ // extPackage = tutorVariant.substring(0, tutorVariant.lastIndexOf('.')); } @@ -578,8 +487,7 @@ static public void launch(String intentType, String tutorVariant, String dataSou try { getActivity().startActivity(extIntent); - } - catch(Exception e) { + } catch(Exception e) { Log.e(TAG, "Launch Error: " + e + " : " + tutorVariant); } break; @@ -589,20 +497,18 @@ static public void launch(String intentType, String tutorVariant, String dataSou //************ Serialization - /** * Load the Tutor engine specification from JSON file data * from assets/tutors/engine_descriptor.json * */ public void loadEngineDescr() { - try { loadJSON(new JSONObject(JSON_Helper.cacheData(TCONST.TUTORROOT + "/" + TCONST.EDESC)), (IScope2)mRootScope); - // TODO : Use build Variant to ensure release configurations + // TODO: Use build Variant to ensure release configurations // - if(Configuration.languageOverride(getActivity())) { + if (Configuration.languageOverride(getActivity())) { language = Configuration.getLanguageFeatureID(getActivity()); // any time the language changes, so should the Transition Matrix and the Student Model } else { @@ -673,7 +579,6 @@ private static TransitionMatrixModel loadTransitionMatrixModel() { return matrix; } - /** * Load the Tutor specification from JSON file data * @@ -681,13 +586,12 @@ private static TransitionMatrixModel loadTransitionMatrixModel() { */ @Override public void loadJSON(JSONObject jsonData, IScope2 scope) { - JSON_Helper.parseSelf(jsonData, this, CClassMap2.classMap, scope); } + @Override public void loadJSON(JSONObject jsonObj, IScope scope) { // Log.d(TAG, "Loader iteration"); loadJSON(jsonObj, (IScope2) scope); } - } diff --git a/app/src/main/java/cmu/xprize/robotutor/tutorengine/widgets/core/TRtComponent.java b/app/src/main/java/cmu/xprize/robotutor/tutorengine/widgets/core/TRtComponent.java index cd50e4048..ac47d4fa9 100644 --- a/app/src/main/java/cmu/xprize/robotutor/tutorengine/widgets/core/TRtComponent.java +++ b/app/src/main/java/cmu/xprize/robotutor/tutorengine/widgets/core/TRtComponent.java @@ -28,7 +28,6 @@ import java.util.Arrays; import java.util.HashMap; -import java.util.Iterator; import java.util.List; import java.util.Map; @@ -60,7 +59,6 @@ import cmu.xprize.util.JSON_Helper; import cmu.xprize.util.TCONST; import edu.cmu.xprize.listener.ListenerBase; - import static cmu.xprize.util.TCONST.ASREventMap; import static cmu.xprize.util.TCONST.LANG_AUTO; import static cmu.xprize.util.TCONST.LOCAL_STORY_AUDIO; @@ -84,6 +82,7 @@ public class TRtComponent extends CRt_Component implements IBehaviorManager, ITu static private String TAG = "TRtComponent"; + public TRtComponent(Context context) { super(context); } @@ -106,7 +105,6 @@ public void init(Context context, AttributeSet attrs) { prepareListener(CMediaController.getTTS()); } - @Override public void onDestroy() { super.onDestroy(); @@ -119,18 +117,13 @@ public void onDestroy() { //************************************************************************ // IBehaviorManager Interface START - public void setVolatileBehavior(String event, String behavior) { - - Log.d("SET_BEHAVIOR - Volatile", "Event: " + event + " - behavior: " + behavior ); + Log.d("SET_BEHAVIOR - Volatile", "Event: " + event + " - behavior: " + behavior); enableOnClickBehavior(event, behavior); if (behavior.toUpperCase().equals(TCONST.NULL)) { - - if (volatileMap.containsKey(event)) { - volatileMap.remove(event); - } + if (volatileMap.containsKey(event)) volatileMap.remove(event); } else { volatileMap.put(event, behavior); } @@ -141,22 
+134,15 @@ public void setVolatileBehavior(String event, String behavior) { if (eventType != null) switch (eventType) { - case TCONST.SILENCE_EVENT: case TCONST.SOUND_EVENT: case TCONST.WORD_EVENT: - - if (behavior.toUpperCase().equals(TCONST.NULL)) { - - mListener.resetStaticEvent(eventType); - } else { - mListener.configStaticEvent(eventType); - } + if (behavior.toUpperCase().equals(TCONST.NULL)) mListener.resetStaticEvent(eventType); + else mListener.configStaticEvent(eventType); break; } } - /** Special Behavior processing for timed ASR events which must be setup in the listener component * * @param event @@ -164,7 +150,6 @@ public void setVolatileBehavior(String event, String behavior) { * @param timeout */ public void setVolatileBehavior(String event, String behavior, int timeout) { - // Setup the behavior // setVolatileBehavior(event, behavior); @@ -175,34 +160,22 @@ public void setVolatileBehavior(String event, String behavior, int timeout) { if (eventType != null) switch (eventType) { - case TCONST.TIMEDSILENCE_EVENT: case TCONST.TIMEDSOUND_EVENT: case TCONST.TIMEDWORD_EVENT: - - if (behavior.toUpperCase().equals(TCONST.NULL)) { - - mListener.resetTimedEvent(eventType); - } - else { - mListener.configTimedEvent(eventType, timeout); - } + if (behavior.toUpperCase().equals(TCONST.NULL)) mListener.resetTimedEvent(eventType); + else mListener.configTimedEvent(eventType, timeout); break; } } - public void setStickyBehavior(String event, String behavior) { - - Log.d("SET_BEHAVIOR - Sticky", "Event: " + event + " - behavior: " + behavior ); + Log.d("SET_BEHAVIOR - Sticky", "Event: " + event + " - behavior: " + behavior); enableOnClickBehavior(event, behavior); if (behavior.toUpperCase().equals(TCONST.NULL)) { - - if (stickyMap.containsKey(event)) { - stickyMap.remove(event); - } + if (stickyMap.containsKey(event)) stickyMap.remove(event); } else { stickyMap.put(event, behavior); } @@ -212,24 +185,16 @@ public void setStickyBehavior(String event, String behavior) { Integer eventType = ASREventMap.get(event); if (eventType != null) - switch(eventType) { - + switch (eventType) { case TCONST.SILENCE_EVENT: case TCONST.SOUND_EVENT: case TCONST.WORD_EVENT: - - if (behavior.toUpperCase().equals(TCONST.NULL)) { - - mListener.resetStaticEvent(eventType); - } - else { - mListener.configStaticEvent(eventType); - } + if (behavior.toUpperCase().equals(TCONST.NULL)) mListener.resetStaticEvent(eventType); + else mListener.configStaticEvent(eventType); break; } } - /** Special Behavior processing for timed ASR events which must be setup in the listener component * * @param event @@ -237,7 +202,6 @@ public void setStickyBehavior(String event, String behavior) { * @param timeout */ public void setStickyBehavior(String event, String behavior, int timeout) { - // Setup the behavior // setStickyBehavior(event, behavior); @@ -248,35 +212,24 @@ public void setStickyBehavior(String event, String behavior, int timeout) { if (eventType != null) switch(eventType) { - case TCONST.TIMEDSILENCE_EVENT: case TCONST.TIMEDSOUND_EVENT: case TCONST.TIMEDWORD_EVENT: - - if (behavior.toUpperCase().equals(TCONST.NULL)) { - - mListener.resetTimedEvent(eventType); - } - else { - mListener.configTimedEvent(eventType, timeout); - } + if (behavior.toUpperCase().equals(TCONST.NULL)) mListener.resetTimedEvent(eventType); + else mListener.configTimedEvent(eventType, timeout); break; } } - // Execute script target if behavior is defined for this event // @Override public boolean applyBehavior(String event) { + boolean result; - 
boolean result = false; - - if(!(result = super.applyBehavior(event))) { - + if (!(result = super.applyBehavior(event))) { if (volatileMap.containsKey(event)) { - - RoboTutor.logManager.postEvent_D(QGRAPH_MSG, "target:" + TAG + ",action:applybehavior,type:volatile,behavior:" + event); + RoboTutor.logManager.postEvent_D(TCONST.QGRAPH_MSG, "target:" + TAG + ",action:applybehavior,type:volatile,behavior:" + event); applyBehaviorNode(volatileMap.get(event)); // clear the volatile behavior after use and update the listener if the event is a @@ -285,10 +238,8 @@ public boolean applyBehavior(String event) { setVolatileBehavior(event, TCONST.NULL, 0); result = true; - } else if (stickyMap.containsKey(event)) { - - RoboTutor.logManager.postEvent_D(QGRAPH_MSG, "target:" + TAG + ",action:applybehavior,type:sticky,behavior:" + event); + RoboTutor.logManager.postEvent_D(TCONST.QGRAPH_MSG, "target:" + TAG + ",action:applybehavior,type:sticky,behavior:" + event); applyBehaviorNode(stickyMap.get(event)); result = true; @@ -309,39 +260,28 @@ public void applyBehaviorNode(String nodeName) { IScriptable2 obj = null; if (nodeName != null && !nodeName.equals("") && !nodeName.toUpperCase().equals("NULL")) { - try { - obj = mTutor.getScope().mapSymbol(nodeName); - - if (obj != null) { - - RoboTutor.logManager.postEvent_D(QGRAPH_MSG, "target:" + TAG + ",action:applybehaviornode,type:" + obj.getType() + ",behavior:" + nodeName); + if ((obj = mTutor.getScope().mapSymbol(nodeName)) != null) { + RoboTutor.logManager.postEvent_D(TCONST.QGRAPH_MSG, "target:" + TAG + ",action:applybehaviornode,type:" + obj.getType() + ",behavior:" + nodeName); switch (obj.getType()) { - case TCONST.SUBGRAPH: - mTutor.getSceneGraph().post(this, TCONST.SUBGRAPH_CALL, nodeName); break; case TCONST.MODULE: - // Disallow module "calls" - RoboTutor.logManager.postEvent_E(QGRAPH_MSG, "target:" + TAG + ",action:applybehaviornode,type:modulecall,behavior:" + nodeName + ",ERROR:MODULE Behaviors are not supported"); + RoboTutor.logManager.postEvent_E(TCONST.QGRAPH_MSG, "target:" + TAG + ",action:applybehaviornode,type:modulecall,behavior:" + nodeName + ",ERROR:MODULE Behaviors are not supported"); break; // Note that we should not preEnter queues - they may need to be cancelled // which is done internally. // case TCONST.QUEUE: - - if (obj.testFeatures()) { - obj.applyNode(); - } + if (obj.testFeatures()) obj.applyNode(); break; default: - if (obj.testFeatures()) { obj.preEnter(); obj.applyNode(); @@ -349,7 +289,6 @@ public void applyBehaviorNode(String nodeName) { break; } } - } catch (Exception e) { // TODO: Manage invalid Behavior e.printStackTrace(); @@ -357,7 +296,6 @@ public void applyBehaviorNode(String nodeName) { } } - /** * Do button like behavior defined for component itself - i.e. 
click anywhere * @@ -365,36 +303,27 @@ public void applyBehaviorNode(String nodeName) { */ @Override public void onClick(View v) { - if (v == this) { - Log.v(QGRAPH_MSG, "event.click: " + " view"); + Log.v(TCONST.QGRAPH_MSG, "event.click: " + " view"); applyBehavior(TCONST.ON_CLICK); } } - @Override public void nextScene() { mTutor.mTutorGraph.post(this, TCONST.NEXTSCENE); } - @Override public void nextNode() { mTutor.mSceneGraph.post(this, TCONST.NEXT_NODE); } - private void enableOnClickBehavior(String event, String behavior) { - if (event.toUpperCase().equals(TCONST.ON_CLICK)) { - - if (behavior.toUpperCase().equals(TCONST.NULL)) { - setOnClickListener(null); - } else { - setOnClickListener(this); - } + if (behavior.toUpperCase().equals(TCONST.NULL)) setOnClickListener(null); + else setOnClickListener(this); } } @@ -403,79 +332,52 @@ private void enableOnClickBehavior(String event, String behavior) { //************************************************************************ - //*********************************************************** // ITutorLogger - Start private void extractHashContents(StringBuilder builder, HashMap map) { - - Iterator tObjects = map.entrySet().iterator(); - - while (tObjects.hasNext() ) { - - builder.append(','); - - Map.Entry entry = (Map.Entry) tObjects.next(); - - String key = entry.getKey().toString(); - String value = "#" + entry.getValue().toString(); - - builder.append(key); - builder.append(value); + for (Object entry : map.entrySet()) { + builder.append("," + ((Map.Entry)entry).getKey().toString()); + builder.append("#" + ((Map.Entry)entry).getValue().toString()); } } private void extractFeatureContents(StringBuilder builder, HashMap map) { - - StringBuilder featureset = new StringBuilder(); - - Iterator tObjects = map.entrySet().iterator(); - // Scan to build a list of active features // - while (tObjects.hasNext() ) { + StringBuilder featureSet = new StringBuilder(); - Map.Entry entry = (Map.Entry) tObjects.next(); - - Boolean value = (Boolean) entry.getValue(); - - if (value) { - featureset.append(entry.getKey().toString() + ";"); - } + for (Object entry : map.entrySet()) { + if ((Boolean)((Map.Entry)entry).getValue()) featureSet.append(((Map.Entry)entry).getKey().toString() + ";"); } - // If there are active features then trim the last ',' and add the - // comma delimited list as the "$features" object. + // If there are active features then trim the last ',' and add the comma delimited list as the "$features" object. 
// - if (featureset.length() != 0) { - featureset.deleteCharAt(featureset.length()-1); - - builder.append(",$features#" + featureset.toString()); + if (featureSet.length() != 0) { + featureSet.deleteCharAt(featureSet.length() - 1); + builder.append(",$features#" + featureSet.toString()); } } @Override public void logState(String logData) { - StringBuilder builder = new StringBuilder(); extractHashContents(builder, _StringVar); extractHashContents(builder, _IntegerVar); extractFeatureContents(builder, _FeatureMap); - RoboTutor.logManager.postTutorState(TUTOR_STATE_MSG, "target#reading_tutor," + logData + builder.toString()); + RoboTutor.logManager.postTutorState(TCONST.TUTOR_STATE_MSG, "target#reading_tutor," + logData + builder.toString()); } // ITutorLogger - End //*********************************************************** - //************************************************************************ //************************************************************************ // IEventSource Interface START - @Override public String getEventSourceName() { return name(); @@ -486,7 +388,6 @@ public String getEventSourceType() { return "Reading_Component"; } - // IEventSource Interface END //************************************************************************ //************************************************************************ @@ -500,21 +401,15 @@ public String getEventSourceType() { */ @Override public void setDataSource(String dataNameDescriptor) { - try { - // Note that here the {folder] load-type semantics is for a direct encoded link to an // external storydata.json file location // // TODO: work toward consistent [file] semantics as externally sourced files // if (dataNameDescriptor.startsWith(TCONST.LOCAL_FILE)) { - String storyFolder = dataNameDescriptor.substring(TCONST.LOCAL_FILE.length()).toLowerCase(); - - String[] levelval = storyFolder.split("_"); - - String levelFolder = levelval[0]; + String levelFolder = storyFolder.split("_")[0]; // ALAN_HILL (5) here is how to load the image... 
DATASOURCEPATH = TCONST.DOWNLOAD_RT_TUTOR + "/" + TCONST.STORY_ASSETS + "/" + mMediaManager.getLanguageIANA_2(mTutor) + "/"; @@ -528,10 +423,9 @@ public void setDataSource(String dataNameDescriptor) { // NOTE: we override the CMediaPackage srcpath folder to point to the debug LOCAL_STORY_AUDIO - in Download // configListenerLanguage(mMediaManager.getLanguageFeature(mTutor)); - mMediaManager.addSoundPackage(mTutor, MEDIA_STORY, new CMediaPackage(LANG_AUTO, AUDIOSOURCEPATH, LOCAL_STORY_AUDIO)); + mMediaManager.addSoundPackage(mTutor, TCONST.MEDIA_STORY, new CMediaPackage(TCONST.LANG_AUTO, AUDIOSOURCEPATH, TCONST.LOCAL_STORY_AUDIO)); loadStory(STORYSOURCEPATH, "ASB_Data", TCONST.EXTERN); - } else if (dataNameDescriptor.startsWith(TCONST.ENCODED_FOLDER)) { System.out.println("ENCODED FOLDER"); @@ -541,16 +435,7 @@ public void setDataSource(String dataNameDescriptor) { // XYZ-1: mimic this behavior String storyFolder = dataNameDescriptor.substring(TCONST.ENCODED_FOLDER.length()).toLowerCase(); - // "0..10.SD_OFF1_DES.34" - // "3_2" - - String[] levelval = storyFolder.split("_"); - // "0..10.SD", "OFF1", "DES.34" - // "3", "2" - - String levelFolder = levelval[0]; - // "0..10.SD" - // "3" + String levelFolder = storyFolder.split("_")[0]; DATASOURCEPATH = TCONST.ROBOTUTOR_ASSETS + "/" + TCONST.STORY_ASSETS + "/" + mMediaManager.getLanguageIANA_2(mTutor) + "/"; @@ -565,17 +450,15 @@ public void setDataSource(String dataNameDescriptor) { // "cmu/xprize/story_reading//" // XYZ folder path should look like this configListenerLanguage(mMediaManager.getLanguageFeature(mTutor)); - mMediaManager.addSoundPackage(mTutor, MEDIA_STORY, new CMediaPackage(LANG_AUTO, AUDIOSOURCEPATH)); + mMediaManager.addSoundPackage(mTutor, TCONST.MEDIA_STORY, new CMediaPackage(TCONST.LANG_AUTO, AUDIOSOURCEPATH)); // ZZZ load story!!! // ZZZ STORYSOURCEPATH contains storydata.json and images // ZZZ EXTERN is... TCONST.EXTERN loadStory(STORYSOURCEPATH, "ASB_Data", TCONST.EXTERN); - } else if (dataNameDescriptor.startsWith(TCONST.SHARED_LITERACY)) { - // ZZZ 1: replace code in Transition Table to make it [sharedliteracy] (DONE) + // ZZZ 1: replace code in Transition Table to make it [sharedliteracy] (DONE) String storyFolder = dataNameDescriptor.substring(TCONST.SHARED_LITERACY.length()).toLowerCase(); - String levelFolder = "literacy"; // don't use level folder... 
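The inline examples deleted from the ENCODED_FOLDER branch above documented how the level folder is derived from a story-folder descriptor; the consolidated split("_")[0] keeps that behavior. A standalone sketch with the same sample values follows (class and method names are illustrative, and the descriptor is assumed to have been lowercased by the caller, as in setDataSource):

```
// Illustrative only. Mirrors: String levelFolder = storyFolder.split("_")[0];
public class LevelFolderSketch {

    // Everything before the first '_' names the level folder; the rest
    // identifies the story within that level.
    static String levelFolderOf(String storyFolder) {
        return storyFolder.split("_")[0];
    }

    public static void main(String[] args) {
        System.out.println(levelFolderOf("0..10.sd_off1_des.34")); // "0..10.sd"
        System.out.println(levelFolderOf("3_2"));                  // "3"
    }
}
```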
@@ -594,20 +477,13 @@ public void setDataSource(String dataNameDescriptor) { SHAREDPATH = DATASOURCEPATH + TCONST.SHARED_LITERACY_IMAGE_FOLDER + "/"; // "cmu/xprize/story_reading/shared/shared_literacy" - configListenerLanguage(mMediaManager.getLanguageFeature(mTutor)); - mMediaManager.addSoundPackage(mTutor, MEDIA_STORY, new CMediaPackage(LANG_AUTO, AUDIOSOURCEPATH)); + mMediaManager.addSoundPackage(mTutor, TCONST.MEDIA_STORY, new CMediaPackage(TCONST.LANG_AUTO, AUDIOSOURCEPATH)); loadStory(STORYSOURCEPATH, "ASB_Data", TCONST.EXTERN_SHARED, SHAREDPATH); - } else if (dataNameDescriptor.startsWith(TCONST.SHARED_MATH)) { - String storyFolder = dataNameDescriptor.substring(TCONST.SHARED_MATH.length()).toLowerCase(); - - String[] levelval = storyFolder.split("_"); - - String levelFolder = levelval[0]; - + String levelFolder = storyFolder.split("_")[0]; DATASOURCEPATH = TCONST.ROBOTUTOR_ASSETS + "/" + TCONST.STORY_ASSETS + "/" + mMediaManager.getLanguageIANA_2(mTutor) + "/"; // "robotutor_assets/assets/story/sw" @@ -622,17 +498,13 @@ public void setDataSource(String dataNameDescriptor) { SHAREDPATH = DATASOURCEPATH + TCONST.SHARED_MATH_FOLDER + "/"; configListenerLanguage(mMediaManager.getLanguageFeature(mTutor)); - mMediaManager.addSoundPackage(mTutor, MEDIA_STORY, new CMediaPackage(LANG_AUTO, AUDIOSOURCEPATH)); - + mMediaManager.addSoundPackage(mTutor, TCONST.MEDIA_STORY, new CMediaPackage(TCONST.LANG_AUTO, AUDIOSOURCEPATH)); // ZZZ how to change this??? loadStory(STORYSOURCEPATH, "ASB_Data", TCONST.EXTERN_SHARED, SHAREDPATH); - } else if (dataNameDescriptor.startsWith(TCONST.SONG)) { - String storyFolder = dataNameDescriptor.substring(TCONST.SONG.length()).toLowerCase(); - String levelFolder = "songs"; DATASOURCEPATH = TCONST.ROBOTUTOR_ASSETS + "/" + TCONST.STORY_ASSETS + "/" + mMediaManager.getLanguageIANA_2(mTutor) + "/"; @@ -647,13 +519,12 @@ public void setDataSource(String dataNameDescriptor) { // "cmu/xprize/story_reading/songs/" configListenerLanguage(mMediaManager.getLanguageFeature(mTutor)); - mMediaManager.addSoundPackage(mTutor, MEDIA_STORY, new CMediaPackage(LANG_AUTO, AUDIOSOURCEPATH)); + mMediaManager.addSoundPackage(mTutor, TCONST.MEDIA_STORY, new CMediaPackage(TCONST.LANG_AUTO, AUDIOSOURCEPATH)); // ZZZ load story!!! // ZZZ STORYSOURCEPATH contains storydata.json and images // ZZZ EXTERN is... TCONST.EXTERN loadStory(STORYSOURCEPATH, "ASB_Data", TCONST.EXTERN); - } else if (dataNameDescriptor.startsWith(TCONST.WORD_PROBLEMS)) { String storyFolder = dataNameDescriptor.substring(WORD_PROBLEMS.length()).toLowerCase(); storyFolder = storyFolder.substring("math.".length()); @@ -668,19 +539,16 @@ public void setDataSource(String dataNameDescriptor) { mMediaManager.addSoundPackage(mTutor, MEDIA_STORY, new CMediaPackage(LANG_AUTO, AUDIOSOURCEPATH)); loadStory(STORYSOURCEPATH, "ASB_Data", TCONST.EXTERN); - } - - // Note that here the {file] load-type semantics is for an external file and [asset] may be used - // for internal assets. - // - // TODO: work toward consistent [file] semantics as externally sourced files - // - else if (dataNameDescriptor.startsWith(TCONST.SOURCEFILE)) { + } else if (dataNameDescriptor.startsWith(TCONST.SOURCEFILE)) { + // Note that here the {file] load-type semantics is for an external file and [asset] may be used + // for internal assets. 
+ // + // TODO: work toward consistent [file] semantics as externally sourced files + // // The story index is appended as a int - String[] storyval = dataNameDescriptor.split(":"); - int storyIndex = Integer.parseInt(storyval[1]); - + String[] storyval = dataNameDescriptor.split(":"); + int storyIndex = Integer.parseInt(storyval[1]); String dataFile = storyval[0].substring(TCONST.SOURCEFILE.length()).toLowerCase(); DATASOURCEPATH = TCONST.ROBOTUTOR_ASSETS + "/" + TCONST.STORY_ASSETS + "/" + mMediaManager.getLanguageIANA_2(mTutor) + "/"; @@ -689,13 +557,11 @@ else if (dataNameDescriptor.startsWith(TCONST.SOURCEFILE)) { // Load the datasource in the component module - i.e. the superclass // - loadJSON(new JSONObject(jsonData), mTutor.getScope() ); + loadJSON(new JSONObject(jsonData), mTutor.getScope()); configListenerLanguage(mMediaManager.getLanguageFeature(mTutor)); setStory(dataSource[storyIndex].storyName, TCONST.EXTERN); - } else if (dataNameDescriptor.startsWith(TCONST.ASSETFILE)) { - String dataFile = dataNameDescriptor.substring(TCONST.ASSETFILE.length()); // Generate a langauage specific path to the data source - @@ -709,21 +575,17 @@ else if (dataNameDescriptor.startsWith(TCONST.SOURCEFILE)) { // Load the datasource in the component module - i.e. the superclass // - loadJSON(new JSONObject(jsonData), mTutor.getScope() ); + loadJSON(new JSONObject(jsonData), mTutor.getScope()); configListenerLanguage(mMediaManager.getLanguageFeature(mTutor)); setStory(dataSource[0].storyName, TCONST.ASSETS); - } else if (dataNameDescriptor.startsWith("db|")) { } else if (dataNameDescriptor.startsWith("{")) { - loadJSON(new JSONObject(dataNameDescriptor), null); - } else { throw (new Exception("BadDataSource")); } - } - catch (Exception e) { + } catch (Exception e) { CErrorManager.logEvent(TAG, "Invalid Data Source for : " + mTutor.getTutorName(), e, true); } System.out.println("AUDIOSOURCEPATH: "+AUDIOSOURCEPATH); @@ -731,18 +593,16 @@ else if (dataNameDescriptor.startsWith(TCONST.SOURCEFILE)) { System.out.println("DATASOURCEPATH: "+DATASOURCEPATH); } - /** - * Inject the listener into the MediaManageer + * Inject the listener into the MediaManager */ @Override public void setListener(ListenerBase listener) { CMediaController.setListener(listener); } - /** - * Remove the listener from the MediaManageer + * Remove the listener from the MediaManager */ @Override public void removeListener(ListenerBase listener) { @@ -750,7 +610,6 @@ public void removeListener(ListenerBase listener) { } - //************************************************************************ //************************************************************************ // IPublisher - START @@ -761,7 +620,6 @@ public void publishState() { @Override public void publishValue(String varName, String value) { - _StringVar.put(varName,value); // update the response variable "." @@ -770,7 +628,6 @@ public void publishValue(String varName, String value) { @Override public void publishValue(String varName, int value) { - _IntegerVar.put(varName,value); // update the response variable "." 
@@ -779,31 +636,22 @@ public void publishValue(String varName, int value) { @Override public void publishFeatureSet(String featureSet) { - // Add new features - no duplicates List featArray = Arrays.asList(featureSet.split(",")); - for (String feature : featArray) { - - publishFeature(feature); - } + for (String feature : featArray) publishFeature(feature); } @Override public void retractFeatureSet(String featureSet) { - // Add new features - no duplicates List featArray = Arrays.asList(featureSet.split(",")); - for (String feature : featArray) { - - retractFeature(feature); - } + for (String feature : featArray) retractFeature(feature); } @Override public void publishFeature(String feature) { - _FeatureMap.put(feature, true); mTutor.addFeature(feature); } @@ -817,60 +665,32 @@ public void publishFeature(String feature) { */ @Override public void retractFeature(String feature) { - _FeatureMap.put(feature, false); mTutor.delFeature(feature); } - /** * * @param featureMap */ @Override public void publishFeatureMap(HashMap featureMap) { - - Iterator tObjects = featureMap.entrySet().iterator(); - - while (tObjects.hasNext()) { - - Map.Entry entry = (Map.Entry) tObjects.next(); - - Boolean active = (Boolean)entry.getValue(); - - if (active) { - String feature = (String)entry.getKey(); - - mTutor.addFeature(feature); - } + for (Object entry : featureMap.entrySet()) { + if ((Boolean)((Map.Entry)entry).getValue()) mTutor.addFeature((String)((Map.Entry)entry).getKey()); } } - /** * * @param featureMap */ @Override public void retractFeatureMap(HashMap featureMap) { - - Iterator tObjects = featureMap.entrySet().iterator(); - - while (tObjects.hasNext()) { - - Map.Entry entry = (Map.Entry) tObjects.next(); - - Boolean active = (Boolean)entry.getValue(); - - if (active) { - String feature = (String)entry.getKey(); - - mTutor.delFeature(feature); - } + for (Object entry : featureMap.entrySet()) { + if ((Boolean)((Map.Entry)entry).getValue()) mTutor.delFeature((String)((Map.Entry)entry).getKey()); } } - // publish component state data - EBD //************************************************************************ //************************************************************************ @@ -880,82 +700,71 @@ public void retractFeatureMap(HashMap featureMap) { //********************************************************** //***************** Scripting Interface - @Override public void setVisibility(String visible) { - mSceneObject.setVisibility(visible); } - /** * Defer to the base-class * * @param storyName */ + @Override public void setStory(String storyName, String assetLocation) { - super.setStory(storyName, assetLocation); } - @Override public void setFeature(String feature, boolean fadd) { - - if (fadd) { - publishFeature(feature); - } else { - retractFeature(feature); - } + if (fadd) publishFeature(feature); + else retractFeature(feature); } - @Override public boolean testFeature(String feature) { - return mTutor.testFeature(feature); } + @Override + public String getTutorVariant() { + return mTutor.getTutorVariant(); + } + + @Override + public void setTutorFeatures(String variant) { + mTutor.setTutorFeatures(variant, null); + } + @Override public void next() { - reset(); - super.next(); - - if (dataExhausted()) - publishFeature(TCONST.FTR_EOI); + if (dataExhausted()) publishFeature(TCONST.FTR_EOI); } - + @Override public void startStory() { super.startStory(); } - public TBoolean test() { boolean correct = isCorrect(); - - if (correct) - publishFeature("FTR_RIGHT"); - else - 
publishFeature("FTR_WRONG"); - + publishFeature(correct ? TCONST.GENERIC_RIGHT : TCONST.GENERIC_WRONG); return new TBoolean(correct); } - + @Override public void setPageFlipButton(String command) { super.setPageFlipButton(command); } + @Override public void setSpeakButton(String command) { super.setSpeakButton(command); } - public void onButtonClick(String buttonName) { - switch (buttonName) { case TCONST.PAGEFLIP_BUTTON: applyBehavior(buttonName); @@ -967,10 +776,8 @@ public void onButtonClick(String buttonName) { } } - @Override public void updateContext(String sentence, int index, String[] wordList, int wordIndex, String word, int attempts, boolean virtual, boolean correct) { - currentSentence = sentence; currentIndex = index; sentenceWords = wordList; @@ -981,21 +788,13 @@ public void updateContext(String sentence, int index, String[] wordList, int wor trackAndLogPerformance("WORD", correct); } - @Override public void UpdateValue(boolean correct) { - reset(); - - if (correct) - publishFeature(TCONST.GENERIC_RIGHT); - else - publishFeature(TCONST.GENERIC_WRONG); + publishFeature(correct ? TCONST.GENERIC_RIGHT : TCONST.GENERIC_WRONG); } - public void reset() { - retractFeature(TCONST.GENERIC_RIGHT); retractFeature(TCONST.GENERIC_WRONG); } @@ -1085,7 +884,6 @@ public void continueListening() { mViewManager.continueListening(); } - // Scripting Interface End //************************************************************************ //************************************************************************ @@ -1112,7 +910,6 @@ public void setParent(ITutorSceneImpl mParent) { @Override public void setTutor(CTutor tutor) { - mTutor = tutor; mSceneObject.setTutor(tutor); @@ -1140,7 +937,6 @@ public void setLogManager(ILogManager logManager) { // *** Serialization - /** * Load the data source * @@ -1148,13 +944,11 @@ public void setLogManager(ILogManager logManager) { */ @Override public void loadJSON(JSONObject jsonObj, IScope scope) { - // Log.d(TAG, "Loader iteration"); super.loadJSON(jsonObj, (IScope2) scope); } private void trackAndLogPerformance(String task, boolean correct) { - PerformanceLogItem event = new PerformanceLogItem(); event.setUserId(RoboTutor.STUDENT_ID); @@ -1167,12 +961,9 @@ private void trackAndLogPerformance(String task, boolean correct) { event.setPromotionMode(RoboTutor.getPromotionMode(event.getMatrixName())); event.setLevelName(task); event.setTaskName("story"); - String cleanedSentence = currentSentence.replaceAll(",", "").replaceAll("\"", ""); // logger handles commas and quotes weird - event.setProblemName(cleanedSentence); + if (currentSentence != null) event.setProblemName(currentSentence.replaceAll(",", "").replaceAll("\"", "")); event.setProblemNumber(currentIndex); - if (dataSource != null) { - event.setTotalProblemsCount(dataSource.length); - } + if (dataSource != null) event.setTotalProblemsCount(dataSource.length); event.setSubstepNumber(expectedWordIndex); event.setAttemptNumber(attemptCount); event.setExpectedAnswer(sentenceWords != null && expectedWordIndex < sentenceWords.length ? 
sentenceWords[expectedWordIndex] : ""); diff --git a/comp_listener/src/main/java/edu/cmu/xprize/listener/ListenerBase.java b/comp_listener/src/main/java/edu/cmu/xprize/listener/ListenerBase.java index 6169ecfb0..b4171fad2 100644 --- a/comp_listener/src/main/java/edu/cmu/xprize/listener/ListenerBase.java +++ b/comp_listener/src/main/java/edu/cmu/xprize/listener/ListenerBase.java @@ -52,19 +52,18 @@ public class ListenerBase { */ protected SpeechRecognizer recognizer; - static protected ListenerAssets assets; // created in init phase - + static protected ListenerAssets assets; // created in init phase - - protected String captureLabel = ""; // label for capture, logging files - protected boolean IS_LOGGING = false; + protected String captureLabel = ""; // label for capture, logging files + protected boolean IS_LOGGING = false; - protected File configFile; // config file to use, null => default - protected File modelsDir; // saved model directory + protected File configFile; // config file to use, null => default + protected File modelsDir; // saved model directory protected LogMath logMath; // needed for creating Fsgs private String acousticModel = LCONST.KIDS; // LCONST.KIDS | LCONST.ADULT - protected String userID; // User ID - + protected String userID; // User ID // to work around pocketsphinx timing bug: when recognizing continuously across silent pauses, // after a pause hyp words from a speech segments before the pause have their reported frame times @@ -76,7 +75,7 @@ public class ListenerBase { protected IAsrEventListener eventListener; // where to send client notification callbacks protected static final String SENTENCE_SEARCH = "sentence"; // label for our search in decoder - protected static final String JSGF_SEARCH = "jsgf_search"; // label for our search in decoder + protected static final String JSGF_SEARCH = "jsgf_search"; // label for our search in decoder // This is used to map language "Features" to the associated dictionary filenames // Dictionary files are located in the assets/sync/models/lm @@ -89,12 +88,11 @@ public class ListenerBase { dictMap.put("LANG_SW", "SWAHILI.DIC"); } - static private boolean isReady = false; - static private String TAG = "ListenerBase"; + static private boolean isReady = false; + static private String TAG = "ListenerBase"; public ListenerBase() { - } /** @@ -105,11 +103,9 @@ public ListenerBase() { public ListenerBase(String userID) { this.userID = userID; configFile = null; - // decoder setup deferred until init() call. } - /** * construct Listener to setup decoder from a pocketsphinx config file. For path arguments config file must contain * absolute paths on the Android device. @@ -120,47 +116,38 @@ public ListenerBase(String userID) { public ListenerBase(String userID, File config) { this.userID = userID; configFile = config; - // decoder setup deferred until init() call. 
} - /** * Initialize the listener * * @param langFTR -- application context for locating resources and external storage */ public void setLanguage(String langFTR) { - // Configure the phonetic rules that will be used by the decoder // TODO: Need to make phoneme lang rules dynamic so we may have multiple recognizers - // Phoneme.setTargetLanguage(langFTR); // initialize recognizer for our task - // setupRecognizer(assets.getExternalDir(), configFile, dictMap.get(langFTR)); } - /** * Utility method to initialize the listener assets folder * * @param callback */ public void configListener(IReadyListener callback) { - tutorRoot = callback; new listenerConfigTask().execute((Context) callback); } - /** * Construct and initialize the speech recognizer */ protected void setupRecognizer(File assetsDir, File configFile, String langDictionary) { - try { // save path to modelsDir for use when finding fsgs modelsDir = new File(assetsDir, "models"); @@ -169,14 +156,11 @@ protected void setupRecognizer(File assetsDir, File configFile, String langDicti // In this config file must specify *all* non-default pocketsphinx parameters if (configFile != null) { recognizer = SpeechRecognizerSetup.setupFromFile(configFile).getRecognizer(); - - } else { // init using default config parameters - - switch(acousticModel) { + } else { + // init using default config parameters + switch (acousticModel) { case LCONST.KIDS: - // create pocketsphinx SpeechRecognizer using the SpeechRecognizerSetup factory method - recognizer = SpeechRecognizerSetup.defaultSetup() // our pronunciation dictionary .setDictionary(new File(modelsDir, "lm/" + langDictionary)) @@ -189,30 +173,20 @@ protected void setupRecognizer(File assetsDir, File configFile, String langDicti .setBoolean("-verbose", true) // maximum log output .setFloat("-samprate", 16000f) - .setInteger("-nfft", 512) - .setInteger("-frate", 100) - .setFloat("-lowerf", 50f) - .setFloat("-upperf", 6800f) - .setBoolean("-dither", true) - .setInteger("-nfilt", 40) - .setInteger("-ncep", 13) - .setString("-agc", "none") .setFloat("-ascale", 1f) // 20 in default .setBoolean("-backtrace", true) // no in default - .setDouble("-beam", 1e-80) // 1e-48 in default - .setBoolean("-bestpath", false) // yes in default -// .setString("-cmn", "current") + //.setString("-cmn", "current") .setString("-cmn", "prior") .setBoolean("-compallsen", false) .setBoolean("-dictcase", false) @@ -220,35 +194,28 @@ protected void setupRecognizer(File assetsDir, File configFile, String langDicti .setBoolean("-fwdflat", false) // yes in default .setInteger("-latsize", 5000) .setFloat("-lpbeam", 1e-5f) // 1e-40 in default - .setDouble("-lponlybeam", 7e-29) // - .setFloat("-lw", 10f) // 6.5 in default .setInteger("-maxhmmpf", 1500) // 10000 in default //.setInteger("-maxnewoov", 5000) // 20 in default .setDouble("-pbeam", 1e-80) // 1e-48 in default - .setFloat("-pip", 1f) - .setBoolean("-remove_noise", true) // yes in default - .setBoolean("-remove_silence", true) // yes in default + .setBoolean("-remove_noise", true) // yes in default + .setBoolean("-remove_silence", true) // yes in default .setFloat("-silprob", 1f) // 0.005 in default .setInteger("-topn", 4) .setDouble("-wbeam", 1e-60) // 7e-29 in default - .setFloat("-wip", 1f) // 0.65 in default .getRecognizer(); - break; case LCONST.ADULT: - // create pocketsphinx SpeechRecognizer using the SpeechRecognizerSetup factory method - recognizer = SpeechRecognizerSetup.defaultSetup() // our pronunciation dictionary //.setDictionary(new File(modelsDir, 
"lm/CMU07A-CAPS.DIC")) @@ -260,19 +227,16 @@ protected void setupRecognizer(File assetsDir, File configFile, String langDicti // this automatically logs raw audio to the specified directory: .setRawLogDir(assetsDir) - /* can't get sphinx logfile on Android, log messages go to LogCat facility instead - .setString("-logfn", new File(assetsDir, logName).getPath()) - */ + // can't get sphinx logfile on Android, log messages go to LogCat facility instead + //.setString("-logfn", new File(assetsDir, logName).getPath()) .setBoolean("-verbose", true) // maximum log output // a few other settings we might want to experiment with: - // threshold for voice activity detection: .setFloat("-vad_threshold", LCONST.VAD_THRESHOLD) // default 2.0 // other vad parameters: // .setInteger("vad_postspeech", 50) // default 50 (centiseconds) // .setInteger("vad_prespeech", 10) // default 10 (centiseconds) - // .setFloat("-silprob", 0.005f) // default 0.005 .setFloat("-fillprob", LCONST.FILLPROB) // default 1e-8f // .setFloat("-wip", 0.65f) // default 0.65 @@ -284,18 +248,14 @@ protected void setupRecognizer(File assetsDir, File configFile, String langDicti // save a log math object to use when constructing FsgModels. logMath = new LogMath(); - } - catch (Exception e) { + } catch (Exception e) { CErrorManager.logEvent(TAG, "Recognizer configuration error: ", e, false); } } - - /** - * Moves new assets to an external folder so the Sphinx code can access it. - * - */ + * Moves new assets to an external folder so the Sphinx code can access it. + */ class listenerConfigTask extends AsyncTask { @Override @@ -304,7 +264,6 @@ protected void onPreExecute() { @Override protected Boolean doInBackground(Context... params) { - boolean result = false; try { // sync assets from resources to filesystem via ListenerAssets class @@ -313,7 +272,6 @@ protected Boolean doInBackground(Context... params) { assets = new ListenerAssets(params[0]); assets.syncAssets(); result = true; - } catch (IOException e) { // TODO: Manage exceptions Log.d("ASR", "init Failed: " + e); @@ -330,14 +288,13 @@ protected void onPostExecute(Boolean result) { } } + public void listenFor(String[] wordsToHear, int startWord) { + } - public void listenFor(String[] wordsToHear, int startWord){} - - - public void listenForSentence(String[] wordsToHear, int startWord){} - + public void listenForSentence(String[] wordsToHear, int startWord) { + } - public void updateNextWordIndex(int next){ + public void updateNextWordIndex(int next) { } /** @@ -352,19 +309,16 @@ public boolean isReady() { * Stop the listener. Will send final hypothesis event */ public void stop() { - if (recognizer != null) - recognizer.stop(); + if (recognizer != null) recognizer.stop(); } /** * Cancel the listener. Does not send final hypothesis event */ public void cancel() { - if (recognizer != null) - recognizer.cancel(); + if (recognizer != null) recognizer.cancel(); } - /** * Attach event listener to receive notification callbacks */ @@ -373,8 +327,7 @@ public void setEventListener(IAsrEventListener callbackSink) { } public void setPauseListener(boolean pauseListener) { - if (recognizer != null) - recognizer.setPauseRecognizer(pauseListener); + if (recognizer != null) recognizer.setPauseRecognizer(pauseListener); } /** @@ -382,7 +335,6 @@ public void setPauseListener(boolean pauseListener) { * @return */ public boolean isListening() { - return (recognizer != null)? 
recognizer.isListening(): false; } @@ -406,9 +358,6 @@ public void resetStaticEvent(int eventType) { recognizer.resetStaticEvent(eventType); } - - - /** * get the path to the capture file for given utterance label */ @@ -416,18 +365,12 @@ public File getCaptureFile(String utteranceLabel) { return new File(recognizer.rawLogDir, utteranceLabel + ".wav"); } - public void deleteLogFiles() { - if (recognizer == null) - return; + if (recognizer == null) return; new File(recognizer.rawLogDir, captureLabel + "-log.txt").delete(); new File(recognizer.rawLogDir, captureLabel + ".raw").delete(); } - - - - /** * class used to hold info about heard words in recognition results. */ @@ -448,7 +391,6 @@ public static class HeardWord { */ public int matchLevel; - /** * default value: no information */ @@ -466,7 +408,6 @@ public static class HeardWord { */ public static final int MATCH_EXACT = 3; - /** * start time of word, milliseconds since epoch */ @@ -560,13 +501,10 @@ public static String[] textToWords(String text) { } - /***** Logging */ - - /** - * get the path to the hypothesis log file for given utterance label + * get the path to the hypothesis log file for given utterance label */ protected File getHypLogFile(String utteranceLabel) { // store it alongside the captured audio file @@ -597,13 +535,9 @@ protected void logHyp(String timestamp, String hyp, List segments, Hear bw.write(" TIME: " + timestamp + "\n"); bw.write(" DECODER OUTPUT: " + hyp + "\n"); bw.write(" RAW SEGMENTS:\n"); - for (Segment s : segments) { - bw.write(s.getWord() + " " + s.getStartFrame() + " " + s.getEndFrame() + "\n"); - } + for (Segment s : segments) bw.write(s.getWord() + " " + s.getStartFrame() + " " + s.getEndFrame() + "\n"); bw.write(" SEGMENTATION:\n"); - for (HeardWord hw : heardWords) { - bw.write(hw.hypWord + " " + hw.startFrame + " " + hw.endFrame + "\n"); - } + for (HeardWord hw : heardWords) bw.write(hw.hypWord + " " + hw.startFrame + " " + hw.endFrame + "\n"); bw.write("\n"); bw.close(); @@ -611,6 +545,4 @@ protected void logHyp(String timestamp, String hyp, List segments, Hear Log.e("logHyp", "Error writing hypothesis log file " + e.getMessage()); } } - - } diff --git a/comp_listener/src/main/java/edu/cmu/xprize/listener/ListenerJSGF.java b/comp_listener/src/main/java/edu/cmu/xprize/listener/ListenerJSGF.java index bcf12db7f..29cb7f4b3 100644 --- a/comp_listener/src/main/java/edu/cmu/xprize/listener/ListenerJSGF.java +++ b/comp_listener/src/main/java/edu/cmu/xprize/listener/ListenerJSGF.java @@ -19,36 +19,21 @@ package edu.cmu.xprize.listener; -import android.annotation.SuppressLint; -import android.annotation.TargetApi; -import android.content.Context; -import android.os.AsyncTask; -import android.os.Build; import android.os.Environment; import android.text.TextUtils; import android.util.Log; -import java.io.BufferedWriter; import java.io.File; import java.io.FileOutputStream; -import java.io.FileWriter; -import java.io.IOException; import java.io.OutputStream; import java.text.SimpleDateFormat; -import java.util.ArrayList; import java.util.Arrays; import java.util.Date; -import java.util.HashMap; import java.util.HashSet; -import java.util.List; import java.util.Locale; -import cmu.xprize.util.IReadyListener; -import cmu.xprize.util.TimerUtils; -import edu.cmu.pocketsphinx.FsgModel; import edu.cmu.pocketsphinx.Hypothesis; import edu.cmu.pocketsphinx.LogMath; -import edu.cmu.pocketsphinx.Segment; /** @@ -61,121 +46,94 @@ public class ListenerJSGF extends ListenerBase { // state for the current ListenFor 
operation - private String sentenceWords[]; // array of sentence words to hear - private int iExpected = 0; // index of expected next word in sentence - private int iNextWord = 0; // Next word expected. + private int iExpected = 0; // index of expected next word in sentence + private int iNextWord = 0; // Next word expected. private HeardWord[] heardWords = null; // latest total aligned hypothesis - private long sentenceStartTime; // time in ms since epoch - private long sentenceStartSamples; // sample counter at sentence start, for adjusting frame numbers + private long sentenceStartTime; // time in ms since epoch + private long sentenceStartSamples; // sample counter at sentence start, for adjusting frame numbers private final int SAMPLES_PER_FRAME = 160; // number of samples in a centisecond frame at 16000 samples/sec private boolean speaking = false; // speaking state. [currently unused] - public final static String ROBOTUTOR_DATA_PATH = Environment.getExternalStorageDirectory().getPath(); - //public final static String ROBOTUTOR_ASSETS_JSGF = "/RoboTutor/util/asr/"; - public final static String ROBOTUTOR_ASSETS_JSGF = "/RoboTutor/"; - public final static String JSGF_TEMP = "jsgf.gram"; + public final static String ROBOTUTOR_DATA_PATH = Environment.getExternalStorageDirectory().getPath(); +// public final static String ROBOTUTOR_ASSETS_JSGF = "/RoboTutor/util/asr/"; + public final static String ROBOTUTOR_ASSETS_JSGF = "/RoboTutor/"; + public final static String JSGF_TEMP = "jsgf.gram"; - public String testGrammar = "#JSGF V1.0;\n" + - "\n" + - "grammar sentence;\n" + - "\n" + + public String testGrammar = "#JSGF V1.0;\n\n" + + "grammar sentence;\n\n" + " = FOR|FORWARD|WARD;\n" + "public = ;\n"; -// " = SIKU| MOJA| MIMI |NA |DADA |YANGU| TULIKUWA| TUKITEMBEA |KARIBU |NA |BARABARA;\n" + +// " = SIKU| MOJA| MIMI |NA |DADA |YANGU| TULIKUWA| TUKITEMBEA |KARIBU |NA |BARABARA;\n" + // "public = ;\n"; //Siku moja, mimi na dada yangu tulikuwa tukitembea karibu na barabara. - /** * Attach event listener to receive notification callbacks */ public void updateNextWordIndex(int next) { - iNextWord = next > 0? next:0; + iNextWord = next > 0 ? next : 0; } - // ------------------------------------------------ // Language model generation // ------------------------------------------------ // generate the language model for given asr words private void generateLM(String[] wordsToHear) { - // ensure all sentence words in dictionary HashSet wordSet = new HashSet<>(Arrays.asList(wordsToHear)); for (String word : wordSet) { if (recognizer.decoder.lookupWord(word) == null) { // word not in dictionary - // Synthesize a pronunciation using English rule-based synthesizer String phonemes = Phoneme.toPhoneme(word).trim(); - if (phonemes.isEmpty()) - continue; + if (phonemes.isEmpty()) continue; Log.i("generateLM", "addWord " + word + " pronunciation " + phonemes); recognizer.decoder.addWord(word, phonemes, 1); // more efficient to pass 1 (true) on last word only? 
} } } - public void listenFor(String[] wordsToHear, int startWord) { - // Ensure all the words are in the language model - // generateLM(wordsToHear); // generate a grammar that allows any words - // - String grammar = "#JSGF V1.0;\n" + "\n" + - "grammar sentence;\n" + - "\n" + + String grammar = "#JSGF V1.0;\n\n" + + "grammar sentence;\n\n" + " = " + TextUtils.join("|", wordsToHear) + ";\n" + "public = "; - for(int i1 = 0; i1 < wordsToHear.length ; i1++) { - grammar += ""; - } + for (int i1 = 0; i1 < wordsToHear.length; i1++) grammar += ""; grammar += ";\n"; // Listen for a sequence - // listenFor(grammar); } - public void listenForSentence(String[] wordsToHear, int startWord) { - // Ensure all the words are in the language model - // generateLM(wordsToHear); // generate a grammar that allows any words - // - String grammar = "#JSGF V1.0;\n" + "\n" + - "grammar sentence;\n" + - "\n" + + String grammar = "#JSGF V1.0;\n\n" + + "grammar sentence;\n\n" + "public = " + TextUtils.join(" ", wordsToHear) + ";\n"; // Listen for a sequence - // listenFor(grammar); } - public void listenFor(String jSgrammar) { - - String outPath; - - outPath = ROBOTUTOR_DATA_PATH + ROBOTUTOR_ASSETS_JSGF; + String outPath = ROBOTUTOR_DATA_PATH + ROBOTUTOR_ASSETS_JSGF; File outputFile = new File(outPath); - if(!outputFile.exists()) - outputFile.mkdir(); + if (!outputFile.exists()) outputFile.mkdir(); - outPath += JSGF_TEMP; + outPath += JSGF_TEMP; try { OutputStream out = new FileOutputStream(outPath); @@ -183,9 +141,7 @@ public void listenFor(String jSgrammar) { byte[] bytes = jSgrammar.getBytes(); out.write(bytes); out.close(); - } - catch(Exception e) { - + } catch (Exception e) { } outputFile = new File(outPath); @@ -193,19 +149,16 @@ public void listenFor(String jSgrammar) { listenFor(outputFile); } - /** * Set the words to listen for and the starting position * * @param jSgrammar */ public void listenFor(File jSgrammar) { - Log.d("ASR", "ListenFor: " + jSgrammar); // start listening if (recognizer != null) { - // register our language model in the decoder // Note that this replaces any model of the same name - // i.e. 
SENTENCE_SEARCH - see: pocketsphinx.c:set_search_internal @@ -223,15 +176,13 @@ public void listenFor(File jSgrammar) { // record start time now sentenceStartTime = System.currentTimeMillis(); - //TimerUtils.startTimer(); +// TimerUtils.startTimer(); // start background thread for capturing audio from microphone recognizer.startListening(JSGF_SEARCH, captureLabel); // start per-capture log file for tracing sequence of partial hypotheses for this target - if(IS_LOGGING) - beginHypLog(); - + if (IS_LOGGING) beginHypLog(); } } @@ -241,7 +192,6 @@ public void listenFor(File jSgrammar) { */ @Override protected void setupRecognizer(File assetsDir, File configFile, String langDictionary) { - // save path to modelsDir for use when finding fsgs modelsDir = new File(assetsDir, "models"); @@ -249,37 +199,33 @@ protected void setupRecognizer(File assetsDir, File configFile, String langDicti // In this config file must specify *all* non-default pocketsphinx parameters if (configFile != null) { recognizer = SpeechRecognizerSetup.setupFromFile(configFile).getRecognizer(); - - } else { // init using default config parameters - + } else { + // init using default config parameters // create pocketsphinx SpeechRecognizer using the SpeechRecognizer2Setup factory method recognizer = SpeechRecognizerSetup.defaultSetup() // our pronunciation dictionary //.setDictionary(new File(modelsDir, "lm/CMU07A-CAPS.DIC")) .setDictionary(new File(modelsDir, "lm/" + langDictionary)) - // our acoustic model + // our acoustic model .setAcousticModel(new File(modelsDir, "hmm/en-us-semi")) - // this automatically logs raw audio to the specified directory: + // this automatically logs raw audio to the specified directory: .setRawLogDir(assetsDir) - /* can't get sphinx logfile on Android, log messages go to LogCat facility instead - .setString("-logfn", new File(assetsDir, logName).getPath()) - */ + // can't get sphinx logfile on Android, log messages go to LogCat facility instead + // .setString("-logfn", new File(assetsDir, logName).getPath()) .setBoolean("-verbose", true) // maximum log output - // a few other settings we might want to experiment with: - - // threshold for voice activity detection: + // a few other settings we might want to experiment with: + // threshold for voice activity detection: .setFloat("-vad_threshold", LCONST.VAD_THRESHOLD) // default 2.0 - // other vad parameters: - // .setInteger("vad_postspeech", 50) // default 50 (centiseconds) - // .setInteger("vad_prespeech", 10) // default 10 (centiseconds) - - // .setFloat("-silprob", 0.005f) // default 0.005 + // other vad parameters: + // .setInteger("vad_postspeech", 50) // default 50 (centiseconds) + // .setInteger("vad_prespeech", 10) // default 10 (centiseconds) + // .setFloat("-silprob", 0.005f) // default 0.005 .setFloat("-fillprob", LCONST.FILLPROB) // default 1e-8f - // .setFloat("-wip", 0.65f) // default 0.65 + // .setFloat("-wip", 0.65f) // default 0.65 .getRecognizer(); } @@ -291,7 +237,6 @@ protected void setupRecognizer(File assetsDir, File configFile, String langDicti recognizer.addListener(new IPocketSphinxListener()); } - // private inner class to hide our event listener implementation. 
// We receive these events from the SpeechRecognizer object for our own use, and send similar events from the // IAsrEventListener interface to our client app @@ -317,12 +262,10 @@ public void onResult(Hypothesis hypothesis) { @Override public void onError(Exception e) { - } @Override public void onTimeout() { - } @Override @@ -330,8 +273,7 @@ public void onBeginningOfSpeech() { speaking = true; // forward to listener client app - if (eventListener != null) - eventListener.onBeginningOfSpeech(); + if (eventListener != null) eventListener.onBeginningOfSpeech(); } @Override @@ -342,8 +284,7 @@ public void onEndOfSpeech() { prePauseResult = heardWords; // forward to listener client app - if (eventListener != null) - eventListener.onEndOfSpeech(); + if (eventListener != null) eventListener.onEndOfSpeech(); } @Override @@ -352,22 +293,15 @@ public void onASREvent(int eventType) { } } - // handle a partial or final hypothesis from pocketsphinx private void processHypothesis(Hypothesis hypothesis, Boolean finalResult) { - - } // handle a partial or final hypothesis from pocketsphinx private void processHypothesis(String[] hypothesis) { - // post update to client component - // - if (eventListener != null) { - eventListener.onUpdate(hypothesis, false); - } + if (eventListener != null) eventListener.onUpdate(hypothesis, false); } } // end Listener class diff --git a/comp_listener/src/main/java/edu/cmu/xprize/listener/ListenerPLRT.java b/comp_listener/src/main/java/edu/cmu/xprize/listener/ListenerPLRT.java index 497b9e6b6..7ee826764 100644 --- a/comp_listener/src/main/java/edu/cmu/xprize/listener/ListenerPLRT.java +++ b/comp_listener/src/main/java/edu/cmu/xprize/listener/ListenerPLRT.java @@ -19,11 +19,6 @@ package edu.cmu.xprize.listener; -import android.annotation.SuppressLint; -import android.annotation.TargetApi; -import android.content.Context; -import android.os.AsyncTask; -import android.os.Build; import android.text.TextUtils; import android.util.Log; @@ -35,16 +30,12 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Date; -import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Locale; -import cmu.xprize.util.IReadyListener; -import cmu.xprize.util.TimerUtils; import edu.cmu.pocketsphinx.FsgModel; import edu.cmu.pocketsphinx.Hypothesis; -import edu.cmu.pocketsphinx.LogMath; import edu.cmu.pocketsphinx.Segment; @@ -59,15 +50,16 @@ public class ListenerPLRT extends ListenerBase { // state for the current ListenFor operation - private String sentenceWords[]; // array of sentence words to hear - private int iExpected = 0; // index of expected next word in sentence - private int iNextWord = 0; // Next word expected. - private HeardWord[] heardWords = null; // latest total aligned hypothesis - private long sentenceStartTime; // time in ms since epoch - private long sentenceStartSamples; // sample counter at sentence start, for adjusting frame numbers - private final int SAMPLES_PER_FRAME = 160; // number of samples in a centisecond frame at 16000 samples/sec - private boolean useTruncations = true; // Flag whether or not to use truncations. - private boolean speaking = false; // speaking state. [currently unused] + private String[] sentenceWords; // array of sentence words to hear + private int iExpected = 0; // index of expected next word in sentence + private int iNextWord = 0; // Next word expected. 
+ private HeardWord[] heardWords = null; // latest total aligned hypothesis + private long sentenceStartTime; // time in ms since epoch + private long sentenceStartSamples; // sample counter at sentence start, for adjusting frame numbers + private final int SAMPLES_PER_FRAME = 160; // number of samples in a centisecond frame at 16000 samples/sec + private boolean useTruncations = true; // Flag whether or not to use truncations. + private boolean speaking = false; // speaking state. [currently unused] + /** * Attach event listener to receive notification callbacks @@ -84,7 +76,6 @@ public void updateNextWordIndex(int next) { * @param startWord -- 0-based index of word to expect next */ public void listenFor(String[] wordsToHear, int startWord) { - Log.d("STABLE", "ListenFor: " + TextUtils.join(" ", wordsToHear)); // try to build the language model. Note this updates dictionary attached to decoder @@ -98,11 +89,10 @@ public void listenFor(String[] wordsToHear, int startWord) { // Remember the current sentence words and start position - // The start position is used by MultiMatch to align the hypothesis with the sentence. sentenceWords = wordsToHear; - iExpected = startWord; + iExpected = startWord; // start listening if (recognizer != null) { - // register our language model in the decoder // Note that this replaces any model of the same name - // i.e. SENTENCE_SEARCH - see: pocketsphinx.c:set_search_internal @@ -114,8 +104,7 @@ public void listenFor(String[] wordsToHear, int startWord) { // reset prePause cache prePauseResult = null; - // save stream offset of start of utterance, for converting stream-based frame times - // to utterance-based times. + // save stream offset of start of utterance, for converting stream-based frame times to utterance-based times sentenceStartSamples = recognizer.nSamples; // record start time now sentenceStartTime = System.currentTimeMillis(); @@ -126,26 +115,21 @@ public void listenFor(String[] wordsToHear, int startWord) { recognizer.startListening(SENTENCE_SEARCH, captureLabel); // start per-capture log file for tracing sequence of partial hypotheses for this target - if(IS_LOGGING) - beginHypLog(); - + if (IS_LOGGING) beginHypLog(); } } - /** * Construct and initialize the speech recognizer */ @Override protected void setupRecognizer(File assetsDir, File configFile, String langDictionary) { - super.setupRecognizer(assetsDir, configFile, langDictionary); // use a private implementation to receive events from pocketsphinx recognizer.addListener(new IPocketSphinxListener()); } - // private inner class to hide our event listener implementation. 
// We receive these events from the SpeechRecognizer object for our own use, and send similar events from the // IAsrEventListener interface to our client app @@ -154,8 +138,8 @@ private class IPocketSphinxListener implements ITutorListener { @Override public void onStableResult(String[] hypothesis) { // NOTE: that hypothesis may be null during shutdown - if(hypothesis != null) { - Log.i("ASR", "Part Hyp: " + TextUtils.join(" ", hypothesis)); + if (hypothesis != null) { + Log.i("ASR", "Stable Hyp: " + TextUtils.join(" ", hypothesis)); processHypothesis(hypothesis, false); } } @@ -163,8 +147,8 @@ public void onStableResult(String[] hypothesis) { @Override public void onPartialResult(Hypothesis hypothesis) { // NOTE: that hypothesis may be null during shutdown - if(hypothesis != null) { - Log.i("ASR", "Part Hyp: " + hypothesis.getHypstr()); + if (hypothesis != null) { + Log.i("ASR", "Partial Hyp: " + hypothesis.getHypstr()); processHypothesis(hypothesis, false); } } @@ -172,7 +156,7 @@ public void onPartialResult(Hypothesis hypothesis) { @Override public void onResult(Hypothesis hypothesis) { // NOTE: that hypothesis may be null during shutdown - if(hypothesis != null) { + if (hypothesis != null) { Log.i("ASR", "Final Hyp: " + hypothesis.getHypstr()); processHypothesis(hypothesis, true); } @@ -180,12 +164,10 @@ public void onResult(Hypothesis hypothesis) { @Override public void onError(Exception e) { - } @Override public void onTimeout() { - } @Override @@ -193,8 +175,7 @@ public void onBeginningOfSpeech() { speaking = true; // forward to listener client app - if (eventListener != null) - eventListener.onBeginningOfSpeech(); + if (eventListener != null) eventListener.onBeginningOfSpeech(); } @Override @@ -205,8 +186,7 @@ public void onEndOfSpeech() { prePauseResult = heardWords; // forward to listener client app - if (eventListener != null) - eventListener.onEndOfSpeech(); + if (eventListener != null) eventListener.onEndOfSpeech(); } @Override @@ -264,34 +244,25 @@ private static String NthPronName(String word, int ord) { * @return */ private FsgModel generateLM(String[] wordsToHear, int startWord) { - // ensure all sentence words in dictionary - // HashSet wordSet = new HashSet<>(Arrays.asList(wordsToHear)); for (String word : wordSet) { - if (recognizer.decoder.lookupWord(word) == null) { // word not in dictionary - // Synthesize a pronunciation using English rule-based synthesizer - // String phonemes = Phoneme.toPhoneme(word).trim(); - if (phonemes.isEmpty()) - continue; + if (phonemes.isEmpty()) continue; Log.i("generateLM", "addWord " + word + " pronunciation " + phonemes); recognizer.decoder.addWord(word, phonemes, 1); // more efficient to pass 1 (true) on last word only? 
} // ensure START_ words for truncated readings are in dictionary - if(useTruncations && recognizer.decoder.lookupWord(startWord(word)) == null) { - addTruncations(word); - } + if (useTruncations && recognizer.decoder.lookupWord(startWord(word)) == null) addTruncations(word); } // have to write to a temporary file to create LM - // String filename = "lm/fsg.txt"; File fsgFile = new File(modelsDir, filename); @@ -309,59 +280,47 @@ private FsgModel generateLM(String[] wordsToHear, int startWord) { final double PrJump = 0.03; // write the fsg file header info - int state_count = wordsToHear.length + 1; - int final_state = state_count - 1; + int final_state = wordsToHear.length; bw.write("FSG_BEGIN sentence\n"); - bw.write("NUM_STATES " + state_count + "\n"); + bw.write("NUM_STATES " + (final_state + 1) + "\n"); bw.write("START_STATE " + startWord + "\n"); bw.write("FINAL_STATE " + final_state + "\n"); // factor to normalize transition probabilities based on sentence length - int n = wordsToHear.length - 1; - if (n < 1) n = 1; + int n = wordsToHear.length; // add state transitions - for (int i = 0; i < state_count - 1; i++) { + for (int i = 0; i < final_state; i++) { // emit word i for transition from state i to i + 1 with probability PrCorrect AddFSGTransition(i, i + 1, PrCorrect, wordsToHear[i]); - //if this is not the last word of the sentence emit null word from transition from state i to the final state with probability PrEndEarly - if (i != final_state - 1) { - AddFSGTransition(i, final_state, PrEndEarly, ""); - } + // if this is not the last word of the sentence emit null word from transition from state i to the final state with probability PrEndEarly + if (i != final_state - 1) AddFSGTransition(i, final_state, PrEndEarly, ""); // truncations not yet implemented for words not in dictionary - if(useTruncations && recognizer.decoder.lookupWord(startWord(wordsToHear[i])) != null) { - //emit word i truncation for transition from state i to state i with probability PrTruncate + if (useTruncations && recognizer.decoder.lookupWord(startWord(wordsToHear[i])) != null) { + // emit word i truncation for transition from state i to state i with probability PrTruncate AddFSGTransition(i, i, PrTruncate, startWord(wordsToHear[i])); - //emit word i truncation for transition from state i to i + 1 with probability PrResume + // emit word i truncation for transition from state i to i + 1 with probability PrResume AddFSGTransition(i, i + 1, PrResume, startWord(wordsToHear[i])); } - //if i <> 0 emit null word for jump from state i back to state 0 with probability PrRestart - if (i != 0) { - AddFSGTransition(i, 0, PrRestart / n, ""); - } + // if i <> 0 emit null word for jump from state i back to state 0 with probability PrRestart + if (i != 0) AddFSGTransition(i, 0, PrRestart, ""); - //emit word i for transition from state i to state i with probability PrRepeat + // emit word i for transition from state i to state i with probability PrRepeat / n AddFSGTransition(i, i, PrRepeat / n, wordsToHear[i]); - // emit null word for jump from state i to state j with probability PrJump for all states j except state 0 - for (int j = 1; j < state_count - 1; j++) { - if (i != j) { - AddFSGTransition(i, j, PrJump / n, ""); - } - } + // emit null word for jump from state i to state j with probability PrJump / n - 1 for all states j except state 0 + for (int j = 1; j < final_state; j++) if (i != j) AddFSGTransition(i, j, PrJump / (n - 1), ""); } // add jump from final state back to start with probability PrRestart - 
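To make the FSG transition scheme being written here concrete (including the final-state restart and jump links added just below), the following is a minimal, self-contained sketch that prints an FSG of the same shape for a two-word sentence. Only PrJump (0.03) is visible in this hunk, so the other probability values are illustrative placeholders rather than the constants generateLM actually uses; truncation transitions are omitted and the start state is fixed at word 0.

```
// Illustrative sketch only: prints an FSG with the same transition shape as generateLM.
// Probability values other than PrJump are placeholders, not the real constants.
public class FsgSketch {

    static void addTransition(StringBuilder sb, int from, int to, double prob, String word) {
        sb.append("TRANSITION " + from + " " + to + " " + prob + " " + word + "\n");
    }

    public static void main(String[] args) {
        String[] words = {"MOJA", "MBILI"};   // hypothetical two-word sentence (n - 1 > 0 assumed)
        final double PrCorrect = 0.8, PrEndEarly = 0.01, PrRestart = 0.05,
                     PrRepeat = 0.05, PrJump = 0.03;   // placeholder values
        int finalState = words.length;        // states 0..n, where n is the final state
        int n = words.length;

        StringBuilder sb = new StringBuilder();
        sb.append("FSG_BEGIN sentence\n");
        sb.append("NUM_STATES " + (finalState + 1) + "\n");
        sb.append("START_STATE 0\n");
        sb.append("FINAL_STATE " + finalState + "\n");

        for (int i = 0; i < finalState; i++) {
            addTransition(sb, i, i + 1, PrCorrect, words[i]);                        // read word i in order
            if (i != finalState - 1) addTransition(sb, i, finalState, PrEndEarly, ""); // give up early
            if (i != 0) addTransition(sb, i, 0, PrRestart, "");                      // restart the sentence
            addTransition(sb, i, i, PrRepeat / n, words[i]);                         // repeat word i
            for (int j = 1; j < finalState; j++)                                     // jump to another word
                if (i != j) addTransition(sb, i, j, PrJump / (n - 1), "");
        }
        addTransition(sb, finalState, 0, PrRestart, "");                             // re-read after finishing
        for (int j = 1; j < finalState; j++)
            addTransition(sb, finalState, j, PrJump / (n - 1), "");
        sb.append("FSG_END\n");

        System.out.print(sb);
    }
}
```

For two words this yields nine TRANSITION lines: the two in-order word transitions, one early exit from state 0, the restart and repeat links, and the PrJump links between the non-initial states.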
AddFSGTransition(final_state, 0, PrRestart / n, ""); + AddFSGTransition(final_state, 0, PrRestart, ""); - // add jump from final state back to each earlier state - for (int st = 1; st < state_count - 1; st++) { - AddFSGTransition(final_state, st, PrJump / n, ""); - } + // add jump from final state back to each earlier state with probability PrJump / n - 1 + for (int j = 1; j < final_state; j++) AddFSGTransition(final_state, j, PrJump / (n - 1), ""); // done writing the file bw.write("FSG_END\n"); @@ -379,7 +338,6 @@ private void AddFSGTransition(int from, int to, double prob, String word) throws bw.write("TRANSITION " + from + " " + to + " " + prob + " " + word + "\n"); } - // add entries for all truncations of given ASR word to dictionary private void addTruncations(String word) { String truncWord = startWord(word); @@ -405,11 +363,8 @@ private void addTruncations(String word) { } } - // handle a partial or final hypothesis from pocketsphinx - // private void processHypothesis(Hypothesis hypothesis, Boolean finalResult) { - if (hypothesis == null) return; // get array of hypothesis words @@ -418,11 +373,8 @@ private void processHypothesis(Hypothesis hypothesis, Boolean finalResult) { processHypothesis(asrWords, finalResult); } - // handle a partial or final hypothesis from pocketsphinx - // private void processHypothesis(String[] asrWords, Boolean finalResult) { - String timestamp = timestampMillis(); // save receipt timestamp for logging if (sentenceWords == null) { @@ -430,20 +382,16 @@ private void processHypothesis(String[] asrWords, Boolean finalResult) { return; } - if (asrWords.length < 1) - return; + if (asrWords.length < 1) return; // get the list of segments ArrayList segments = new ArrayList<>(); - for (Segment s : recognizer.decoder.seg()) { - segments.add(s); - } + for (Segment s : recognizer.decoder.seg()) segments.add(s); - // optional: strip last hyp word if it is not terminated by silence because it is unreliable String[] wordsToUse = asrWords; + // optional: strip last hyp word if it is not terminated by silence because it is unreliable if (LCONST.LAST_WORD_LAG) { - // Find word of last segment in the segmentation detail String lastSegmentWord = null; @@ -460,7 +408,6 @@ private void processHypothesis(String[] asrWords, Boolean finalResult) { } if (wordsToUse.length >= 1) { - long multimatchTimer = System.currentTimeMillis(); // align hyp words with sentence words @@ -472,14 +419,10 @@ private void processHypothesis(String[] asrWords, Boolean finalResult) { getWordTimes(heardWords, sentenceWords, segments); // post update to client component - // - if (eventListener != null) { - eventListener.onUpdate(heardWords, finalResult); - } + if (eventListener != null) eventListener.onUpdate(heardWords, finalResult); // log the partial hypothesis - if(IS_LOGGING) - logHyp(timestamp, TextUtils.join(" ", asrWords), segments, heardWords); + if (IS_LOGGING) logHyp(timestamp, TextUtils.join(" ", asrWords), segments, heardWords); } } @@ -488,9 +431,11 @@ private void processHypothesis(String[] asrWords, Boolean finalResult) { // ------------------------------------------------ static private class MultiMatchScore { // record kept for one possible word alignment + int cost; // penalty for this alignment int nMatches; // number of word matches for this alignment - int iPrev; // sentence index of previous hyp word's alignment + int iPrev; // sentence index of previous hyp word's alignment + MultiMatchScore(int inCost, int inMatches, int inPrev) { cost = inCost; @@ -498,23 +443,20 @@ static 
private class MultiMatchScore { // record kept for one possible word a iPrev = inPrev; } - MultiMatchScore() { // init to very high score before searching for minimum + MultiMatchScore() { + // init to very high score before searching for minimum cost = 1000000; nMatches = 0; iPrev = -1; } } - // Multimatch costs: scaled by 100 from RT version to use integer arithmetic // cost for mismatch hypWord with sentenceWord private int mismatchCost(String hypWord, String sentenceWord) { - if (asrWordMatches(hypWord, sentenceWord)) - return 0; - - if(useTruncations && asrWordIsTruncationOf(hypWord, sentenceWord)) - return 0; + if (asrWordMatches(hypWord, sentenceWord)) return 0; + if (useTruncations && asrWordIsTruncationOf(hypWord, sentenceWord)) return 0; // else mismatch return 100; @@ -523,31 +465,24 @@ private int mismatchCost(String hypWord, String sentenceWord) { // cost of jump from position i to j private int jumpCost(int from, int to) { // different cost when LeftToRight alignment is configured - if (LCONST.ALIGN_L2R) - return jumpCostL2R(from, to); + if (LCONST.ALIGN_L2R) return jumpCostL2R(from, to); // else normal "chase the reader" alignment - if (to == from + 1) // no cost for sequential reading - return 0; - if (to == from) // small cost so advancing over HO HO beats repeating HO - return 1; - return 100; // cost of a jump, any direction or size + if (to == from + 1) return 0; // no cost for sequential reading + if (to == from) return 1; // small cost so advancing over HO HO beats repeating HO + return 100; // cost of a jump, any direction or size } // cost of jump when L2R alignment is being used private int jumpCostL2R(int from, int to) { - if (to == from + 1) // no cost for sequential reading - return 0; - if (to == from) // small cost so advancing over HO HO beats repeating HO - return 1; - if (to == from + 2) // skip one word: normal jump cost - return 100; - - return 999999; // very high cost for any other jump + if (to == from + 1) return 0; // no cost for sequential reading + if (to == from) return 1; // small cost so advancing over HO HO beats repeating HO + if (to == from + 2) return 100; // skip one word: normal jump cost + + return 999999; // very high cost for any other jump } // find least-cost alignment of hypWords to sentenceWords - @SuppressLint("NewApi") private HeardWord[] doMultiMatch(String[] hypWords, String[] sentenceWords) { // build array or HeardWord's to hold multimatch result ArrayList heardWords = new ArrayList<>(); @@ -556,57 +491,52 @@ private HeardWord[] doMultiMatch(String[] hypWords, String[] sentenceWords) { int costCalcWords = 0; // store scores in matrix, one row per hypWord with one column for each sentence position it could be aligned with for (int h = 0; h < hypWords.length; h++) { - MultiMatchScore multiMatchScore[] = new MultiMatchScore[sentenceWords.length]; + MultiMatchScore[] multiMatchScore = new MultiMatchScore[sentenceWords.length]; for (int s = 0; s < sentenceWords.length; s++) { - //@@ TODO: remove this dependency -// if (!ReadingTutorActivity.isWordCredited(s)) -// break; - // This is an experiment - should eliminate matches past expected word. 
+// if (!ReadingTutorActivity.isWordCredited(s)) break; + + // This is an experiment - should eliminate matches past expected word // TODO: TEST -> probably needs to be updated dynamically - if(s > iNextWord+1) - break; + if (s > iNextWord + 1) break; int mismatchCostHere = mismatchCost(hypWords[h], sentenceWords[s]); // match cost this position int matchesHere = asrWordMatches(hypWords[h], sentenceWords[s]) ? 1 : 0; - if (h == 0) { // first row, no predecessor => compute jump cost from just before expected start word + if (h == 0) { + // first row, no predecessor => compute jump cost from just before expected start word int cost = mismatchCostHere + jumpCost(iExpected - 1, s); multiMatchScore[s] = new MultiMatchScore(cost, matchesHere, -1); costCalcWords++; } else { // find lowest cost we can achieve here from each possible previous hypword alignment - MultiMatchScore prevWord[] = multiMatchScores.get(h - 1); + MultiMatchScore[] prevWordScore = multiMatchScores.get(h - 1); + MultiMatchScore best = new MultiMatchScore(); // best found so far for (int j = 0; j < costCalcWords; j++) { - int cost = prevWord[j].cost + mismatchCostHere + jumpCost(j, s); - int matches = prevWord[j].nMatches + matchesHere; - if (cost < best.cost || - (cost == best.cost && matches > best.nMatches) || - (cost == best.cost && matches == best.nMatches && jumpCost(j, s) == 0)) { + int cost = prevWordScore[j].cost + mismatchCostHere + jumpCost(j, s); + int matches = prevWordScore[j].nMatches + matchesHere; + if (cost < best.cost || (cost == best.cost && matches > best.nMatches) || (cost == best.cost && matches == best.nMatches && jumpCost(j, s) == 0)) best = new MultiMatchScore(cost, matches, j); - } } // record best value possible for this hypword alignment multiMatchScore[s] = best; } } + multiMatchScores.add(h, multiMatchScore); heardWords.add(h, new HeardWord(hypWords[h])); } // search last row to find best possible alignment of last hypWord - int hLast = heardWords.size() - 1; - int best_alignment = -1; + MultiMatchScore[] multiMatchScore = multiMatchScores.get(heardWords.size() - 1); MultiMatchScore best = new MultiMatchScore(); - MultiMatchScore multiMatchScore[] = multiMatchScores.get(hLast); - + int best_alignment = -1; for (int i = 0; i < costCalcWords; i++) { - if (multiMatchScore[i].cost < best.cost || - (multiMatchScore[i].cost == best.cost && multiMatchScore[i].nMatches > best.nMatches)) { + if (multiMatchScore[i].cost < best.cost || (multiMatchScore[i].cost == best.cost && multiMatchScore[i].nMatches > best.nMatches)) { best = multiMatchScore[i]; best_alignment = i; } @@ -620,12 +550,9 @@ private HeardWord[] doMultiMatch(String[] hypWords, String[] sentenceWords) { heardWord.iSentenceWord = best_alignment; // record match type - if (asrWordMatches(hypWords[h], sentenceWords[best_alignment])) - heardWord.matchLevel = HeardWord.MATCH_EXACT; - else if(useTruncations && asrWordIsTruncationOf(hypWords[h], sentenceWords[best_alignment])) - heardWord.matchLevel = HeardWord.MATCH_TRUNCATION; - else if (!hypWords[h].isEmpty()) // sanity check - heardWord.matchLevel = HeardWord.MATCH_MISCUE; + if (asrWordMatches(hypWords[h], sentenceWords[best_alignment])) heardWord.matchLevel = HeardWord.MATCH_EXACT; + else if (useTruncations && asrWordIsTruncationOf(hypWords[h], sentenceWords[best_alignment])) heardWord.matchLevel = HeardWord.MATCH_TRUNCATION; + else if (!hypWords[h].isEmpty()) heardWord.matchLevel = HeardWord.MATCH_MISCUE; // sanity check // would also record lots of other context about hypWord here @@ -635,7 
+562,7 @@ else if (!hypWords[h].isEmpty()) // sanity check } // return the aligned word array - HeardWord words[] = new HeardWord[heardWords.size()]; + HeardWord[] words = new HeardWord[heardWords.size()]; words = heardWords.toArray(words); return words; } @@ -649,8 +576,8 @@ private void getWordTimes(HeardWord[] heardWords, String[] sentenceWords, List after the last matching hypword, so quit after last hyp word int h = 0; // index of next hypword to match for (Segment s : segments) { - if (h < heardWords.length && - HeardWord.asrWordText(s.getWord()).equals(HeardWord.asrWordText(heardWords[h].hypWord))) { // segment matches next hyp word + if (h < heardWords.length && HeardWord.asrWordText(s.getWord()).equals(HeardWord.asrWordText(heardWords[h].hypWord))) { + // segment matches next hyp word // fill in utterance boilerplate heardWords[h].utteranceStartTime = sentenceStartTime; @@ -665,16 +592,13 @@ private void getWordTimes(HeardWord[] heardWords, String[] sentenceWords, List= heardWords.length) // have matched all heardWords - break; + if (++h >= heardWords.length)break; // have matched all heardWords } } // work around pocketsphinx bug: // patch in previously-computed timings for hyp words before most recent pause - if (prePauseResult != null && prePauseResult.length <= heardWords.length) { - System.arraycopy(prePauseResult, 0, heardWords, 0, prePauseResult.length); - } + if (prePauseResult != null && prePauseResult.length <= heardWords.length) System.arraycopy(prePauseResult, 0, heardWords, 0, prePauseResult.length); // Now that we have correct times, fill in derived measures latency and silence addLatency(heardWords); @@ -688,40 +612,38 @@ private void addLatency(HeardWord[] heardWords) { if (h == 0) { // special case for first word // silence is time from start of utterance hw.silence = (int) (hw.startTime - hw.utteranceStartTime); - continue; // latency undefined for first word, remains -1 as initialized - } - // else on non-first word - - // silence is time from end of previous word. - hw.silence = (int) (hw.startTime - heardWords[h - 1].endTime); - - // Following gets "best case" [most charitable] latency as computed by the Reading Tutor: if current word is aligned - // with sentence word s, get time from the end of most recent hyp word aligned with sentence word s-1. - if (hw.iSentenceWord > 0) { - for (int hPrev = h - 1; hPrev >= 0; hPrev--) { // search backwards through prior hyp words - HeardWord priorHypWord = heardWords[hPrev]; - - if (priorHypWord.iSentenceWord == hw.iSentenceWord - 1) { - hw.latency = (int) (hw.startTime - priorHypWord.endTime); - break; + // latency undefined for first word, remains -1 as initialized + } else { + // silence is time from end of previous word. + hw.silence = (int)(hw.startTime - heardWords[h - 1].endTime); + + // Following gets "best case" [most charitable] latency as computed by the Reading Tutor: if current word is aligned + // with sentence word s, get time from the end of most recent hyp word aligned with sentence word s-1. + if (hw.iSentenceWord > 0) { + for (int hPrev = h - 1; hPrev >= 0; hPrev--) { // search backwards through prior hyp words + HeardWord priorHypWord = heardWords[hPrev]; + + if (priorHypWord.iSentenceWord == hw.iSentenceWord - 1) { + hw.latency = (int)(hw.startTime - priorHypWord.endTime); + break; + } } } } } } - // create and write header of hypothesis log file. 
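Since addLatency() above packs two related measures into each HeardWord, a small worked example may help. The timings below are made up, and the sketch assumes a strictly sequential reading in which heard word h is aligned with sentence word h; under that assumption latency and silence coincide for every word after the first, whereas in general latency looks back to the most recent heard word aligned with the previous sentence position.

```
// Worked example (made-up millisecond timings) of the silence/latency measures
// that addLatency() derives; this is not RoboTutor code, just an illustration.
public class LatencySketch {
    public static void main(String[] args) {
        long utteranceStart = 0;              // hypothetical utterance start time
        long[] start = {250, 900, 1400};      // heard-word start times
        long[] end   = {480, 1260, 1780};     // heard-word end times
        // assume heard word h is aligned with sentence word h (strictly sequential reading)

        for (int h = 0; h < start.length; h++) {
            long silence = (h == 0) ? start[h] - utteranceStart  // gap from utterance start
                                    : start[h] - end[h - 1];     // gap from previous heard word
            long latency = (h == 0) ? -1                         // undefined for the first word
                                    : start[h] - end[h - 1];     // end of the word aligned one position earlier
            System.out.println("word " + h + ": silence=" + silence + "ms latency=" + latency + "ms");
        }
    }
}
```

Here the first word reports 250 ms of leading silence with latency left at -1, and the later words report 420 ms and 140 ms for both measures.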
@Override protected void beginHypLog() { Log.i("beginHypLog", "starting hypothesis log"); try { File hypLog = getHypLogFile(captureLabel); + Log.d("beginHypLog", "hypothesis log file: " + hypLog); BufferedWriter bw = new BufferedWriter(new FileWriter(hypLog.getPath(), false)); String configPath = ""; - if (this.configFile != null) - configPath = this.configFile.getPath(); + if (this.configFile != null) configPath = this.configFile.getPath(); // write header information. Not exactly the same format as ReadingTutor .hea file bw.write("UTTERANCE ID: " + captureLabel + "\n"); @@ -736,9 +658,7 @@ protected void beginHypLog() { bw.close(); } catch (IOException e) { - Log.e("beginHypothesisLog", "Error writing hypothesis log file " + e); + Log.e("beginHypLog", "Error writing hypothesis log file " + e); } } - - } // end Listener class diff --git a/comp_listener/src/main/java/edu/cmu/xprize/listener/SpeechRecognizer.java b/comp_listener/src/main/java/edu/cmu/xprize/listener/SpeechRecognizer.java index 760d280f6..716cf6993 100644 --- a/comp_listener/src/main/java/edu/cmu/xprize/listener/SpeechRecognizer.java +++ b/comp_listener/src/main/java/edu/cmu/xprize/listener/SpeechRecognizer.java @@ -35,21 +35,13 @@ // pocketsphinx-android so we can customize it as SpeechRecognizer in our // package. Needed to access other decoder methods. -import android.media.AudioFormat; import android.media.AudioRecord; -import android.media.MediaRecorder.AudioSource; import android.os.Handler; import android.os.Looper; import android.text.TextUtils; import android.util.Log; -import java.io.ByteArrayOutputStream; import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; import java.util.ArrayList; import java.util.Collection; import java.util.HashSet; @@ -61,7 +53,6 @@ import edu.cmu.pocketsphinx.Hypothesis; import edu.cmu.pocketsphinx.RecognitionListener; -import static cmu.xprize.comp_logging.CAudioLogThread.BUFFER_SIZE; import static cmu.xprize.comp_logging.CAudioLogThread.readBuffer; import static java.lang.String.format; @@ -78,10 +69,10 @@ public class SpeechRecognizer { /** * the pocketsphinx Decoder, public so clients can access decoder methods */ - public Decoder decoder; + public Decoder decoder; private boolean wantFinal = false; - //private HashMap decoderMap; +// private HashMap decoderMap; /** * total number of samples passed to the decoder in the stream. Need to subtract off utterance start frame @@ -89,11 +80,11 @@ public class SpeechRecognizer { * modified by the background thread capturing from the microphone */ public volatile long nSamples; - public short mPeak = 0; + public short mPeak = 0; /** * size of the buffer to use. For mapping stream-based frame time, we want buffer size to be a multiple of - * centisecond frame size = 160. This size receives updated hypothesis 10 times a second + * centisecond frame size = 160. 
This size receives updated hypothesis 10 times a second */ private static final int BUFFER_SIZE = 1600; // 1/10 seconds worth at 16 Khz /** @@ -106,10 +97,10 @@ public class SpeechRecognizer { private final Handler mainHandler = new Handler(Looper.getMainLooper()); private final Collection listeners = new HashSet<>(); - private final int sampleRate; // for sample rate check - private volatile boolean isPausedRecognizer = true; // start in the paused state + private final int sampleRate; // for sample rate check + private volatile boolean isPausedRecognizer = true; // start in the paused state private volatile boolean isRunningRecognizer = false; - private volatile boolean isDecoding = false; // start in the not decoding state + private volatile boolean isDecoding = false; // start in the not decoding state private ASREvents eventManager; @@ -118,18 +109,17 @@ public class SpeechRecognizer { private ResultEvent nextHypothesisEvt = null; private ResultEvent prevHypothesisEvt = null; - private String[] prevAsrWords; + private String[] prevAsrWords; private ArrayList wordLastChanged; - private Long stableHypTime = 0L; - private String publishType = TCONST.STABLE_HYPOTHESES; -// private String publishType = TCONST.RAW_HYPOTHESES; + private Long stableHypTime = 0L; + private String publishType = TCONST.STABLE_HYPOTHESES; +// private String publishType = TCONST.RAW_HYPOTHESES; protected SpeechRecognizer(Config config) { sampleRate = (int) config.getFloat("-samprate"); - if (config.getFloat("-samprate") != sampleRate) - throw new IllegalArgumentException("sampling rate must be integer"); + if (config.getFloat("-samprate") != sampleRate) throw new IllegalArgumentException("sampling rate must be integer"); // save the configured raw log directory, for processing raw capture files rawLogDir = config.getString("-rawlogdir"); decoder = new Decoder(config); @@ -148,7 +138,6 @@ public void addListener(ITutorListener listener) { } } - /** * Removes listener. */ @@ -158,14 +147,12 @@ public void removeListener(RecognitionListener listener) { } } - /** * Starts recognition. Does nothing if recognition is active. * * @return true if recognition was actually started */ - public boolean startListening(String searchName, String label) { - + public void startListening(String searchName, String label) { Log.i("ASR", format("Start recognition \"%s\"", searchName)); decoder.setSearch(searchName); @@ -176,13 +163,9 @@ public boolean startListening(String searchName, String label) { } Log.i("ASR", "Start Utterance"); - eventManager.updateStartTime(TCONST.TIMEDSTART_EVENT, TCONST.ALL_EVENTS); - - return true; } - /** * Use this to restart the listener with a new utterance. Once the recognizer is stopped * in this fashion it may be restarted with setPauseListener or reInitializeListener - false @@ -193,7 +176,6 @@ public boolean startListening(String searchName, String label) { * @param pause */ public void setRestartListener(boolean pause) { - if (recognizerThread != null) { setPauseRecognizer(pause); @@ -202,77 +184,60 @@ public void setRestartListener(boolean pause) { Log.i("ASR", "Restart Recognizer"); // End the Utterance so we can restart the decoder with a new search - // Reset isDecoding so whent he thread restarts the decoder will start + // Reset isDecoding so when the thread restarts the decoder will start // a new utterance automatically. 
- // isDecoding = false; decoder.endUtt(); } } } - /** * * @param pausing */ public void setPauseRecognizer(boolean pausing) { - if (recognizerThread != null) { - // If we are releasing the thread and it is paused then notify the monitor // Don't send notifies when not required - // if (!pausing && isPausedRecognizer) { - // Note that notify must be within a synchronized block or it will fail // as it won't have the monitor currently - even though we know the thread // has been stopped. - // synchronized (recognizerThread) { Log.i("ASR", "Resume Thread"); isPausedRecognizer = false; try { recognizerThread.notify(); - } - catch (Exception e) { + } catch (Exception e) { Log.d("ASR", "Exception: " + e); } } - } - - // Otherwise if we are pausing and it is running - don't pause a paused thread - // this would end up in a deadlock - This can happen if there are nested calls - // - // Wait for the thread to pause - Once inside this block we know the - // recognizerThread is sitting at PAUSED_TAG (search text) - // - else if (pausing && !isPausedRecognizer) { - + } else if (pausing && !isPausedRecognizer) { + // Otherwise if we are pausing and it is running - don't pause a paused thread + // this would end up in a deadlock - This can happen if there are nested calls + // + // Wait for the thread to pause - Once inside this block we know the + // recognizerThread is sitting at PAUSED_TAG (search text) isPausedRecognizer = true; synchronized (recognizerThread) { - // Wait for the monitor - i.e. the thread to yield Log.i("ASR", "Paused Thread"); // Ensure hypothesis output queue is emptied so there is nothing to process while paused - // mainHandler.removeCallbacks(prevHypothesisEvt); } } } - } - /** * This kills the recognizer thread and sets it for GC * @return */ private boolean stopRecognizerThread() { - if (null == recognizerThread) - return false; + if (null == recognizerThread) return false; try { // Ensure the recognizerThread is not in the paused state @@ -281,7 +246,6 @@ private boolean stopRecognizerThread() { isRunningRecognizer = false; setPauseRecognizer(false); recognizerThread.join(); // waits until it finishes - } catch (InterruptedException e) { Log.i("ASR", "Stop Exception: " + e); // Restore the interrupted status. @@ -292,12 +256,11 @@ private boolean stopRecognizerThread() { // there is a new recognizer thread created. // recognizerThread = null; - isDecoding = false; + isDecoding = false; return true; } - /** * Stops recognition. All listeners should receive final result if there is * any. Does nothing if recognition is not active. @@ -305,7 +268,6 @@ private boolean stopRecognizerThread() { * @return true if recognition was actually stopped */ public boolean stop() { - Log.i("ASR", "Stop Recognition Thread"); boolean result = stopRecognizerThread(); @@ -317,14 +279,11 @@ public boolean stop() { // Note that the way the listener architecture works currently you will have stopped // processing results by the time this happens so in general you don't want the final // hypothesis. - // - if (wantFinal) - postResult(hypothesis, TCONST.FINAL_HYPOTHESIS); + if (wantFinal) postResult(hypothesis, TCONST.FINAL_HYPOTHESIS); } return result; } - /** * Cancels recognition. Listeners do not receive final result. Does nothing * if recognition is not active. 
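Seen from the caller's side, the startListening()/stop() surface above reduces to a short lifecycle. The sketch below is hypothetical and not an actual RoboTutor entry point: the class name, the "sentence" search name, and the "utt_0001" label are placeholders, and it assumes the code sits alongside the edu.cmu.xprize.listener package, that the recognizer was already built (for example via SpeechRecognizerSetup, as in the Listener classes), and that a search named "sentence" has already been registered with the decoder, as listenFor() does.

```
import android.util.Log;

// Hypothetical caller-side sketch of the SpeechRecognizer lifecycle; names are placeholders.
class RecognizerLifecycleSketch {

    void runOneUtterance(SpeechRecognizer recognizer, ITutorListener client) {
        // client receives onBeginningOfSpeech/onEndOfSpeech and the hypothesis callbacks
        recognizer.addListener(client);

        // resumes the capture thread and starts a new utterance under the named search
        recognizer.startListening("sentence", "utt_0001");

        // ... tutor runs; partial/stable hypotheses arrive via the listener callbacks ...

        // ends capture; a final hypothesis is forwarded only when wantFinal is set
        boolean stopped = recognizer.stop();
        Log.i("ASR", "recognizer stopped: " + stopped);
    }
}
```

Results reach the client through the posted ResultEvent runnables, so onPartialResult, onStableResult, and onResult are delivered on the main looper rather than on the capture thread.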
@@ -332,17 +291,13 @@ public boolean stop() { * @return true if recognition was actually canceled */ public boolean cancel() { - Log.i("ASR", "Stop Recognition Thread"); boolean result = stopRecognizerThread(); - if (result) { - Log.i("ASR", "Cancelled recognition"); - } + if (result) Log.i("ASR", "Cancelled recognition"); return result; } - /** * Gets name of the currently active search. * @@ -352,7 +307,6 @@ public String getSearchName() { return decoder.getSearch(); } - /** * Add search based on an fsg language model */ @@ -393,7 +347,6 @@ public void addKeyphraseSearch(String name, String phrase) { decoder.setKeyphrase(name, phrase); } - /** * Adds search based on a keyphrase file. * @@ -404,115 +357,84 @@ public void addKeywordSearch(String name, File file) { decoder.setKws(name, file.getPath()); } - /** * Access the listener active state * * @return */ public synchronized boolean isListening() { - - boolean _listening = false; - - if (isRunningRecognizer && !isPausedRecognizer) { - _listening = true; - } - - return _listening; + return (isRunningRecognizer && !isPausedRecognizer); } - /** * */ private final class RecognizerThread extends Thread { private final String label; // label for the current capture - private boolean isRecording = false; - private long ASRTimer; // Used for benchmarking + private boolean isRecording = false; + private long ASRTimer; // Used for benchmarking // constructor stores utterance id used to name capture file public RecognizerThread(String uttid, String rawLogDir) { - label = uttid; + label = uttid; isPausedRecognizer = false; } @Override public void run() { - synchronized (recognizerThread) { + int nread; - int nread; - - boolean flagsDirty = true; - boolean inSpeech = false; - short[] buffer = new short[BUFFER_SIZE]; - - String lastAudioEvent = TCONST.UNKNOWN_TYPE; + boolean inSpeech = false; + short[] buffer = new short[BUFFER_SIZE]; // AudioRecord recorder = null; // try { // recorder = new AudioRecord(AudioSource.VOICE_RECOGNITION, sampleRate, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, 8192); -// } -// catch (Exception e) { +// } catch (Exception e) { // Log.d("ASR", "AudioRecorder Create Failed: " + e); // } isRunningRecognizer = true; // Collect audio samples continuously while not paused and until the - // Thread is killed. This allow UI/UX activity while the listener is still + // Thread is killed. This allow UI/UX activity while the listener is still // listening to the mic - // while (isRunningRecognizer) { - - // We always start the thread in the paused state - // + // We never start the thread in the paused state if (isPausedRecognizer) { - try { // recorder.stop(); isRecording = false; - // If we are starting a new utterance stop recording and - // flush the input - i.e. clear the recorder - // + // If we are starting a new utterance stop recording and flush the input - i.e. 
clear the recorder if (!isDecoding) { - // recorder.stop(); - do { // nread = recorder.read(buffer, 0, buffer.length); + // TODO: see if flushing the buffer still works on pausing with permanent audio recording nread = readBuffer(buffer, BUFFER_SIZE); Log.i("ASR", "Flush buffer: nread = " + nread); } while (nread > 0); } - // PAUSED_TAG - // Notes: // Waits should always be in loops like this // TODO: understand why interrupt causes freeze while in wait state // You should not interrupt() while in a wait - // while (isPausedRecognizer) { Log.i("ASR","Recognizer Paused"); recognizerThread.wait(); } - } catch (InterruptedException e) { Log.i("ASR","Wait Exception"); e.printStackTrace(); } } - // Thread interrupt is not currently used but this is the recommended mechanism - // should it be required. - // - if (isInterrupted()) { - isRunningRecognizer = false; - } + if (isInterrupted()) isRunningRecognizer = false; if (!isRunningRecognizer) { Log.d("ASR", "Terminating ASR Thread"); @@ -522,61 +444,50 @@ public void run() { // We start the thread with the decoder stopped and also when we // restart for a new utterance - we end the utterance prior to // the decoder fsgsearch update - // if (!isDecoding) { Log.i("ASR","Start Decoder"); // label utterance with passed-in id decoder.startUtt(); - inSpeech = false; - prevHypothesis = null; - prevAsrWords = new String[0]; - wordLastChanged = new ArrayList(); - lastAudioEvent = TCONST.UNKNOWN_TYPE; - isDecoding = true; + inSpeech = false; + prevHypothesis = null; + prevAsrWords = new String[0]; + wordLastChanged = new ArrayList<>(); + isDecoding = true; } // Ensure we are recording while the thread is running. - // if (!isRecording) { - // Don't want to do this - misses front end of utterance // // Flush the input buffer // int readf = recorder.read(buffer, 0, buffer.length); // -// if (readf > 0) { -// Log.i("ASR", "Flushed input buffer: " + readf); -// } - +// if (readf > 0) Log.i("ASR", "Flushed input buffer: " + readf); Log.i("ASR", "Resume recording"); // recorder.startRecording(); isRecording = true; - nread = 0; - } - else { + nread = 0; + } else { // Clean out the buffered input // nread = recorder.read(buffer, 0, buffer.length); nread = readBuffer(buffer, BUFFER_SIZE); - Log.i("ASR", "Clean out buffer: nread = " + nread); } - //Log.i("ASR","ASR RAW-BYTES: " + nread); + // Log.i("ASR","ASR RAW-BYTES: " + nread); if (nread == AudioRecord.ERROR_INVALID_OPERATION || nread == AudioRecord.ERROR_BAD_VALUE) { Log.i("ASR","Read Error"); throw new RuntimeException("error reading audio buffer"); - } else if (nread > 0) { - +// Log.i("ASR", "Read from buffer: nread = " + nread); // This filters low power segments that otherwise cause false positives // in number_speaking tutor - // // double RMS = publishRMS(buffer, nread); // if (RMS > 4) { ASRTimer = System.currentTimeMillis(); decoder.processRaw(buffer, nread, false, false); - Log.d("ASR", "Time in processRaw: " + (System.currentTimeMillis() - ASRTimer)); +// Log.d("ASR", "Time in processRaw: " + (System.currentTimeMillis() - ASRTimer)); nSamples += nread; @@ -584,7 +495,6 @@ public void run() { // i.e. 
false means relative silence - // if (decoder.getInSpeech() != inSpeech) { - inSpeech = decoder.getInSpeech(); // Measure times from @@ -597,7 +507,6 @@ public void run() { eventManager.fireStaticEvent(TCONST.SOUND_EVENT); eventManager.updateStartTime(TCONST.TIMEDSOUND_EVENT, TCONST.TIMEDSILENCE_EVENT); // Hearing a sound resets the silence timer - } else { eventManager.fireStaticEvent(TCONST.SILENCE_EVENT); eventManager.updateStartTime(TCONST.TIMEDSILENCE_EVENT, TCONST.UNKNOWNEVENT_TYPE); @@ -608,7 +517,7 @@ public void run() { // ASRTimer = System.currentTimeMillis(); Hypothesis hypothesis = decoder.hyp(); - Log.d("ASR", "Time in Decoder: " + (System.currentTimeMillis() - ASRTimer)); +// Log.d("ASR", "Time in Decoder: " + (System.currentTimeMillis() - ASRTimer)); // If there is a valid hypothesis string from the decoder continue // Once the decoder returns a hypothesis it will not go back to @@ -629,7 +538,6 @@ public void run() { } // While running we continuously watch for timed event firings (timeouts) - // eventManager.fireTimedEvents(); } @@ -649,54 +557,36 @@ public void run() { } } - - private void publishRawHypothesis(Hypothesis hypothesis ) { - - boolean hypChanged = false; - String hypString; - - // Reset the Hypothesis Flag - We don't want to emit events unless - // there has been an actual change of hypothesis - // - hypChanged = false; - - // DEBUG - hypString = hypothesis.getHypstr(); + private void publishRawHypothesis(Hypothesis hypothesis) { + boolean hypChanged = false; + String hypString = hypothesis.getHypstr(); // If this is the first Hypothesis // Record it so we can test for changes and set hypchanged flag - // if (prevHypothesis == null) { prevHypothesis = hypothesis; - hypChanged = true; + hypChanged = true; Log.i("ASR", "First Hypothesis: " + hypString); - - } else { - + } else if (prevHypothesis.getHypstr().equals(hypothesis.getHypstr())) { // If the hypothesis hasn't changed they have stopped speaking. 
// or are speaking Noise - not intelligible words - // - if (prevHypothesis.getHypstr().equals(hypothesis.getHypstr())) { - hypChanged = false; - //Log.i("ASR","Same Hypothesis: " + hypString); + hypChanged = false; - } else { - hypChanged = true; - prevHypothesis = hypothesis; + Log.i("ASR", "Same Hypothesis: " + hypString); + } else { + hypChanged = true; + prevHypothesis = hypothesis; - Log.i("ASR", "Updated Hypothesis: " + hypString); - } + Log.i("ASR", "Updated Hypothesis: " + hypString); } // If the hypothesis has changed let the client know // Update the eventTimer to indicate the last thing that happened // Updating the word event resets silence and noise - // if (hypChanged) { eventManager.fireStaticEvent(TCONST.WORD_EVENT); - eventManager.updateStartTime(TCONST.TIMEDWORD_EVENT, - TCONST.TIMEDSILENCE_EVENT | TCONST.TIMEDSOUND_EVENT); + eventManager.updateStartTime(TCONST.TIMEDWORD_EVENT, TCONST.TIMEDSILENCE_EVENT | TCONST.TIMEDSOUND_EVENT); Log.i("ASR", "Processing Hypothesis"); @@ -705,100 +595,77 @@ private void publishRawHypothesis(Hypothesis hypothesis ) { } } - private void publishStableHypothesis(Hypothesis hypothesis) { + ArrayList resultSet = new ArrayList<>();; - ArrayList resultSet = new ArrayList();; - - boolean hypStable = false; - boolean newStableHyp = false; - String hypString; - + boolean newStableHyp = false; + String hypString = hypothesis.getHypstr(); // get the array of hypothesis words - // get the array of hypothesis words - // - hypString = hypothesis.getHypstr(); - - Log.d("STABLE", "HYP LIST: " + hypString); + Log.d("ASR", "HYP LIST: " + hypString); String[] asrWords = hypString.split("\\s+"); - long currTime = System.currentTimeMillis(); - // For new hypothesis words record their last changed time. - // if (asrWords.length > prevAsrWords.length) { - for (int i1 = prevAsrWords.length; i1 < asrWords.length; i1++) { wordLastChanged.add(currTime); - Log.d("STABLE", "Word Start: " + asrWords[i1]); + + Log.d("ASR", "Word Start: " + asrWords[i1]); } } - //Log.d("STABLE", "asrWords : " + TextUtils.join(" ", asrWords)); - //Log.d("STABLE", "prevAsrWords: " + TextUtils.join(" ", prevAsrWords)); + Log.d("ASR", "asrWords : " + TextUtils.join(" ", asrWords)); + Log.d("ASR", "prevAsrWords: " + TextUtils.join(" ", prevAsrWords)); - // Scan for new sequential words that have past their stable time tests - // (i.e. period which they have been unchanged) + // Scan for new sequential words that have past their stable time tests (i.e. period which they have been unchanged) // Note that asrWords can contract so we have to update the scan limit based on both - // int maxScan = Math.min(prevAsrWords.length, asrWords.length); for (int i1 = 0; i1 < maxScan; i1++) { - // If the word has changed - update its last changed time. - // - if (!asrWords[i1].equals(prevAsrWords[i1])) { + if (!asrWords[i1].equals(prevAsrWords[i1]) && !("START_" + asrWords[i1]).equals(prevAsrWords[i1])) { wordLastChanged.set(i1, currTime); - Log.d("STABLE", "Word Changed: " + asrWords[i1] + " : " + prevAsrWords[i1]); - break; - } - // Otherwise add the word to the potential update hypothesis - // We only want to emit hypothesis sets with added, new entries. - // - else { + Log.d("ASR", "Word Changed: " + asrWords[i1] + " from: " + prevAsrWords[i1]); + + break; + } else if ((currTime - wordLastChanged.get(i1)) > TCONST.STABLE_TIME) { // Find words that have passed their stable test and set their lastChanged - // so they will not trigger on the next pass. 
Then we emit all previously - // processed words and any newly triggered words. - // - if ((currTime - wordLastChanged.get(i1)) > TCONST.STABLE_TIME) { - wordLastChanged.set(i1, Long.MAX_VALUE); - newStableHyp = true; + // so they will not trigger on the next pass. Then we emit any newly triggered words. + wordLastChanged.set(i1, Long.MAX_VALUE); + newStableHyp = true; + resultSet.add(asrWords[i1]); - resultSet.add(asrWords[i1]); - Log.d("STABLE", "New word: " + asrWords[i1]); - } + Log.d("ASR", "New word: " + asrWords[i1]); + } else if (wordLastChanged.get(i1) != Long.MAX_VALUE) { // Don't look past the last word that hasn't passed its test // i.e. we process stable words sequentially - this could be made // optional if you want to have a look ahead. - // - else if (wordLastChanged.get(i1) != Long.MAX_VALUE) { - Log.d("STABLE", "unstable word: " + asrWords[i1]); - break; - } else { - resultSet.add(asrWords[i1]); - Log.d("STABLE", "old word: " + asrWords[i1]); - } + Log.d("ASR", "unstable word: " + asrWords[i1]); + + break; + } else { + // Otherwise add the word to the potential update hypothesis + // We only want to emit hypothesis sets with added, new entries. + resultSet.add(asrWords[i1]); + + Log.d("ASR", "old word: " + asrWords[i1]); } } // Update the current word set - // prevAsrWords = asrWords; // If the hypothesis has changed let the client know // Update the eventTimer to indicate the last event that occurred // Updating the word event resets silence and noise - // if (newStableHyp) { eventManager.fireStaticEvent(TCONST.WORD_EVENT); - eventManager.updateStartTime(TCONST.TIMEDWORD_EVENT, - TCONST.TIMEDSILENCE_EVENT | TCONST.TIMEDSOUND_EVENT); + eventManager.updateStartTime(TCONST.TIMEDWORD_EVENT, TCONST.TIMEDSILENCE_EVENT | TCONST.TIMEDSOUND_EVENT); - Log.d("STABLE", "Processing Hypothesis: " + TextUtils.join(" ", resultSet)); + Log.d("ASR", "Processing Hypothesis: " + TextUtils.join(" ", resultSet)); // If there is a new Hypothesis then process it in the subclass of ListenerBase try { @@ -809,12 +676,10 @@ else if (wordLastChanged.get(i1) != Long.MAX_VALUE) { } } - private double publishRMS(short[] buffer, int count) { - double RMS = 0; double sum = 0; - Short peak= 0; + Short peak = 0; if (count > 0) { for (int i1 = 0; i1 < count; i1++) { @@ -822,11 +687,8 @@ private double publishRMS(short[] buffer, int count) { sum = Math.pow(sample, 2); - if (sample > peak) - peak = sample; - - if (sample > mPeak) - mPeak = sample; + if (sample > peak) peak = sample; + if (sample > mPeak) mPeak = sample; } RMS = Math.sqrt(sum / count); @@ -837,14 +699,13 @@ private double publishRMS(short[] buffer, int count) { return RMS; } - /** * Manage Looper queue so we don't have unprocessed hypotheses stacking up * Throw away previous hypothesis and only process the new one. * - * This is done to manage degenerate cases where the hyopthesis becomes + * This is done to manage degenerate cases where the hypothesis becomes * very long and MultiMatch is being employed to process it. In these - * cirucmstances MM can take prolonged periods to process a result. + * circumstances MM can take prolonged periods to process a result. 
* based on current MM design (Mar 2016) * * Note this purging is a good idea in any case as a stacked hypothesis is just a @@ -854,7 +715,6 @@ private double publishRMS(short[] buffer, int count) { * @param resultType */ private void postResult(Hypothesis hypothesis, String resultType) { - // If there is a new Hypothesis then process it // Note- initial null is ignored in removeCallBacks nextHypothesisEvt = new ResultEvent(hypothesis, resultType); @@ -863,7 +723,6 @@ private void postResult(Hypothesis hypothesis, String resultType) { } private void postResult(String[] hypothesisSet, String resultType) { - // If there is a new Hypothesis then process it // Note- initial null is ignored in removeCallBacks nextHypothesisEvt = new ResultEvent(hypothesisSet, resultType); @@ -877,7 +736,6 @@ private void postResult(String[] hypothesisSet, String resultType) { * @param nextHypothesisEvt */ private void enQueueResult(ResultEvent nextHypothesisEvt) { - // remove last hypothesis if it hasn't been processed mainHandler.removeCallbacks(prevHypothesisEvt); mainHandler.post(nextHypothesisEvt); @@ -885,18 +743,18 @@ private void enQueueResult(ResultEvent nextHypothesisEvt) { prevHypothesisEvt = nextHypothesisEvt; } - private abstract class RecognitionEvent implements Runnable { + public void run() { ITutorListener[] emptyArray = new ITutorListener[0]; - for (ITutorListener listener : listeners.toArray(emptyArray)) - execute(listener); + for (ITutorListener listener : listeners.toArray(emptyArray)) execute(listener); } protected abstract void execute(ITutorListener listener); } private class InSpeechChangeEvent extends RecognitionEvent { + private final boolean state; InSpeechChangeEvent(boolean state) { @@ -905,42 +763,44 @@ private class InSpeechChangeEvent extends RecognitionEvent { @Override protected void execute(ITutorListener listener) { - if (state) - listener.onBeginningOfSpeech(); - else - listener.onEndOfSpeech(); + if (state) listener.onBeginningOfSpeech(); + else listener.onEndOfSpeech(); } } private class ResultEvent extends RecognitionEvent { + protected final Hypothesis hypothesis; - protected final String[] hypothesisSet; - private final String resultType; + protected final String[] hypothesisSet; + private final String resultType; + ResultEvent(Hypothesis _hypothesis, String _resultType) { - this.hypothesis = _hypothesis; + this.hypothesis = _hypothesis; this.hypothesisSet = null; - this.resultType = _resultType; + this.resultType = _resultType; } ResultEvent(String[] _hypothesisSet, String _resultType) { - this.hypothesis = null; + this.hypothesis = null; this.hypothesisSet = _hypothesisSet; - this.resultType = _resultType; + this.resultType = _resultType; } + @Override protected void execute(ITutorListener listener) { - Log.d("ASR", "Handle Recognizer ResultEvent"); switch (resultType) { case TCONST.FINAL_HYPOTHESIS: listener.onResult(hypothesis); break; + case TCONST.PARTIAL_HYPOTHESIS: listener.onPartialResult(hypothesis); break; + case TCONST.STABLE_HYPOTHESIS: listener.onStableResult(hypothesisSet); break; @@ -949,6 +809,7 @@ protected void execute(ITutorListener listener) { } private class timeOutEvent extends RecognitionEvent { + protected final int eventType; @@ -963,7 +824,6 @@ protected void execute(ITutorListener listener) { } } - public void configTimedEvent(int eventType, long newTimeout) { eventManager.configTimedEvent(eventType, newTimeout); } @@ -995,17 +855,17 @@ public void resetStaticEvent(int eventType) { */ private class ASREvents { - int lastAudioEvent; + int 
lastAudioEvent; private long audioEventTimer; private boolean listenForSilence = false; - private boolean listenForSound = false; - private boolean listenForWords = false; + private boolean listenForSound = false; + private boolean listenForWords = false; - private boolean WaitAfterStart = false; + private boolean WaitAfterStart = false; private boolean WaitAfterSilence = false; - private boolean WaitAfterSound = false; - private boolean WaitAfterWord = false; + private boolean WaitAfterSound = false; + private boolean WaitAfterWord = false; private long lastWordHeard; private long lastSoundHeard; @@ -1022,10 +882,10 @@ private class ASREvents { private long wordHeardTimeout; private long startTimeOut; - private boolean isStartTriggered = true; - private boolean isSilenceTriggered = true; - private boolean isNoiseTriggered = false; - private boolean isWordTriggered = false; + private boolean isStartTriggered = true; + private boolean isSilenceTriggered = true; + private boolean isNoiseTriggered = false; + private boolean isWordTriggered = false; public ASREvents() { @@ -1045,84 +905,76 @@ public ASREvents() { * @param newTimeout */ public synchronized void configTimedEvent(int eventType, long newTimeout) { - switch (eventType) { - case TCONST.TIMEDSTART_EVENT: Log.d("ASR", "CONFIG TIMED START: " + newTimeout); - startTimeOut = newTimeout; - WaitAfterStart = true; + startTimeOut = newTimeout; + WaitAfterStart = true; // isStartTriggered = false; This is only ever done once break; case TCONST.TIMEDSILENCE_EVENT: Log.d("ASR", "CONFIG TIMED SILENCE: " + newTimeout); - silenceTimeout = newTimeout; - WaitAfterSilence = true; + silenceTimeout = newTimeout; + WaitAfterSilence = true; isSilenceTriggered = false; break; case TCONST.TIMEDSOUND_EVENT: Log.d("ASR", "CONFIG TIMED SOUND: " + newTimeout); - NoiseTimeout = newTimeout; - WaitAfterSound = true; + NoiseTimeout = newTimeout; + WaitAfterSound = true; isNoiseTriggered = false; break; case TCONST.TIMEDWORD_EVENT: Log.d("ASR", "CONFIG TIMED WORD: " + newTimeout); - wordHeardTimeout = newTimeout; - WaitAfterWord = true; + wordHeardTimeout = newTimeout; + WaitAfterWord = true; isWordTriggered = false; break; } } - /** * Reset and disable any * * @param resetMap */ public synchronized void resetTimedEvent(int resetMap) { - if ((resetMap & TCONST.TIMEDSTART_EVENT) != 0) { - Log.d("ASR", "RESET TIMED START: "); - WaitAfterStart = false; + WaitAfterStart = false; isStartTriggered = false; - startTimeOut = Long.MAX_VALUE; - startTime = Long.MAX_VALUE; + startTimeOut = Long.MAX_VALUE; + startTime = Long.MAX_VALUE; } if ((resetMap & TCONST.TIMEDSILENCE_EVENT) != 0) { - Log.d("ASR", "RESET TIMED SILENCE: "); - WaitAfterSilence = false; + WaitAfterSilence = false; isSilenceTriggered = false; - silenceTimeout = Long.MAX_VALUE; - lastSilence = Long.MAX_VALUE; + silenceTimeout = Long.MAX_VALUE; + lastSilence = Long.MAX_VALUE; } if ((resetMap & TCONST.TIMEDSOUND_EVENT) != 0) { Log.d("ASR", "RESET TIMED SOUND: "); - WaitAfterSound = false; + WaitAfterSound = false; isNoiseTriggered = false; - NoiseTimeout = Long.MAX_VALUE; - lastSoundHeard = Long.MAX_VALUE; + NoiseTimeout = Long.MAX_VALUE; + lastSoundHeard = Long.MAX_VALUE; } if ((resetMap & TCONST.TIMEDWORD_EVENT) != 0) { - Log.d("ASR", "RESET TIMED WORD: "); - WaitAfterWord = false; - isWordTriggered = false; + WaitAfterWord = false; + isWordTriggered = false; wordHeardTimeout = Long.MAX_VALUE; - lastWordHeard = Long.MAX_VALUE; + lastWordHeard = Long.MAX_VALUE; } } - /** * Enable or disable various static event 
types. These are events that occur whenever a * defined state is entered. @@ -1131,25 +983,21 @@ public synchronized void resetTimedEvent(int resetMap) { * @param listen */ public synchronized void configStaticEvent(int eventType, boolean listen) { - switch (eventType) { - case TCONST.SILENCE_EVENT: - listenForSilence = listen; + listenForSilence = listen; break; case TCONST.SOUND_EVENT: - listenForSound = listen; + listenForSound = listen; break; case TCONST.WORD_EVENT: - listenForWords = listen; + listenForWords = listen; break; - } } - /** * Update the start times for timed events * @@ -1158,13 +1006,13 @@ public synchronized void configStaticEvent(int eventType, boolean listen) { * Word = time since last hypothesis change * * This is where timed event types are "triggered" which sets their start time and their - * triggered flag which is consummed in fireTimedEvents + * triggered flag which is consumed in fireTimedEvents * * The timed silence event is the time in total silence - which is actually a difficult * state to stay in - if the mic picks up anything we leave this state - restarted each * time you enter silence. * - * The timed sound event is the time since any sound aboce threshold was heard and may + * The timed sound event is the time since any sound above threshold was heard and may * include a silence gap after. After a silence gap this would be restarted each * time the mic exceeds threshold. * @@ -1187,8 +1035,7 @@ public synchronized void configStaticEvent(int eventType, boolean listen) { * @param resetMap */ public synchronized void updateStartTime(int eventType, int resetMap) { - - lastAudioEvent = eventType; + lastAudioEvent = eventType; audioEventTimer = System.currentTimeMillis(); @@ -1199,37 +1046,41 @@ public synchronized void updateStartTime(int eventType, int resetMap) { case TCONST.TIMEDSTART_EVENT: isStartTriggered = true; - startTime = audioEventTimer; + startTime = audioEventTimer; break; + case TCONST.TIMEDSILENCE_EVENT: isSilenceTriggered = true; - lastSilence = audioEventTimer; + lastSilence = audioEventTimer; break; + case TCONST.TIMEDSOUND_EVENT: isNoiseTriggered = true; - lastSoundHeard = audioEventTimer; + lastSoundHeard = audioEventTimer; break; + case TCONST.TIMEDWORD_EVENT: isWordTriggered = true; - lastWordHeard = audioEventTimer; + lastWordHeard = audioEventTimer; break; } if ((resetMap & TCONST.TIMEDSILENCE_EVENT) != 0) { isSilenceTriggered = false; - lastSilence = Long.MAX_VALUE; + lastSilence = Long.MAX_VALUE; } + if ((resetMap & TCONST.TIMEDSOUND_EVENT) != 0) { isNoiseTriggered = false; - lastSoundHeard = Long.MAX_VALUE; + lastSoundHeard = Long.MAX_VALUE; } + if ((resetMap & TCONST.TIMEDWORD_EVENT) != 0) { isWordTriggered = false; - lastWordHeard = Long.MAX_VALUE; + lastWordHeard = Long.MAX_VALUE; } } - /** * Fire the single event that indicate a particular state has begun * At the moment these are one shot events - i.e. 
reset when fired @@ -1237,7 +1088,6 @@ public synchronized void updateStartTime(int eventType, int resetMap) { * @param eventType */ public synchronized void fireStaticEvent(int eventType) { - switch (eventType) { case TCONST.SILENCE_EVENT: if (listenForSilence) { @@ -1245,15 +1095,17 @@ public synchronized void fireStaticEvent(int eventType) { mainHandler.post(new timeOutEvent(TCONST.SILENCE_EVENT)); } break; + case TCONST.SOUND_EVENT: if (listenForSound) { Log.i("ASR", "Sound Heard"); mainHandler.post(new timeOutEvent(TCONST.SOUND_EVENT)); } break; + case TCONST.WORD_EVENT: if (listenForWords) { - Log.i("ASR", "Word Heard - Hyp updated"); + Log.i("ASR", "Word Heard - Hypothesis Updated"); mainHandler.post(new timeOutEvent(TCONST.WORD_EVENT)); } break; @@ -1264,9 +1116,8 @@ public synchronized void fireStaticEvent(int eventType) { configStaticEvent(eventType, false); } - /** - * We ccnstantly watch for timed events within the reognizer thread. This is called on each + * We constantly watch for timed events within the recognizer thread. This is called on each * iteration and the "WaitAfter..." flags dictate which timed events are actively watched. * * At the moment these are one shot events - i.e. reset when fired @@ -1276,7 +1127,6 @@ public synchronized void fireStaticEvent(int eventType) { * See #configTimedEvent */ public synchronized void fireTimedEvents() { - // startTime is the listener last started listening // lastSilence is the time since the mic went silent and stayed silent // lastSoundHeard is the time since the mic started hearing sound @@ -1284,43 +1134,33 @@ public synchronized void fireTimedEvents() { long time = System.currentTimeMillis(); - startGap = (time - startTime); + startGap = (time - startTime); silenceGap = (time - lastSilence); - NoiseGap = (time - lastSoundHeard); + NoiseGap = (time - lastSoundHeard); attemptGap = (time - lastWordHeard); if (WaitAfterStart && isStartTriggered) { - if (startGap > startTimeOut) { resetTimedEvent(TCONST.TIMEDSTART_EVENT); Log.i("ASR", "Start Timout Fired"); mainHandler.post(new timeOutEvent(TCONST.TIMEDSTART_EVENT)); } - } - - else if (WaitAfterSilence && isSilenceTriggered) { - + } else if (WaitAfterSilence && isSilenceTriggered) { if (silenceGap > silenceTimeout) { resetTimedEvent(TCONST.TIMEDSILENCE_EVENT); Log.i("ASR", "Silence Timout Fired"); mainHandler.post(new timeOutEvent(TCONST.TIMEDSILENCE_EVENT)); } - } - - else if (WaitAfterSound && isNoiseTriggered) { - + } else if (WaitAfterSound && isNoiseTriggered) { if (NoiseGap > NoiseTimeout) { resetTimedEvent(TCONST.TIMEDSOUND_EVENT); Log.i("ASR", "Noise Timout Fired"); mainHandler.post(new timeOutEvent(TCONST.TIMEDSOUND_EVENT)); } - } - - else if (WaitAfterWord && isWordTriggered) { - + } else if (WaitAfterWord && isWordTriggered) { if (attemptGap > wordHeardTimeout) { resetTimedEvent(TCONST.TIMEDWORD_EVENT); @@ -1328,8 +1168,6 @@ else if (WaitAfterWord && isWordTriggered) { mainHandler.post(new timeOutEvent(TCONST.TIMEDWORD_EVENT)); } } - } } - } diff --git a/comp_logging/src/main/java/cmu/xprize/comp_logging/CAudioLogThread.java b/comp_logging/src/main/java/cmu/xprize/comp_logging/CAudioLogThread.java index b99eb49da..abced8cf9 100644 --- a/comp_logging/src/main/java/cmu/xprize/comp_logging/CAudioLogThread.java +++ b/comp_logging/src/main/java/cmu/xprize/comp_logging/CAudioLogThread.java @@ -62,7 +62,7 @@ public void run() { readCount = 0; } else { readCount = recorder.read(buffer, 0, BUFFER_SIZE); - Log.i("AudioLog", "Read from recorder: read_count = " + readCount); +// 
Log.i("AudioLog", "Read from recorder: read_count = " + readCount); } if (readCount == AudioRecord.ERROR_INVALID_OPERATION || readCount == AudioRecord.ERROR_BAD_VALUE) { diff --git a/comp_reading/src/main/java/cmu/xprize/rt_component/CASB_Content.java b/comp_reading/src/main/java/cmu/xprize/rt_component/CASB_Content.java index 75fcc4d53..9ae70d5fc 100644 --- a/comp_reading/src/main/java/cmu/xprize/rt_component/CASB_Content.java +++ b/comp_reading/src/main/java/cmu/xprize/rt_component/CASB_Content.java @@ -30,6 +30,7 @@ public class CASB_Content implements ILoadableObject { // json loadable public CASB_Narration narration[]; public String sentence; + public String variant; @Override public void loadJSON(JSONObject jsonObj, IScope scope) { diff --git a/comp_reading/src/main/java/cmu/xprize/rt_component/CASB_Seg.java b/comp_reading/src/main/java/cmu/xprize/rt_component/CASB_Seg.java index ffced5f17..b7b4a461c 100644 --- a/comp_reading/src/main/java/cmu/xprize/rt_component/CASB_Seg.java +++ b/comp_reading/src/main/java/cmu/xprize/rt_component/CASB_Seg.java @@ -34,7 +34,6 @@ public class CASB_Seg implements ILoadableObject { @Override public void loadJSON(JSONObject jsonObj, IScope scope) { - JSON_Helper.parseSelf(jsonObj, this, CClassMap.classMap, scope); } } diff --git a/comp_reading/src/main/java/cmu/xprize/rt_component/CASB_data.java b/comp_reading/src/main/java/cmu/xprize/rt_component/CASB_data.java index 65737d9d5..fd03d5a46 100644 --- a/comp_reading/src/main/java/cmu/xprize/rt_component/CASB_data.java +++ b/comp_reading/src/main/java/cmu/xprize/rt_component/CASB_data.java @@ -30,6 +30,7 @@ public class CASB_data implements ILoadableObject { public CASB_Content text[][]; public String image; public String prompt; + public String variant; @Override public void loadJSON(JSONObject jsonObj, IScope scope) { diff --git a/comp_reading/src/main/java/cmu/xprize/rt_component/CRt_Component.java b/comp_reading/src/main/java/cmu/xprize/rt_component/CRt_Component.java index 243291e75..83235814b 100644 --- a/comp_reading/src/main/java/cmu/xprize/rt_component/CRt_Component.java +++ b/comp_reading/src/main/java/cmu/xprize/rt_component/CRt_Component.java @@ -33,7 +33,6 @@ import java.util.ArrayList; import java.util.HashMap; -import java.util.Iterator; import java.util.Map; import cmu.xprize.comp_logging.CErrorManager; @@ -93,21 +92,15 @@ public class CRt_Component extends ViewAnimator implements IEventListener, IVMan protected boolean _scrollVertical = false; - private Animation slide_left_to_right; private Animation slide_right_to_left; private Animation slide_bottom_up; private Animation slide_top_down; - // json loadable - // public CData_Index[] dataSource; - - // This is used to map "type" (class names) in the index to real classes - // static public HashMap viewClassMap = new HashMap(); static { @@ -130,20 +123,17 @@ public CRt_Component(Context context, AttributeSet attrs) { public void init(Context context, AttributeSet attrs) { - inflate(getContext(), R.layout.rt__component, this); mContext = context; - slide_left_to_right = AnimationUtils.loadAnimation(mContext, R.anim.slide_left_to_right); - slide_right_to_left = AnimationUtils.loadAnimation(mContext, R.anim.slide_right_to_left); - slide_top_down = AnimationUtils.loadAnimation(mContext, R.anim.slide_top_down); - slide_bottom_up = AnimationUtils.loadAnimation(mContext, R.anim.slide_bottom_up); + slide_left_to_right = AnimationUtils.loadAnimation(mContext, R.anim.slide_left_to_right); + slide_right_to_left = 
AnimationUtils.loadAnimation(mContext, R.anim.slide_right_to_left); + slide_top_down = AnimationUtils.loadAnimation(mContext, R.anim.slide_top_down); + slide_bottom_up = AnimationUtils.loadAnimation(mContext, R.anim.slide_bottom_up); } - public void onDestroy() { - terminateQueue(); if (mListener != null) { @@ -155,13 +145,10 @@ public void onDestroy() { mViewManager.onDestroy(); mViewManager = null; } - //mSynthesizer.shutDown(); } - protected void prepareListener(TTSsynthesizer rootTTS) { - // Generate a Project Listen type listener // Attach the speech recognizer. mListener = new ListenerPLRT(); @@ -169,19 +156,16 @@ protected void prepareListener(TTSsynthesizer rootTTS) { mListener.setEventListener(this); // Have connector sub-class in the tutor domain Inject the listener into the MediaManager - // setListener(mListener); // attach TTS mSynthesizer = rootTTS; } - public void nextScene() { } public void nextNode() { - } /** @@ -189,24 +173,21 @@ public void nextNode() { * @param language Feature string (e.g. LANG_EN) */ public void configListenerLanguage(String language) { - // Configure the mListener for our storyName - // mListener.setLanguage(language); } - /** - * @Override in Tutor Domain to allow the MediaManageer direct access to the recognizer + * @Override in Tutor Domain to allow the MediaManager direct access to the recognizer */ - public void setListener(ListenerBase listener) {} - + public void setListener(ListenerBase listener) { + } /** - * @Override in Tutor Domain to allow the MediaManageer direct access to the recognizer + * @Override in Tutor Domain to allow the MediaManager direct access to the recognizer */ - public void removeListener(ListenerBase listener) {} - + public void removeListener(ListenerBase listener) { + } //************************************************* @@ -217,49 +198,47 @@ public void removeListener(ListenerBase listener) {} * @param feature * @param fadd */ - public void setFeature(String feature, boolean fadd) {} + public void setFeature(String feature, boolean fadd) { + } + public boolean testFeature(String feature) { return false; } + public String getTutorVariant() { + return ""; + } - public int addPage(View newView) { + public void setTutorFeatures(String variant) { + } + public int addPage(View newView) { int insertNdx = super.getChildCount(); super.addView((View) newView, insertNdx); return insertNdx; } - /** * * @param forward * @param index */ public void animatePageFlip(boolean forward, int index) { - if (forward) { - if (_scrollVertical) - setInAnimation(slide_bottom_up); - else - setInAnimation(slide_right_to_left); - } - else { - if (_scrollVertical) - setInAnimation(slide_top_down); - else - setInAnimation(slide_left_to_right); + if (_scrollVertical) setInAnimation(slide_bottom_up); + else setInAnimation(slide_right_to_left); + } else { + if (_scrollVertical) setInAnimation(slide_top_down); + else setInAnimation(slide_left_to_right); } setDisplayedChild(index); } - //****** ViewManager Support - START //************************************************* - //************************************************************************ //************************************************************************ // IBehaviorManager Interface START @@ -268,10 +247,8 @@ public void animatePageFlip(boolean forward, int index) { * Overridden in TClass to fire graph behaviors * */ - public boolean applyBehavior(String event){ - - boolean result = false; - return result; + public boolean applyBehavior(String event) { + return false; } /** @@ 
-282,18 +259,15 @@ public boolean applyBehavior(String event){ public void applyBehaviorNode(String nodeName) { } - // IBehaviorManager Interface END //************************************************************************ //************************************************************************ - //************************************************* //****** Activity state support START protected void onPause() { - // stop listening abortively whenever app pauses or stops (moves to background) if (mListener != null) { mListener.deleteLogFiles(); @@ -301,23 +275,18 @@ protected void onPause() { } } - protected void onStop() { } - protected void onResume() { } - protected void onRestart() { - //mViewManager.switchSentence(currentIndex); } - /* Following saves state over possible destroy/recreate cycle, - * which occurs most commonly on portrait/landscape change. + * which occurs most commonly on portrait/landscape change. * We save current sentence (though no credit state) in order to restart from there */ protected void onSaveInstanceState(Bundle state) { @@ -325,22 +294,20 @@ protected void onSaveInstanceState(Bundle state) { // state.putInt("currentIndex", currentIndex); // just save the current sentence index. } - //****** Activity state support END //************************************************* - //**************************************************************************** //********************* Speech Recognition Interface (ASR) - Start @Override - public void onBeginningOfSpeech() {} - + public void onBeginningOfSpeech() { + } @Override - public void onEndOfSpeech() {} - + public void onEndOfSpeech() { + } /** * Route ASR events to the appropriate ViewMannager for the content type @@ -350,11 +317,9 @@ public void onEndOfSpeech() {} */ @Override public void onUpdate(ListenerBase.HeardWord[] heardWords, boolean finalResult) { - mViewManager.onUpdate(heardWords, finalResult); // update current sentence state and redraw } - /** * Route ASR events to the appropriate ViewMannager for the content type * @@ -363,20 +328,16 @@ public void onUpdate(ListenerBase.HeardWord[] heardWords, boolean finalResult) { */ @Override public void onUpdate(String[] heardWords, boolean finalResult) { - mViewManager.onUpdate(heardWords, finalResult); // update current sentence state and redraw } - @Override public void onASREvent(int eventType) { - Log.d(TAG, "onASREvent: " + eventType); // Here we have to convert from bitmapped event types to string types // switch (eventType) { - case TCONST.RECOGNITION_EVENT: applyBehavior(TCONST.ASR_RECOGNITION_EVENT); break; @@ -411,90 +372,70 @@ public void onASREvent(int eventType) { } } - //********************* Speech Recognition Interface - End //**************************************************************************** - //************************************************************************ //************************************************************************ // Tutor Scriptable methods Start - public void startStory() { mViewManager.startStory(); } public void speakTargetSentence() { // to speak the entire Target word sentence - } - - /** - */ public void speakTargetWord() { - } - // Must override in TClass // TClass domain where TScope lives providing access to tutor scriptables - // public void onButtonClick(String buttonName) { } - //************************************************************************ //************************************************************************ // IPublisher - START @Override public void publishState() { - } 
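// ---------------------------------------------------------------------------
// Illustrative note (not part of this patch): the IPublisher methods in this
// section - publishState() above and the publishValue()/publishFeature()
// family below - are deliberately empty stubs at this level; per the comments
// that follow, they are overridden in the tutor-domain TClass, where the
// TScope scriptables live. The view managers only ever reach them through
// mParent, for example (calls taken from CRt_ViewManagerASB.publishStateValues()
// later in this diff):
//
//     mParent.publishValue(TCONST.RTC_VAR_WORDSTATE, TCONST.LAST);  // scriptable variable
//     mParent.publishFeature(TCONST.FTR_USER_READING);              // boolean feature flag
//     mParent.retractFeature(TCONST.FTR_USER_READING);
//
// Keeping no-op defaults here presumably lets the component run outside a
// tutor harness without null checks around every publish call.
// ---------------------------------------------------------------------------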
// Must override in TClass // TClass domain where TScope lives providing access to tutor scriptables - // public void publishValue(String varName, String value) { } // Must override in TClass // TClass domain where TScope lives providing access to tutor scriptables - // public void publishValue(String varName, int value) { } @Override public void publishFeatureSet(String featureset) { - } @Override public void retractFeatureSet(String featureset) { - } @Override public void publishFeature(String feature) { - } @Override public void retractFeature(String feature) { - } @Override public void publishFeatureMap(HashMap featureMap) { - } @Override public void retractFeatureMap(HashMap featureMap) { - } // IPublisher - END @@ -508,21 +449,17 @@ public void retractFeatureMap(HashMap featureMap) { public void updateContext(String sentence, int index, String[] wordList, int wordIndex, String word, int attempts, boolean virtual, boolean correct) { } - // Must override in TClass // TClass domain where TScope lives providing access to tutor scriptables // public void UpdateValue(boolean correct) { } - public void setSpeakButton(String command) { - mViewManager.setSpeakButton(command); } public void setPageFlipButton(String command) { - mViewManager.setPageFlipButton(command); } @@ -531,26 +468,19 @@ public void setPageFlipButton(String command) { //************************************************************************ - protected boolean isCorrect() { - - boolean correct = false; - - return correct; + return false; } - /** - * TODO: this currently only supports extern assets - need to allow for internal assets + * TODO: this currently only supports external assets - need to allow for internal assets * * @param EXTERNPATH */ public void loadStory(String EXTERNPATH, String viewType, String assetLocation) { - loadStory(EXTERNPATH, viewType, assetLocation, null); } - /** * sometimes the storydata.json file is in one repo (assetLocation), but the other assets needed (images) * are in a shared location (sharedAssetLocation) @@ -561,7 +491,6 @@ public void loadStory(String EXTERNPATH, String viewType, String assetLocation) * @param SHAREDEXTERNPATH */ public void loadStory(String EXTERNPATH, String viewType, String assetLocation, String SHAREDEXTERNPATH) { - Log.d(TCONST.DEBUG_STORY_TAG, String.format("assetLocation=%s -- EXTERNPATH=%s", assetLocation, EXTERNPATH)); Class storyClass = viewClassMap.get(viewType); @@ -570,14 +499,13 @@ public void loadStory(String EXTERNPATH, String viewType, String assetLocation, // Generate the View manager for the storyName - specified in the data // // ooooh maybe check if it's math and make text closer to image - mViewManager = (ICRt_ViewManager)storyClass.getConstructor(new Class[]{CRt_Component.class, ListenerBase.class}).newInstance(this,mListener); + mViewManager = (ICRt_ViewManager)storyClass.getConstructor(new Class[]{CRt_Component.class, ListenerBase.class}).newInstance(this, mListener); // ZZZ it loads the story data JUST FINE String jsonData = JSON_Helper.cacheDataByName(EXTERNPATH + TCONST.STORYDATA); Log.d(TCONST.DEBUG_STORY_TAG, "logging jsonData:"); mViewManager.loadJSON(new JSONObject(jsonData), null); - } catch (Exception e) { // TODO: Manage Exceptions CErrorManager.logEvent(TAG, "Story Parse Error: ", e, false); @@ -593,79 +521,53 @@ public void loadStory(String EXTERNPATH, String viewType, String assetLocation, // ZZZ EXTERNPATH = TCONST.EXTERN // ZZZ assetLocation contains storydata.json and images mViewManager.initStory(this, EXTERNPATH, 
assetLocation); - } - /** - * TODO: this currently only supports extern assets - need to allow for internal assets + * TODO: this currently only supports external assets - need to allow for internal assets * * @param storyName */ public void setStory(String storyName, String assetLocation) { - - for (int i1 = 0 ; i1 < dataSource.length ; i1++ ) { - - if(storyName.equals(dataSource[i1].storyName)) { - + for (int i1 = 0; i1 < dataSource.length; i1++) { + if (storyName.equals(dataSource[i1].storyName)) { // Generate a cached path to the storyName asset data - // - String EXTERNPATH =DATASOURCEPATH + dataSource[i1].levelFolder + "/" + dataSource[i1].storyFolder + "/"; + String EXTERNPATH = DATASOURCEPATH + dataSource[i1].levelFolder + "/" + dataSource[i1].storyFolder + "/"; loadStory(EXTERNPATH, dataSource[i1].viewtype, assetLocation); - - // we're done break; } } } - public void next() { - try { - if (mViewManager != null) { - - mViewManager.nextWord(); - - } else { - CErrorManager.logEvent(TAG, "Error no DataSource : ", null, false); - } - } - catch(Exception e) { + if (mViewManager != null) mViewManager.nextWord(); + else CErrorManager.logEvent(TAG, "Error no DataSource: ", null, false); + } catch(Exception e) { CErrorManager.logEvent(TAG, "Data Exhuasted: next called past end of data", e, false); } - } - public boolean dataExhausted() { return mViewManager.endOfData(); } - //************************************************************************ //************************************************************************ // IEventListener -- Start - @Override public void onEvent(IEvent eventObject) { - - // We expect AUDIO_EVENTS from the narration type_audio nodes to let us know when // they are complete with an UTTERANCE_COMPLETE_EVENT - // if (mViewManager != null) { try { switch (eventObject.getType()) { - case TYPE_AUDIO: - // We expect AUDIO_EVENTS from the narration type_audio nodes to let us know when // they are complete with an UTTERANCE_COMPLETE_EVENT - // mViewManager.execCommand((String) eventObject.getString(AUDIO_EVENT), null); break; @@ -687,7 +589,6 @@ public void onEvent(IEvent eventObject) { //************************************************************************ // Component Message Queue -- Start - public class Queue implements Runnable { protected String _command; @@ -706,56 +607,39 @@ public String getCommand() { return _command; } - @Override public void run() { - try { queueMap.remove(this); - - if (mViewManager != null) { - mViewManager.execCommand(_command, _target); - } - } - catch(Exception e) { + if (mViewManager != null) mViewManager.execCommand(_command, _target); + } catch(Exception e) { CErrorManager.logEvent(TAG, "Run Error: cmd:" + _command + " tar: " + _target + " >", e, false); } } } - /** * Disable the input queues permenantly in prep for destruction * walks the queue chain to diaable scene queue - * */ private void terminateQueue() { - // disable the input queue permenantly in prep for destruction - // _qDisabled = true; flushQueue(); } - /** * Remove any pending scenegraph commands. 
* */ private void flushQueue() { + for (Object entry : queueMap.entrySet()) { + Log.d(TAG, "Post Cancelled on Flush: " + ((Queue)((Map.Entry)entry).getValue()).getCommand()); - Iterator tObjects = queueMap.entrySet().iterator(); - - while (tObjects.hasNext() ) { - Map.Entry entry = (Map.Entry) tObjects.next(); - - Log.d(TAG, "Post Cancelled on Flush: " + ((Queue)entry.getValue()).getCommand()); - - mainHandler.removeCallbacks((Queue)(entry.getValue())); + mainHandler.removeCallbacks((Queue)((Map.Entry)entry).getValue()); } } - /** * Keep a mapping of pending messages so we can flush the queue if we want to terminate * the tutor before it finishes naturally. @@ -765,17 +649,13 @@ private void flushQueue() { private void enQueue(Queue qCommand) { enQueue(qCommand, 0L); } - private void enQueue(Queue qCommand, Long delay) { + private void enQueue(Queue qCommand, Long delay) { if (!_qDisabled) { queueMap.put(qCommand, qCommand); - if (delay > 0L) { - mainHandler.postDelayed(qCommand, delay); - } - else { - mainHandler.post(qCommand); - } + if (delay > 0L) mainHandler.postDelayed(qCommand, delay); + else mainHandler.post(qCommand); } } @@ -787,12 +667,11 @@ private void enQueue(Queue qCommand, Long delay) { public void post(String command) { post(command, 0L); } - public void post(String command, Long delay) { + public void post(String command, Long delay) { enQueue(new Queue(command), delay); } - /** * Post a command and target to this scenegraph queue * @@ -801,12 +680,11 @@ public void post(String command, Long delay) { public void post(String command, Object target) { post(command, target, 0L); } - public void post(String command, Object target, Long delay) { + public void post(String command, Object target, Long delay) { enQueue(new Queue(command, target), delay); } - // Component Message Queue -- End //************************************************************************ //************************************************************************ @@ -814,7 +692,6 @@ public void post(String command, Object target, Long delay) { //************ Serialization - /** * Load the data source * @@ -822,7 +699,6 @@ public void post(String command, Object target, Long delay) { */ @Override public void loadJSON(JSONObject jsonData, IScope scope) { - JSON_Helper.parseSelf(jsonData, this, CClassMap.classMap, scope); } } diff --git a/comp_reading/src/main/java/cmu/xprize/rt_component/CRt_ViewManagerASB.java b/comp_reading/src/main/java/cmu/xprize/rt_component/CRt_ViewManagerASB.java index 2db7b446d..33cdb5ce8 100644 --- a/comp_reading/src/main/java/cmu/xprize/rt_component/CRt_ViewManagerASB.java +++ b/comp_reading/src/main/java/cmu/xprize/rt_component/CRt_ViewManagerASB.java @@ -45,12 +45,9 @@ import cmu.xprize.util.IScope; import cmu.xprize.util.JSON_Helper; import cmu.xprize.util.TCONST; +import cmu.xprize.rt_component.CASB_Seg; import edu.cmu.xprize.listener.ListenerBase; -import static cmu.xprize.util.TCONST.FTR_USER_READ; -import static cmu.xprize.util.TCONST.FTR_USER_READING; -import static cmu.xprize.util.TCONST.QGRAPH_MSG; - /** * This view manager provides student UX for the African Story Book format used @@ -80,44 +77,50 @@ public class CRt_ViewManagerASB implements ICRt_ViewManager, ILoadableObject { private int mEvenIndex; private int mCurrViewIndex; + private String mCurrEffectiveVariant = ""; + private String mPrevEffectiveVariant; + private String mCurrPrompt = ""; + private String mPrevPrompt; + // state for the current storyName - African Story Book private String mCurrHighlight = ""; 
private int mCurrPage; - private boolean mLastPage; private int mCurrPara; private int mCurrLine; private int mCurrWord; private int mHeardWord; // The expected location of mCurrWord in heardWords - see PLRT version of onUpdate below private String speakButtonEnable = "DISABLE"; - private String speakButtonShow = "HIDE"; - private String pageButtonEnable = "DISABLE"; - private String pageButtonShow = "HIDE"; + private String speakButtonShow = "HIDE"; + private String pageButtonEnable = "DISABLE"; + private String pageButtonShow = "HIDE"; private int mPageCount; private int mParaCount; private int mLineCount; private int mWordCount; - private int attemptNum = 1; + private int attemptNum = 0; + private boolean skippedWord = false; private boolean storyBooting; + private boolean restartListener; private String[] wordsToDisplay; // current sentence words to display - contain punctuation private String[] wordsToSpeak; // current sentence words to hear private ArrayList wordsToListenFor; // current sentence words to build language model private String hearRead; - private Boolean echo = false; private CASB_Narration[] rawNarration; // The narration segmentation info for the active sentence private String rawSentence; // currently displayed sentence that need to be recognized private CASB_Seg narrationSegment; + private ArrayList prevFmtSentence; private String[] splitSegment; private int splitIndex = TCONST.INITSPLIT; private boolean endOfSentence = false; private ArrayList spokenWords; private int utteranceNdx; private int segmentNdx; - private String page_prompt; + private String pagePrompt; private int numUtterance; private CASB_Narration currUtterance; @@ -128,19 +131,18 @@ public class CRt_ViewManagerASB implements ICRt_ViewManager, ILoadableObject { private int segmentCurr; private String completedSentencesFmtd = ""; - private String completedSentences = ""; - private String futureSentencesFmtd = ""; - private String futureSentences = ""; - private boolean showWords = true; - private boolean showFutureWords = true; - private boolean showFutureContent = true; - private boolean listenFutureContent = false; + private String completedSentences = ""; + private String futureSentencesFmtd = ""; + private String futureSentences = ""; + private boolean showWords; + private boolean showFutureWords; + private boolean skippingWords = true; + private boolean listenFutureContent = false; private String assetLocation; private ArrayList wordsSpoken; private ArrayList futureSpoken; - // json loadable // ZZZ where the money gets loaded @@ -158,7 +160,6 @@ public class CRt_ViewManagerASB implements ICRt_ViewManager, ILoadableObject { // ZZZ the money public CASB_data[] data; - static final String TAG = "CRt_ViewManagerASB"; @@ -168,18 +169,17 @@ public class CRt_ViewManagerASB implements ICRt_ViewManager, ILoadableObject { * @param listener */ public CRt_ViewManagerASB(CRt_Component parent, ListenerBase listener) { - mParent = parent; mContext = mParent.getContext(); - mOddPage = (ViewGroup) android.support.percent.PercentRelativeLayout.inflate(mContext, R.layout.asb_oddpage, null); - mEvenPage = (ViewGroup) android.support.percent.PercentRelativeLayout.inflate(mContext, R.layout.asb_evenpage, null); + mOddPage = (ViewGroup)android.support.percent.PercentRelativeLayout.inflate(mContext, R.layout.asb_oddpage, null); + mEvenPage = (ViewGroup)android.support.percent.PercentRelativeLayout.inflate(mContext, R.layout.asb_evenpage, null); mOddPage.setVisibility(View.GONE); mEvenPage.setVisibility(View.GONE); - 
mOddIndex = mParent.addPage(mOddPage ); - mEvenIndex = mParent.addPage(mEvenPage ); + mOddIndex = mParent.addPage(mOddPage); + mEvenIndex = mParent.addPage(mEvenPage); mListener = listener; } @@ -190,14 +190,13 @@ public CRt_ViewManagerASB(CRt_Component parent, ListenerBase listener) { * Set - storyBooting flag to inhibit startListening so the script can complete whatever * preparation is required before the listener starts. Otherwise you get junk hypotheses. * - * Once the script has completed its introduction etc. it calls nextline to cause a line increment + * Once the script has completed its introduction etc. it calls nextLine to cause a line increment * which resets storyBooting and enables the listener for the first sentence in the storyName. * * @param owner * @param assetPath */ public void initStory(IVManListener owner, String assetPath, String location) { - mOwner = owner; mAsset = assetPath; // ZZZ assetPath... TCONST.EXTERN storyBooting = true; @@ -205,68 +204,28 @@ public void initStory(IVManListener owner, String assetPath, String location) { Log.d(TCONST.DEBUG_STORY_TAG, String.format("mAsset=%s -- assetLocation=%s", mAsset, assetLocation)); - if (mParent.testFeature(TCONST.FTR_USER_HIDE)) showWords = false; - if (mParent.testFeature(TCONST.FTR_USER_REVEAL)) showFutureWords = showFutureContent = false; - - Log.d(TAG, "initStory: showWords = " + showWords + ", showFutureWords = " + showFutureWords + ", showFutureContent = " + showFutureContent); - - mParent.setFeature(TCONST.FTR_STORY_STARTING, TCONST.ADD_FEATURE); - seekToPage(TCONST.ZERO); //TODO: CHECK - mParent.animatePageFlip(true,mCurrViewIndex); + mParent.animatePageFlip(true, mCurrViewIndex); } - /** * NOTE: we reset mCurrWord - last parm in seekToStoryPosition - * */ public void startStory() { - // reset boot flag to inhibit future calls // if (storyBooting) { - mParent.setFeature(TCONST.FTR_STORY_STARTING, TCONST.DEL_FEATURE); - - // Narration Mode (i.e. USER_HEAR) always narrates the story otherwise we - // start with USER_READ where the student reads aloud and if USER_ECHO - // is in effect we then toggle between READ and HEAR for each sentence. 
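// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): the comment block above
// describes the reading modes. Reduced to its core, the per-sentence ECHO
// behavior is a two-state toggle on hearRead, as implemented in
// publishStateValues() further down this file:
//
//     // student reads the line aloud  <->  tutor narrates the same line back
//     hearRead = hearRead.equals(TCONST.FTR_USER_READ) ? TCONST.FTR_USER_HEAR
//                                                      : TCONST.FTR_USER_READ;
//
// with the HEAR / HIDE / PARROT variants starting on the narration (HEAR)
// side, as the removed condition below shows.
// ---------------------------------------------------------------------------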
- // - if (mParent.testFeature(TCONST.FTR_USER_HEAR) || mParent.testFeature(TCONST.FTR_USER_HIDE) || mParent.testFeature(TCONST.FTR_USER_PARROT)) { - - hearRead = TCONST.FTR_USER_HEAR; - } else { - hearRead = FTR_USER_READ; - mParent.publishFeature(FTR_USER_READING); - } - storyBooting = false; - speakOrListen(); } } - - public void speakOrListen() { - - if (hearRead.equals(TCONST.FTR_USER_HEAR)) { - - mParent.applyBehavior(TCONST.NARRATE_STORY); - } - if (hearRead.equals(FTR_USER_READ)) { - - startListening(); - } - } - - @Override public void onDestroy() { } - /** * From the script writers perspective there is only one say button and one pageflip button * Since there are actually two of each - one on each page view we share the state between them and @@ -276,11 +235,8 @@ public void onDestroy() { * @param command */ public void setButtonState(View control, String command) { - try { - switch (command) { - case "ENABLE": control.setEnabled(true); break; @@ -294,17 +250,13 @@ public void setButtonState(View control, String command) { control.setVisibility(View.INVISIBLE); break; } - } - catch(Exception e) { + } catch(Exception e) { Log.d(TAG, "result:" + e); } } - public void setSpeakButton(String command) { - switch (command) { - case "ENABLE": speakButtonEnable = command; break; @@ -318,17 +270,12 @@ public void setSpeakButton(String command) { speakButtonShow = command; break; } - // Ensure the buttons reflect the current states - // updateButtons(); } - public void setPageFlipButton(String command) { - switch (command) { - case "ENABLE": Log.i("ASB", "ENABLE Flip Button"); pageButtonEnable = command; @@ -344,18 +291,12 @@ public void setPageFlipButton(String command) { pageButtonShow = command; break; } - // Ensure the buttons reflect the current states - // updateButtons(); } - private void updateButtons() { - - // Make the button states insensitive to the page - So the script does not have to - // worry about timing of setting button states. - // + // Make the button states insensitive to the page - So the script does not have to worry about timing of setting button states. setButtonState(mPageFlip, pageButtonEnable); setButtonState(mPageFlip, pageButtonShow); @@ -364,7 +305,7 @@ private void updateButtons() { mPageFlip.setOnClickListener(new View.OnClickListener() { public void onClick(View v) { - Log.v(QGRAPH_MSG, "event.click: " + " CRt_ViewManagerASB: PAGEFLIP"); + Log.v(TCONST.QGRAPH_MSG, "event.click: " + " CRt_ViewManagerASB: PAGEFLIP"); mParent.onButtonClick(TCONST.PAGEFLIP_BUTTON); } @@ -372,7 +313,7 @@ public void onClick(View v) { mSay.setOnClickListener(new View.OnClickListener() { public void onClick(View v) { - Log.v(QGRAPH_MSG, "event.click: " + " CRt_ViewManagerASB:onButtonClick SPEAKBUTTON"); + Log.v(TCONST.QGRAPH_MSG, "event.click: " + " CRt_ViewManagerASB:onButtonClick SPEAKBUTTON"); mParent.onButtonClick(TCONST.SPEAK_BUTTON); } @@ -383,14 +324,10 @@ public void onClick(View v) { * This configures the target display components to be populated with data. * * mPageImage - mPageText - * */ public void flipPage() { - // Note that we use zero based indexing so page zero is first page - i.e. 
odd - // if (mCurrPage % 2 == 0) { - mCurrViewIndex = mOddIndex; mPageImage = (ImageView) mOddPage.findViewById(R.id.SpageImage); mPageText = (TextView) mOddPage.findViewById(R.id.SstoryText); @@ -398,7 +335,6 @@ public void flipPage() { mPageFlip = (ImageButton) mOddPage.findViewById(R.id.SpageFlip); mSay = (ImageButton) mOddPage.findViewById(R.id.Sspeak); } else { - mCurrViewIndex = mEvenIndex; mPageImage = (ImageView) mEvenPage.findViewById(R.id.SpageImage); mPageText = (TextView) mEvenPage.findViewById(R.id.SstoryText); @@ -406,58 +342,42 @@ public void flipPage() { mPageFlip = (ImageButton) mEvenPage.findViewById(R.id.SpageFlip); mSay = (ImageButton) mEvenPage.findViewById(R.id.Sspeak); } - // Ensure the buttons reflect the current states - // updateButtons(); } - private void configurePageImage() { - InputStream in; try { if (assetLocation.equals(TCONST.EXTERN)) { - Log.d(TCONST.DEBUG_STORY_TAG, "loading image " + mAsset + data[mCurrPage].image); in = new FileInputStream(mAsset + data[mCurrPage].image); // ZZZ load image - } else if (assetLocation.equals(TCONST.EXTERN_SHARED)) { - Log.d(TCONST.DEBUG_STORY_TAG, "loading shared image " + mAsset + data[mCurrPage].image); in = new FileInputStream(mAsset + data[mCurrPage].image); // ZZZ load image } else { - Log.d(TCONST.DEBUG_STORY_TAG, "loading image from asset" + mAsset + data[mCurrPage].image); in = JSON_Helper.assetManager().open(mAsset + data[mCurrPage].image); // ZZZ load image } // ALAN_HILL (5) here is how to load the image...... NEXT NEXT NEXT mPageImage.setImageBitmap(BitmapFactory.decodeStream(in)); - } catch (IOException e) { - mPageImage.setImageBitmap(null); e.printStackTrace(); } } - private String[] splitWordOnChar(String[] wordArray, String splitChar) { - ArrayList wordList = new ArrayList<>(); for (String word : wordArray) { - String[] wordSplit = word.split(splitChar); if (wordSplit.length > 1) { - - for (int i1 = 0 ; i1 < wordSplit.length-1 ; i1++) { - wordList.add(wordSplit[i1] + splitChar); - } - wordList.add(wordSplit[wordSplit.length-1]); + for (int i1 = 0; i1 < wordSplit.length - 1; i1++) wordList.add(wordSplit[i1] + splitChar); + wordList.add(wordSplit[wordSplit.length - 1]); } else { wordList.add(wordSplit[0]); } @@ -466,12 +386,8 @@ private String[] splitWordOnChar(String[] wordArray, String splitChar) { return wordList.toArray(new String[wordList.size()]); } - private String[] splitRawSentence(String rawSentence) { - - String sentenceWords[]; - - sentenceWords = rawSentence.trim().split("\\s+"); + String[] sentenceWords = rawSentence.trim().split("\\s+"); sentenceWords = stripLeadingTrailing(sentenceWords, "'"); sentenceWords = splitWordOnChar(sentenceWords, "-"); @@ -480,7 +396,6 @@ private String[] splitRawSentence(String rawSentence) { return sentenceWords; } - /** * This cleans a raw sentence from the ASB. This is very idiosyncratic to the ASB content. 
* ASB contains some apostrophes used as single quotes that otherwise confuse the layout @@ -492,50 +407,30 @@ private String[] splitRawSentence(String rawSentence) { * @return */ private String processRawSentence(String rawSentence) { - - String[] sentenceWords; + String[] sentenceWords = splitRawSentence(rawSentence); StringBuilder sentence = new StringBuilder(); - sentenceWords = splitRawSentence(rawSentence); - - for (int i1 = 0 ; i1 < sentenceWords.length ; i1++) { - - if (sentenceWords[i1].endsWith("'") || sentenceWords[i1].endsWith("-")) { - sentence.append(sentenceWords[i1]); - } else { - sentence.append(sentenceWords[i1] + ((i1 < sentenceWords.length-1)? TCONST.WORD_SPACE: TCONST.NO_SPACE)); - } + for (int i1 = 0; i1 < sentenceWords.length; i1++) { + if (sentenceWords[i1].endsWith("'") || sentenceWords[i1].endsWith("-")) sentence.append(sentenceWords[i1]); + else sentence.append(sentenceWords[i1] + ((i1 < sentenceWords.length - 1)? TCONST.WORD_SPACE: TCONST.NO_SPACE)); } return sentence.toString(); } - private String stripLeadingTrailing(String sentence, String stripChar) { - - if (sentence.startsWith(stripChar)) { - sentence = sentence.substring(1); - } - if (sentence.endsWith(stripChar)) { - sentence = sentence.substring(0, sentence.length()-1); - } + if (sentence.startsWith(stripChar)) sentence = sentence.substring(1); + if (sentence.endsWith(stripChar)) sentence = sentence.substring(0, sentence.length() - 1); return sentence; } - private String[] stripLeadingTrailing(String[] wordArray, String stripChar) { - ArrayList wordList = new ArrayList<>(); for (String word : wordArray) { - - if (word.startsWith(stripChar)) { - word = word.substring(1); - } - if (word.endsWith(stripChar)) { - word = word.substring(0, word.length()-1); - } + if (word.startsWith(stripChar)) word = word.substring(1); + if (word.endsWith(stripChar)) word = word.substring(0, word.length() - 1); wordList.add(word); } @@ -543,26 +438,28 @@ private String[] stripLeadingTrailing(String[] wordArray, String stripChar) { return wordList.toArray(new String[wordList.size()]); } - /** * Reconfigure for a specific page / paragraph / line (seeks to) * * @param currPage * @param currPara * @param currLine + * @param currWord */ private void seekToStoryPosition(int currPage, int currPara, int currLine, int currWord) { + Log.d(TAG, "seekToStoryPosition: currPage: " + currPage + " - currPara: " + currPara + " - currLine: " + currLine + " - currWord: " + currWord); - String otherWordsToSpeak[]; + showWords = !(mParent.testFeature(TCONST.FTR_USER_HIDE)) && !(mParent.testFeature(TCONST.FTR_USER_PROMPT)); + showFutureWords = hearRead.equals(TCONST.FTR_USER_HEAR) || !(mParent.testFeature(TCONST.FTR_USER_REVEAL)); - completedSentencesFmtd = ""; - completedSentences = ""; - futureSentencesFmtd = ""; - futureSentences = ""; - wordsSpoken = new ArrayList<>(); - futureSpoken = new ArrayList<>(); + Log.d(TAG, "seekToStoryPosition: showWords = " + showWords + ", showFutureWords = " + showFutureWords); - Log.d(TAG, "seekToStoryPosition: Page: " + currPage + " - Paragraph: " + currPara + " - line: " + currLine + " - word: " + currWord); + completedSentencesFmtd = ""; + completedSentences = ""; + futureSentencesFmtd = ""; + futureSentences = ""; + wordsSpoken = new ArrayList<>(); + futureSpoken = new ArrayList<>(); // Optimization - Skip If seeking to the very first line // @@ -571,52 +468,37 @@ private void seekToStoryPosition(int currPage, int currPara, int currLine, int c // 1. 
A visually formatted representation of the words already spoken // 2. A list of words already spoken - for use in the Sphinx language model // - if (currPara > 0 || currLine > 0) { - - // First generate all completed paragraphs in their entirity - // - for (int paraIndex = 0 ; paraIndex < currPara ; paraIndex++) { - - for (CASB_Content rawContent : data[currPage].text[paraIndex]) { - - otherWordsToSpeak = rawContent.sentence.replace('-', ' ').replaceAll("['.!?,:;\"\\(\\)]", " ").toUpperCase(Locale.US).trim().split("\\s+"); - + if (!mCurrEffectiveVariant.equals("story.prompt") && (currPara > 0 || currLine > 0)) { + // First generate all completed paragraphs in their entirety + for (int paraIndex = 0; paraIndex < currPara; paraIndex++) { + int paraLength = data[currPage].text[paraIndex].length; + for (int lineIndex = 0; lineIndex < paraLength; lineIndex++) { + rawSentence = data[currPage].text[paraIndex][lineIndex].sentence; // Add the previous line to the list of spoken words used to build the // language model - so it allows all on screen words to be spoken - // - for (String word : otherWordsToSpeak) - wordsSpoken.add(word); - - completedSentences += processRawSentence(rawContent.sentence) + TCONST.SENTENCE_SPACE; + for (String word : splitIntoWords(rawSentence)) wordsSpoken.add(word); + String variant = computeEffectiveVariant(currPage, paraIndex, lineIndex); + if (!variant.equals("story.prompt") && !variant.equals("story.hide")) completedSentences += processRawSentence(rawSentence) + TCONST.SENTENCE_SPACE; } - if (paraIndex < currPara) - completedSentences += "

"; + if (paraIndex < currPara) completedSentences += "

"; } // Then generate all completed sentences from the current paragraph - // - for (int lineIndex = 0 ; lineIndex < currLine ; lineIndex++) { - + for (int lineIndex = 0; lineIndex < currLine; lineIndex++) { rawSentence = data[currPage].text[currPara][lineIndex].sentence; - otherWordsToSpeak = rawSentence.replace('-', ' ').replaceAll("['.!?,:;\"\\(\\)]", " ").toUpperCase(Locale.US).trim().split("\\s+"); - // Add the previous line to the list of spoken words used to build the // language model - so it allows all on screen words to be spoken - // - for (String word : otherWordsToSpeak) - wordsSpoken.add(word); - - completedSentences += processRawSentence(rawSentence) + TCONST.SENTENCE_SPACE; + for (String word : splitIntoWords(rawSentence)) wordsSpoken.add(word); + String variant = computeEffectiveVariant(currPage, currPara, lineIndex); + if (!variant.equals("story.prompt") && !variant.equals("story.hide")) completedSentences += processRawSentence(rawSentence) + TCONST.SENTENCE_SPACE; } // Note that we add a space after the sentence. - // completedSentencesFmtd = ""; completedSentencesFmtd += completedSentences; completedSentencesFmtd += ""; } - // Generate the active line of text - target sentence // Reset the highlight mCurrHighlight = TCONST.EMPTY; @@ -631,7 +513,6 @@ private void seekToStoryPosition(int currPage, int currPara, int currLine, int c rawNarration = data[currPage].text[currPara][currLine].narration; rawSentence = data[currPage].text[currPara][currLine].sentence; - if (data[currPage].prompt != null) page_prompt = data[currPage].prompt; // Words that are used to build the display text - include punctuation etc. // @@ -642,16 +523,13 @@ private void seekToStoryPosition(int currPage, int currPara, int currLine, int c // display if they contain apostrophes or hyphens into sub "words" - e.g. "thing's" -> "thing" "'s" // these are reconstructed by the highlight logic without adding spaces which it otherwise inserts // automatically. - // wordsToDisplay = splitRawSentence(rawSentence); - + if (currWord == 0) prevFmtSentence = new ArrayList<>(); // TODO: strip word-final or -initial apostrophes as in James' or 'cause. // Currently assuming hyphenated expressions split into two Asr words. - // - wordsToSpeak = rawSentence.replace('-', ' ').replaceAll("['.!?,:;\"\\(\\)]", " ").toUpperCase(Locale.US).trim().split("\\s+"); - - mCurrWord = currWord; + wordsToSpeak = splitIntoWords(rawSentence); + mCurrWord = currWord; mWordCount = wordsToSpeak.length; // If we are showing future content - i.e. we want the entire page to be visible but @@ -663,159 +541,120 @@ private void seekToStoryPosition(int currPage, int currPara, int currLine, int c // 1. A visually formatted representation of the words not yet spoken // 2. 
A list of future words to be spoken - for use in the Sphinx language model // - if (showFutureContent) { - - // Generate all remaining sentences in the current paragraph - // - // Then generate all future sentences from the current paragraph - // - for (int lineIndex = currLine+1 ; lineIndex < mLineCount ; lineIndex++) { - + // Generate all remaining sentences from the current paragraph + if (!mCurrEffectiveVariant.equals("story.prompt")) { + for (int lineIndex = currLine + 1; lineIndex < mLineCount; lineIndex++) { rawSentence = data[currPage].text[currPara][lineIndex].sentence; - otherWordsToSpeak = rawSentence.replace('-', ' ').replaceAll("['.!?,:;\"\\(\\)]", " ").toUpperCase(Locale.US).trim().split("\\s+"); - // Add the previous line to the list of spoken words used to build the // language model - so it allows all on screen words to be spoken - // - for (String word : otherWordsToSpeak) - futureSpoken.add(word); - - futureSentences += processRawSentence(rawSentence) + TCONST.SENTENCE_SPACE; + for (String word : splitIntoWords(rawSentence)) futureSpoken.add(word); + String variant = computeEffectiveVariant(currPage, currPara, lineIndex); + if (!variant.equals("story.prompt") && !variant.equals("story.hide") && !variant.equals("story.reveal")) futureSentences += processRawSentence(rawSentence) + TCONST.SENTENCE_SPACE; } - // First generate all completed paragraphs in their entirity + // Then generate all completed paragraphs in their entirety // - for (int paraIndex = currPara+1 ; paraIndex < mParaCount ; paraIndex++) { - + for (int paraIndex = currPara + 1; paraIndex < mParaCount; paraIndex++) { // Add the paragraph break if not at the end // futureSentences += "

"; - - for (CASB_Content rawSentence : data[currPage].text[paraIndex]) { - - otherWordsToSpeak = rawSentence.sentence.replace('-', ' ').replaceAll("['.!?,:;\"\\(\\)]", " ").toUpperCase(Locale.US).trim().split("\\s+"); - + int paraLength = data[currPage].text[paraIndex].length; + for (int lineIndex = 0; lineIndex < paraLength; lineIndex++) { + rawSentence = data[currPage].text[paraIndex][lineIndex].sentence; // Add the previous line to the list of spoken words used to build the // language model - so it allows all on screen words to be spoken - // - for (String word : otherWordsToSpeak) - futureSpoken.add(word); - - futureSentences += processRawSentence(rawSentence.sentence) + TCONST.SENTENCE_SPACE; + for (String word : splitIntoWords(rawSentence)) futureSpoken.add(word); + String variant = computeEffectiveVariant(currPage, paraIndex, lineIndex); + if (!variant.equals("story.prompt") && !variant.equals("story.hide") && !variant.equals("story.reveal")) futureSentences += processRawSentence(rawSentence) + TCONST.SENTENCE_SPACE; } } - // TODO : parameterize the color + // TODO: parameterize the color futureSentencesFmtd = ""; futureSentencesFmtd += futureSentences; futureSentencesFmtd += ""; } - - // Publish the state out to the scripting scope in the tutor - // publishStateValues(); - - // Update the sentence display - // UpdateDisplay(); - - // Once past the storyName initialization stage - Listen for the target word - - // - if (!storyBooting) - speakOrListen(); - } + private String[] splitIntoWords(String text) { + return text.replace('-', ' ').replaceAll("['.!?,:;\"\\(\\)]", " ").toUpperCase(Locale.US).trim().split("\\s+"); + } - private void initSegmentation(int _uttNdx, int _segNdx) { + private void initSegmentation(int _uttNdx) { + Log.d(TAG, "initSegmentation: _uttNdx = " + _uttNdx); - utteranceNdx = _uttNdx; - numUtterance = rawNarration.length; + utteranceNdx = _uttNdx; + numUtterance = rawNarration.length; currUtterance = rawNarration[utteranceNdx]; - segmentArray = rawNarration[utteranceNdx].segmentation; + segmentArray = rawNarration[utteranceNdx].segmentation; + if (segmentArray == null || segmentArray.length == 0) { + String[] words = splitIntoWords(currUtterance.utterances); + segmentArray = new CASB_Seg[words.length]; + for (int i = 0; i < words.length; i++) { + segmentArray[i] = new CASB_Seg(); + segmentArray[i].start = 0; + segmentArray[i].end = 0; + segmentArray[i].word = words[i]; + } + } - segmentNdx = _segNdx; - numSegments = segmentArray.length; + segmentNdx = 0; + numSegments = segmentArray.length; utterancePrev = utteranceNdx == 0 ? 0 : rawNarration[utteranceNdx - 1].until; - segmentPrev = utterancePrev; - - // Clean the extension off the end - could be either wav/mp3 - // - String filename = currUtterance.audio.toLowerCase(); - - if (filename.endsWith(".wav") || filename.endsWith(".mp3")) { - filename = filename.substring(0,filename.length()-4); - } + segmentPrev = utterancePrev; // Publish the current utterance within sentence - // - mParent.publishValue(TCONST.RTC_VAR_UTTERANCE, filename); + String filename = currUtterance.audio.toLowerCase(); + if (filename.endsWith(".wav") || filename.endsWith(".mp3")) filename = filename.substring(0, filename.length() - 4); + mParent.publishValue(TCONST.RTC_VAR_UTTERANCE, filename); // NOTE: Due to inconsistencies in the segmentation data, you cannot depend on it // having precise timing information. As a result the segment may timeout before the // audio has completed. 
To avoid this we use oncomplete in type_audio to push an // TRACK_SEGMENT back to this components queue. - // Tell the script to speak the new uttereance - // - // mParent.applyBehavior(TCONST.SPEAK_UTTERANCE); } - private void trackNarration(boolean start) { + Log.d(TAG, "trackNarration: start = " + start); if (start) { - - mHeardWord = 0; - splitIndex = TCONST.INITSPLIT; + mHeardWord = 0; + splitIndex = TCONST.INITSPLIT; endOfSentence = false; - initSegmentation(0, 0); + initSegmentation(0); - spokenWords = new ArrayList(); + spokenWords = new ArrayList(); - // Tell the script to speak the new uttereance - // - mParent.applyBehavior(TCONST.SPEAK_UTTERANCE); - - postDelayedTracker(); + // Tell the script to speak the new utterance + trackSegment(); } else { - // NOTE: The narration mode uses the ASR logic to simplify operation. In doing this /// it uses the wordsToSpeak array to progressively highlight the on screen text based /// on the timing found in the segmentation data. // // Special processing to account for apostrophes and hyphenated words - // Note the system listens for e.g. "WON'T" as [WON] [T] two words so if we provide "won't" then it "won't" match :) + // Note the system listens for e.g. "WON'T" as [WON] [T] two words so if we provide "won't" then it "won't" match:) // and the narration will freeze // This is a kludge to account for the fact that segmentation data does not split words with // hyphens or apostrophes into separate "words" the way the wordstospeak does. // Without this the narration will get out of sync - // if (splitIndex == TCONST.INITSPLIT) { splitSegment = narrationSegment.word.toUpperCase().split("[\\-']"); - splitIndex = 0; - spokenWords.add(splitSegment[splitIndex++]); - - } else if (splitIndex < splitSegment.length){ - - spokenWords.add(splitSegment[splitIndex++]); - } else { - - Log.d(TAG, "HERE"); } + if (splitIndex < splitSegment.length) spokenWords.add(splitSegment[splitIndex++]); // Update the display - // onUpdate(spokenWords.toArray(new String[spokenWords.size()])); // If the segment word is complete continue to the next segment - note that this is - // generally the case. Words are not usually split by pubctuation - // + // generally the case. Words are not usually split by punctuation if (splitIndex >= splitSegment.length) { - splitIndex = TCONST.INITSPLIT; // sentences are built from an array of utterances which are build from an array @@ -824,61 +663,41 @@ private void trackNarration(boolean start) { // Note the last segment is not timed. It is driven by the TRACK_COMPLETE event // from the audio mp3 playing. This is required as the segmentation data is not // sufficiently accurate to ensure we don't interrupt a playing utterance. - // - segmentNdx++; - if (segmentNdx >= numSegments) { - + if (++segmentNdx >= numSegments) { // If we haven't consumed all the utterances (i.e "narrations") in the // sentence prep the next // // NOTE: Prep the state and wait for the TRACK_COMPLETE event to invoke // trackSegment to continue or terminate - // - utteranceNdx++; - if (utteranceNdx < numUtterance) { - - initSegmentation(utteranceNdx, 0); - - } else { - - endOfSentence = true; - } - } - // All the segments except the last one are timed based on the segmentation data. - // i.e. the audio plays and this highlights words based on prerecorded durations. 
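// ---------------------------------------------------------------------------
// Worked example (illustrative only, not part of this patch): the delay that
// postDelayedTracker() - called just below and defined a little further down
// this hunk - posts for each TRACK_NARRATION is
//
//     delay = ((utterancePrev + segment.end) - segmentPrev) * 10
//
// where utterancePrev is the previous utterance's "until" offset and
// segmentPrev is the absolute end of the segment highlighted last. The x10
// scaling suggests the segmentation offsets are stored in 10 ms units; that is
// an inference from this code, not something stated by the data format. With
// hypothetical values until = 250 and segment ends of 30, 55 and 90, the
// successive word highlights would be scheduled 300 ms, 250 ms and 350 ms apart.
// ---------------------------------------------------------------------------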
-               //
-               else {
+                   if (++utteranceNdx < numUtterance) initSegmentation(utteranceNdx);
+                   else endOfSentence = true;
+               } else {
+                   // All the segments except the last one are timed based on the segmentation data.
+                   // i.e. the audio plays and this highlights words based on prerecorded durations.
                    postDelayedTracker();
                }
-           }
-           // If the segment word is split due to apostrophes or hyphens then consume them
-           // before continuing to the next segment.
-           //
-           else {
+           } else {
+               // If the segment word is split due to apostrophes or hyphens then consume them
+               // before continuing to the next segment.
                mParent.post(TCONST.TRACK_NARRATION, 0);
            }
        }
    }
-
    private void postDelayedTracker() {
+       Log.d(TAG, "postDelayedTracker");

-       narrationSegment = rawNarration[utteranceNdx].segmentation[segmentNdx];
-
+       narrationSegment = segmentArray[segmentNdx];
        segmentCurr = utterancePrev + narrationSegment.end;
-
        mParent.post(TCONST.TRACK_NARRATION, new Long((segmentCurr - segmentPrev) * 10));
-
        segmentPrev = segmentCurr;
    }
-
    private void trackSegment() {
+       Log.d(TAG, "trackSegment");

        if (!endOfSentence) {
-
           // Tell the script to speak the new utterance
-          //
           mParent.applyBehavior(TCONST.SPEAK_UTTERANCE);
           postDelayedTracker();
        } else {
@@ -886,15 +705,9 @@ private void trackSegment() {
        }
    }
-
-   public void execCommand(String command, Object target ) {
-
-       long delay = 0;
-
+   public void execCommand(String command, Object target) {
       switch (command) {
-
           case TCONST.START_NARRATION:
-
               trackNarration(true);
               break;
@@ -911,126 +724,86 @@ public void execCommand(String command, Object target ) {
               break;

           case TCONST.TRACK_NARRATION:
-
               trackNarration(false);
               break;

           case TCONST.TRACK_SEGMENT:
-
               trackSegment();
               break;

           case TCONST.NEXT_NODE:
-
               mParent.nextNode();
               break;

           case TCONST.SPEAK_EVENT:
           case TCONST.UTTERANCE_COMPLETE_EVENT:
-
               mParent.applyBehavior(command);
               break;
-
       }
    }
-
    /**
     * Push the state out to the tutor domain.
-    *
     */
    private void publishStateValues() {
-
        Log.d(TAG, "publishStateValues: mCurrWord = " + mCurrWord + ", mWordCount = " + mWordCount);

-       String cummulativeState = TCONST.RTC_CLEAR;
+       String cumulativeState = TCONST.RTC_CLEAR;
-
-       // ensure encho state has a valid value.
-       //
        mParent.publishValue(TCONST.RTC_VAR_ECHOSTATE, TCONST.FALSE);
        mParent.publishValue(TCONST.RTC_VAR_PARROTSTATE, TCONST.FALSE);

-       if (prompt != null) {
-           mParent.publishValue(TCONST.RTC_VAR_PROMPT, prompt);
-           mParent.publishFeature((TCONST.FTR_PROMPT));
-       }
+       mParent.publishValue(TCONST.RTC_VAR_PROMPT, mCurrPrompt);

-       if (page_prompt != null) {
-           mParent.publishValue(TCONST.RTC_VAR_PAGE_PROMPT, page_prompt);
-           mParent.publishFeature((TCONST.FTR_PAGE_PROMPT));
-       }
+       mParent.setFeature(TCONST.FTR_PROMPT, mParent.testFeature(TCONST.FTR_USER_PROMPT) ? TCONST.ADD_FEATURE : TCONST.DEL_FEATURE);
+       mParent.setFeature(TCONST.FTR_PLAY_PROMPT, mParent.testFeature(TCONST.FTR_USER_PROMPT) || mPrevEffectiveVariant.equals("story.prompt") || mCurrPrompt.equals(mPrevPrompt) ? TCONST.DEL_FEATURE : TCONST.ADD_FEATURE);

        // Set the scriptable flag indicating the current state.
-       //
        if (mCurrWord >= mWordCount) {
-
-           // In echo mode - After line has been echoed we switch to Read mode and
-           // read the next sentence.
-           //
            if (mParent.testFeature(TCONST.FTR_USER_ECHO) || mParent.testFeature(TCONST.FTR_USER_REVEAL) || mParent.testFeature(TCONST.FTR_USER_PARROT)) {
-
-               // Read Mode - When user finishes reading switch to Narrate mode and
-               // narrate the same sentence - i.e. echo
-               //
-               if (hearRead.equals(FTR_USER_READ)) {
-
+               if (hearRead.equals(TCONST.FTR_USER_READ)) {
+                   // Read Mode - When user finishes reading switch to Narrate mode and narrate the same sentence - i.e. echo
                    if (!mParent.testFeature(TCONST.FTR_USER_PARROT)) mParent.publishValue(TCONST.RTC_VAR_ECHOSTATE, TCONST.TRUE);
-
-                   hearRead = TCONST.FTR_USER_HEAR;
-                   mParent.retractFeature(FTR_USER_READING);
+                   mParent.retractFeature(TCONST.FTR_USER_READING);
                    Log.d("ISREADING", "NO");
-
-                   cummulativeState = TCONST.RTC_LINECOMPLETE;
-                   mParent.publishValue(TCONST.RTC_VAR_WORDSTATE, TCONST.LAST);
-
                    mListener.setPauseListener(true);
-               }
-               // Narrate mode - swithc back to READ and set line complete flags
-               //
-               else {
-                   hearRead = FTR_USER_READ;
-                   mParent.publishFeature(FTR_USER_READING);
-
+               } else {
+                   // Narrate mode - switch back to READ
                    if (mParent.testFeature(TCONST.FTR_USER_PARROT)) mParent.publishValue(TCONST.RTC_VAR_PARROTSTATE, TCONST.TRUE);
+                   mParent.publishFeature(TCONST.FTR_USER_READING);
                    Log.d("ISREADING", "YES");
-
-                   cummulativeState = TCONST.RTC_LINECOMPLETE;
-                   mParent.publishValue(TCONST.RTC_VAR_WORDSTATE, TCONST.LAST);
                }
-           } else {
-               cummulativeState = TCONST.RTC_LINECOMPLETE;
-               mParent.publishValue(TCONST.RTC_VAR_WORDSTATE, TCONST.LAST);
            }
+           cumulativeState = TCONST.RTC_LINECOMPLETE;
+           mParent.publishValue(TCONST.RTC_VAR_WORDSTATE, TCONST.LAST);
        } else mParent.publishValue(TCONST.RTC_VAR_WORDSTATE, TCONST.NOT_LAST);

-       if (mCurrLine >= mLineCount-1) {
-           cummulativeState = TCONST.RTC_PARAGRAPHCOMPLETE;
+       if (mCurrLine >= mLineCount - 1) {
+           cumulativeState = TCONST.RTC_PARAGRAPHCOMPLETE;
            mParent.publishValue(TCONST.RTC_VAR_LINESTATE, TCONST.LAST);
        } else mParent.publishValue(TCONST.RTC_VAR_LINESTATE, TCONST.NOT_LAST);

-       if (mCurrPara >= mParaCount-1) {
-           cummulativeState = TCONST.RTC_PAGECOMPLETE;
+       if (mCurrPara >= mParaCount - 1) {
+           cumulativeState = TCONST.RTC_PAGECOMPLETE;
            mParent.publishValue(TCONST.RTC_VAR_PARASTATE, TCONST.LAST);
        } else mParent.publishValue(TCONST.RTC_VAR_PARASTATE, TCONST.NOT_LAST);

-       if (mCurrPage >= mPageCount-1) {
-           cummulativeState = TCONST.RTC_STORYCMPLETE;
+       if (mCurrPage >= mPageCount - 1) {
+           cumulativeState = TCONST.RTC_STORYCMPLETE;
            mParent.publishValue(TCONST.RTC_VAR_PAGESTATE, TCONST.LAST);
        } else mParent.publishValue(TCONST.RTC_VAR_PAGESTATE, TCONST.NOT_LAST);

-       // Publish the cumulative state out to the scripting scope in the tutor
-       //
-       mParent.publishValue(TCONST.RTC_VAR_STATE, cummulativeState);
+       mParent.publishValue(TCONST.RTC_VAR_STATE, cumulativeState);
    }
-
    /**
     * Configure for specific Page
     * Assumes current storyName
@@ -1039,57 +812,45 @@ private void publishStateValues() {
     */
    @Override
    public void seekToPage(int pageIndex) {
-
        mCurrPage = pageIndex;

-       if (mCurrPage > mPageCount-1) mCurrPage = mPageCount-1;
-       if (mCurrPage < TCONST.ZERO)  mCurrPage = TCONST.ZERO;
+       if (mCurrPage > mPageCount - 1) mCurrPage = mPageCount - 1;
+       if (mCurrPage < TCONST.ZERO)    mCurrPage = TCONST.ZERO;

        incPage(TCONST.ZERO);
    }

    @Override
    public void nextPage() {
-
-       if (mCurrPage < mPageCount-1) {
-           incPage(TCONST.INCR);
-       }
+       if (mCurrPage < mPageCount - 1) incPage(TCONST.INCR);

        // Actually do the page animation
-       //
        mParent.animatePageFlip(true, mCurrViewIndex);
    }
+
    @Override
    public void prevPage() {
-
-       if (mCurrPage > 0) {
-           incPage(TCONST.DECR);
-       }
+       if (mCurrPage > 0) incPage(TCONST.DECR);

        //TODO: CHECK
        mParent.animatePageFlip(false, mCurrViewIndex);
    }

    private void incPage(int direction) {
-
        mCurrPage += direction;

        // This configures the target display components to be populated with data.
        // mPageImage - mPageText
-       //
        flipPage();
-
        configurePageImage();

        // Update the state vars
-       // Note that this must be done after flip and configure so the target text and image views
-       // are defined
+       // Note that this must be done after flip and configure so the target text and image views are defined
        // NOTE: we reset mCurrPara, mCurrLine and mCurrWord
-       //
+       setTutorFeatures(mCurrPage, TCONST.ZERO, TCONST.ZERO);
        seekToStoryPosition(mCurrPage, TCONST.ZERO, TCONST.ZERO, TCONST.ZERO);
    }
-
    /**
     * Configure for specific Paragraph
     * Assumes current page
@@ -1098,42 +859,33 @@ private void incPage(int direction) {
     */
    @Override
    public void seekToParagraph(int paraIndex) {
-
        mCurrPara = paraIndex;

-       if (mCurrPara > mParaCount-1) mCurrPara = mParaCount-1;
-       if (mCurrPara < TCONST.ZERO)  mCurrPara = TCONST.ZERO;
+       if (mCurrPara > mParaCount - 1) mCurrPara = mParaCount - 1;
+       if (mCurrPara < TCONST.ZERO)    mCurrPara = TCONST.ZERO;

        incPara(TCONST.ZERO);
    }

    @Override
    public void nextPara() {
-
-       if (mCurrPara < mParaCount-1) {
-           incPara(TCONST.INCR);
-       }
+       if (mCurrPara < mParaCount - 1) incPara(TCONST.INCR);
    }

    @Override
    public void prevPara() {
-
-       if (mCurrPara > 0) {
-           incPara(TCONST.DECR);
-       }
+       if (mCurrPara > 0) incPara(TCONST.DECR);
    }

    // NOTE: we reset mCurrLine and mCurrWord
    private void incPara(int incr) {
-
        mCurrPara += incr;

        // Update the state vars
-       //
+       setTutorFeatures(mCurrPage, mCurrPara, TCONST.ZERO);
        seekToStoryPosition(mCurrPage, mCurrPara, TCONST.ZERO, TCONST.ZERO);
    }
-
    /**
     * Configure for specific line
     * Assumes current page and paragraph
@@ -1142,84 +894,64 @@ private void incPara(int incr) {
     */
    @Override
    public void seekToLine(int lineIndex) {
-
        mCurrLine = lineIndex;

-       if (mCurrLine > mLineCount-1) mCurrLine = mLineCount-1;
-       if (mCurrLine < TCONST.ZERO)  mCurrLine = TCONST.ZERO;
+       if (mCurrLine > mLineCount - 1) mCurrLine = mLineCount - 1;
+       if (mCurrLine < TCONST.ZERO)    mCurrLine = TCONST.ZERO;

        incLine(TCONST.ZERO);
    }

    @Override
    public void nextLine() {
-
-       if (mCurrLine < mLineCount-1) {
-           incLine(TCONST.INCR);
-       }
+       if (mCurrLine < mLineCount - 1) incLine(TCONST.INCR);
    }
+
    @Override
    public void prevLine() {
-
-       if (mCurrLine > 0 ) {
-           incLine(TCONST.DECR);
-       }
+       if (mCurrLine > 0) incLine(TCONST.DECR);
    }

    /**
     * NOTE: we reset mCurrWord - last parm in seekToStoryPosition
-    *
     */
    private void incLine(int incr) {
-
-       // reset boot flag to
-       //
        if (storyBooting) {
-
            storyBooting = false;
-
            speakOrListen();
        } else {
-
            mCurrLine += incr;

            // Update the state vars
-           //
+           setTutorFeatures(mCurrPage, mCurrPara, mCurrLine);
            seekToStoryPosition(mCurrPage, mCurrPara, mCurrLine, TCONST.ZERO);
        }
    }
-
    /**
     * NOTE: we reset mCurrWord - last parm in seekToStoryPosition
-    *
     */
    @Override
    public void echoLine() {
-
-       // reset the echo flag
-       //
        mParent.publishValue(TCONST.RTC_VAR_ECHOSTATE, TCONST.FALSE);
+       hearRead = TCONST.FTR_USER_HEAR;

        // Update the state vars
-       //
        seekToStoryPosition(mCurrPage, mCurrPara, mCurrLine, TCONST.ZERO);
    }
-
    /**
     *
     */
    @Override
    public void parrotLine() {
-
        mParent.publishValue(TCONST.RTC_VAR_PARROTSTATE, TCONST.FALSE);
+       hearRead = TCONST.FTR_USER_READ;
-
-       Log.d(TAG, "parrotLine");
-
+       // Update the state vars
        seekToStoryPosition(mCurrPage, mCurrPara, mCurrLine, TCONST.ZERO);
    }
-
    /**
     * Configure for specific word
     * Assumes current page, paragraph and line
@@ -1228,38 +960,24 @@ public void parrotLine() {
     */
    @Override
    public void seekToWord(int wordIndex) {
-
        mCurrWord = wordIndex;
        mHeardWord = 0;

-       if (mCurrWord > mWordCount-1) mCurrWord = mWordCount-1;
-       if (mCurrWord < TCONST.ZERO)  mCurrWord = TCONST.ZERO;
+       if (mCurrWord > mWordCount - 1) mCurrWord = mWordCount - 1;
+       if (mCurrWord < TCONST.ZERO)    mCurrWord = TCONST.ZERO;

-       // Update the state vars
-       //
        seekToStoryPosition(mCurrPage, mCurrPara, mCurrLine, wordIndex);
-
        incWord(TCONST.ZERO);
-
-       // Start listening from the new position
-       //
-       speakOrListen();
-
    }
-
    @Override
    public void nextWord() {
-
-       if (mCurrWord < mWordCount) {
-           incWord(TCONST.INCR);
-       }
+       if (mCurrWord < mWordCount) incWord(TCONST.INCR);
    }
+
    @Override
    public void prevWord() {
-
-       if (mCurrWord > 0) {
-           incWord(TCONST.DECR);
-       }
+       if (mCurrWord > 0) incWord(TCONST.DECR);
    }

    /**
@@ -1271,34 +989,78 @@ public void prevWord() {
     * @param incr
     */
    private void incWord(int incr) {
-
        mCurrWord += incr;

        // For instances where we are advancing the word manually through a script it is required
        // that you reset the highlight and the FTR_WRONG so the next word is highlighted correctly
-       //
        setHighLight(TCONST.EMPTY, false);
        mParent.UpdateValue(true);

-       // Publish the state out to the scripting scope in the tutor
-       //
        publishStateValues();
-
-       // Update the sentence display
-       //
        UpdateDisplay();
    }

+   /**
+    * Reconfigure variant for a specific page / paragraph / line (seeks to)
+    *
+    * @param currPage
+    * @param currPara
+    * @param currLine
+    */
+   private void setTutorFeatures(int currPage, int currPara, int currLine) {
+       Log.d(TAG, "setTutorFeatures: currPage = " + currPage + ", currPara = " + currPara + ", currLine = " + currLine);
+
+       mPrevEffectiveVariant = mCurrEffectiveVariant;
+       mCurrEffectiveVariant = computeEffectiveVariant(currPage, currPara, currLine);
+       mParent.setTutorFeatures(mCurrEffectiveVariant);
+
+       if (storyBooting) mParent.setFeature(TCONST.FTR_STORY_STARTING, TCONST.ADD_FEATURE);
+       restartListener = true;
+
+       pagePrompt = data[currPage].prompt;
+       mPrevPrompt = mCurrPrompt;
+
+       if (prompt != null) mCurrPrompt = prompt;
+       else if (mParent.testFeature(TCONST.FTR_USER_READ) || mParent.testFeature(TCONST.FTR_USER_ECHO) || mParent.testFeature(TCONST.FTR_USER_REVEAL)) mCurrPrompt = "Read to RoboTutor.mp3";
+       else if (mParent.testFeature(TCONST.FTR_USER_HEAR) || mParent.testFeature(TCONST.FTR_USER_HIDE)) mCurrPrompt = "Listen.mp3";
+       else if (mParent.testFeature(TCONST.FTR_USER_PARROT)) mCurrPrompt = "Repeat after RoboTutor.mp3";
+
+       // Narration Mode (i.e. USER_HEAR) always narrates the story otherwise we
+       // start with USER_READ where the student reads aloud and if USER_ECHO
+       // is in effect we then toggle between READ and HEAR for each sentence.
+       if (mParent.testFeature(TCONST.FTR_USER_PROMPT) || mParent.testFeature(TCONST.FTR_USER_HEAR) || mParent.testFeature(TCONST.FTR_USER_HIDE) || mParent.testFeature(TCONST.FTR_USER_PARROT)) {
+           hearRead = TCONST.FTR_USER_HEAR;
+           mParent.setFeature(TCONST.FTR_USER_READING, TCONST.DEL_FEATURE);
+       } else {
+           hearRead = TCONST.FTR_USER_READ;
+           mParent.setFeature(TCONST.FTR_USER_READING, TCONST.ADD_FEATURE);
+       }
+
+       Log.d(TAG, "setTutorFeatures: hearRead = " + hearRead);
+   }
+
+   private String computeEffectiveVariant(int currPage, int currPara, int currLine) {
+       String pageVariant = data[currPage].variant;
+       String sentenceVariant = data[currPage].text[currPara][currLine].variant;
+       String currEffectiveVariant = (sentenceVariant != null) ? ("story." + sentenceVariant) : (pageVariant != null) ? ("story." + pageVariant) : mParent.getTutorVariant();
+
+       Log.d(TAG, "computeEffectiveVariant: sentenceVariant = " + sentenceVariant + ", pageVariant = " + pageVariant + ", tutorVariant = " + mParent.getTutorVariant());
+       Log.d(TAG, "computeEffectiveVariant: currEffectiveVariant = " + currEffectiveVariant);
+
+       return currEffectiveVariant;
+   }
+
    /**
     * This picks up listening from the last word - so it seeks to wherever we are in the
     * current sentence and listens from there.
     */
    public void continueListening() {
-       speakOrListen();
+       if (hearRead.equals(TCONST.FTR_USER_HEAR)) mParent.applyBehavior(TCONST.NARRATE_STORY);
+       if (hearRead.equals(TCONST.FTR_USER_READ) && restartListener) startListening();
    }
-
    private void startListening() {
+       Log.d(TAG, "startListening");

        // We allow the user to say any of the onscreen words but set the priority order of how we
        // would like them matched Note that if the listener is not explicitly listening for a word
@@ -1307,34 +1069,20 @@ private void startListening() {
        // for the current target word.
        // 1. Start with the target word on the target sentence
        // 2. Add the words from there to the end of the sentence - just to permit them
-       // 3. Add the words alread spoken from the other lines - just to permit them
+       // 3. Add the words already spoken from the other lines - just to permit them
        //
        // "Permit them": So the language model is listening for them as possibilities.
-       //
        wordsToListenFor = new ArrayList<>();

-       for (int i1 = mCurrWord; i1 < wordsToSpeak.length; i1++) {
-           wordsToListenFor.add(wordsToSpeak[i1]);
-       }
-       for (int i1 = 0; i1 < mCurrWord; i1++) {
-           wordsToListenFor.add(wordsToSpeak[i1]);
-       }
-       for (String word : wordsSpoken) {
-           wordsToListenFor.add(word);
-       }
+       for (int i1 = mCurrWord; i1 < wordsToSpeak.length; i1++) wordsToListenFor.add(wordsToSpeak[i1]);
+       for (int i1 = 0; i1 < mCurrWord; i1++) wordsToListenFor.add(wordsToSpeak[i1]);
+       for (String word : wordsSpoken) wordsToListenFor.add(word);

        // If we want to listen for all the words that are visible
-       //
-       if (listenFutureContent) {
-           for (String word : futureSpoken) {
-               wordsToListenFor.add(word);
-           }
-       }
+       if (listenFutureContent) for (String word : futureSpoken) wordsToListenFor.add(word);

        // Start listening
-       //
        if (mListener != null) {
-
            // reset the relative position of mCurrWord in the incoming PLRT heardWords array
            mHeardWord = 0;
            mListener.reInitializeListener(true);
@@ -1342,86 +1090,70 @@ private void startListening() {
            mListener.listenFor(wordsToListenFor.toArray(new String[wordsToListenFor.size()]), 0);
            mListener.setPauseListener(false);
+           restartListener = false;
        }
    }
-
    /**
     * Scipting mechanism to update target word highlight
     * @param highlight
     */
    @Override
    public void setHighLight(String highlight, boolean update) {
-
        mCurrHighlight = highlight;
-
-       // Update the sentence display
-       //
-       if (update)
-           UpdateDisplay();
+       if (update) UpdateDisplay();
    }
-
    /**
     * Update the displayed sentence
     */
    private void UpdateDisplay() {
+       Log.d(TAG, "UpdateDisplay: hearRead = " + hearRead);

-       if (showWords) {
-           String fmtSentence = "";
+       mPageText.setText(Html.fromHtml(""));

-           for (int i = 0; i < wordsToDisplay.length; i++) {
-
-               String styledWord = wordsToDisplay[i];                  // default plain
-
-               if (i < mCurrWord) {
-                   styledWord = "" + styledWord + "";
+       String fmtSentence = "";
+       if (showWords) {
+           for (int index = 0; index < wordsToDisplay.length; index++) {
+               String styledWord = wordsToDisplay[index];              // default plain
+
+               if (index < mCurrWord) {
+                   if (index < prevFmtSentence.size()) styledWord = prevFmtSentence.get(index);
+                   else {
+                       if (!skippedWord) styledWord = "" + styledWord + "";
+                       prevFmtSentence.add(index, styledWord);
+                   }
                }

-               if (i == mCurrWord) {// style the next expected word
-
-                   if (!mCurrHighlight.equals(TCONST.EMPTY))
-                       styledWord = "" + styledWord + "";
-
+               if (showFutureWords && index == mCurrWord) {
+                   if (!mCurrHighlight.equals(TCONST.EMPTY)) styledWord = "" + styledWord + "";
                    styledWord = "" + styledWord + "";
                }

-               if (showFutureWords || i < mCurrWord) {
-                   if (wordsToDisplay[i].endsWith("'") || wordsToDisplay[i].endsWith("-")) {
-                       fmtSentence += styledWord;
-                   } else {
-                       fmtSentence += styledWord + ((i < wordsToDisplay.length - 1) ? TCONST.WORD_SPACE : TCONST.NO_SPACE);
-                   }
+               if (index > mCurrWord && hearRead.equals(TCONST.FTR_USER_HEAR)) styledWord = "" + styledWord + "";
+
+               if (showFutureWords || index < mCurrWord) {
+                   fmtSentence += styledWord + ((wordsToDisplay[index].endsWith("'") || wordsToDisplay[index].endsWith("-") || (index >= wordsToDisplay.length - 1)) ? TCONST.NO_SPACE : TCONST.WORD_SPACE);
                }
            }
+       }

-           // Generate the text to be displayed
-           //
-           String content = completedSentencesFmtd + fmtSentence;
-
-           if (showFutureContent)
-               content += TCONST.SENTENCE_SPACE + futureSentencesFmtd;
-
-           mPageText.setText(Html.fromHtml(content));
+       // Generate the text to be displayed
+       String content = completedSentencesFmtd + fmtSentence + TCONST.SENTENCE_SPACE + futureSentencesFmtd;
+       mPageText.setText(Html.fromHtml(content));

-           Log.d(TAG, "Story Sentence Text: " + content);
-       }
+       Log.d(TAG, "Story Sentence Text: " + content);

        if (showWords && (showFutureWords || mCurrWord > 0)) broadcastActiveTextPos(mPageText, wordsToDisplay);

        // Publish the current word / sentence / remaining words for use in scripts
-       //
        if (mCurrWord < wordsToSpeak.length) {
            mParent.publishValue(TCONST.RTC_VAR_WORDVALUE, wordsToSpeak[mCurrWord]);
-
-           String remaining[] = Arrays.copyOfRange(wordsToSpeak, mCurrWord, wordsToSpeak.length);
-
-           mParent.publishValue(TCONST.RTC_VAR_REMAINING, TextUtils.join(" ", remaining));
+           mParent.publishValue(TCONST.RTC_VAR_REMAINING, TextUtils.join(" ", Arrays.copyOfRange(wordsToSpeak, mCurrWord, wordsToSpeak.length)));
            mParent.publishValue(TCONST.RTC_VAR_SENTENCE, TextUtils.join(" ", wordsToSpeak));
        }
    }
-
    /**
     *
     * @param text
@@ -1429,31 +1161,25 @@ private void UpdateDisplay() {
     * @return
     */
    private PointF broadcastActiveTextPos(TextView text, String[] words){
-
-       PointF point = new PointF(0,0);
-       int    charPos = 0;
-       int    maxPos;
+       PointF point = new PointF(0,0);
+       int    charPos = 0;
+       int    maxPos;

        try {
            Layout layout = text.getLayout();

            if (layout != null && mCurrWord < words.length) {
-
                // Point to the start of the Target sentence (mCurrLine)
-               charPos = completedSentences.length();
+               charPos = completedSentences.length();

                // Find the starting character of the current target word
-               for (int i1 = 0; i1 <= mCurrWord; i1++) {
-                   charPos += words[i1].length() + 1;
-               }
+               for (int i1 = 0; i1 <= mCurrWord; i1++) charPos += words[i1].length() + 1;

                // Look at the end of the target word
                charPos -= 1;

-               // Note that sending a value greater than maxPos will corrupt the textView - so
-               // guarantee this will never happen.
-               //
-               maxPos = text.getText().length();
+               // Note that sending a value greater than maxPos will corrupt the textView - so guarantee this will never happen.
+               maxPos  = text.getText().length();
                charPos = (charPos > maxPos) ? maxPos : charPos;
                point.x = layout.getPrimaryHorizontal(charPos);
@@ -1463,7 +1189,6 @@ private PointF broadcastActiveTextPos(TextView text, String[] words){
                CPersonaObservable.broadcastLocation(text, TCONST.LOOKAT, point);
            }
-
        } catch (Exception e) {
-
            Log.d(TAG, "broadcastActiveTextPos: " + e.toString());
        }
@@ -1471,26 +1196,21 @@ private PointF broadcastActiveTextPos(TextView text, String[] words){
        return point;
    }
-
    /**
     * This is where we process words being narrated
     *
     */
    @Override
    public void onUpdate(String[] heardWords) {
+       boolean result = true;
+       String logString = "";

-       boolean result = true;
-       String logString = "";
+       for (int i = 0; i < heardWords.length; i++) logString += heardWords[i].toLowerCase() + " | ";

-       for (int i = 0; i < heardWords.length; i++) {
-           logString += heardWords[i].toLowerCase() + " | ";
-       }
        Log.i("ASR", "Update Words Spoken: " + logString);

        while (mHeardWord < heardWords.length) {
-
            if (wordsToSpeak[mCurrWord].equals(heardWords[mHeardWord])) {
-
                nextWord();
                mHeardWord++;
@@ -1514,7 +1234,6 @@ public void onUpdate(String[] heardWords) {
        mParent.UpdateValue(result);
    }
-
    /**
     * This is where the incoming PLRT ASR data is processed.
     *
     * @param heardWords
     * @param finalResult
     *
     * to using 2 simultaneous decoders one for the correct sentence and one for any other "distractor"
     * words. i.e. other words in the sentence in this case.
     *
-    * TODO: check if it is possible for the hypothesis to chamge between last update and final hyp
+    * TODO: check if it is possible for the hypothesis to change between last update and final hyp
     */
    @Override
    public void onUpdate(ListenerBase.HeardWord[] heardWords, boolean finalResult) {
-
-       boolean result = true;
-       String logString = "";
+       boolean result = true;
+       String logString = "";

        try {
            for (int i = 0; i < heardWords.length; i++) {
-               if (heardWords[i] != null) {
-                   logString += heardWords[i].hypWord.toLowerCase() + ":" + heardWords[i].iSentenceWord + " | ";
-               } else {
-                   logString += "VIRTUAL | ";
-               }
+               if (heardWords[i] != null) logString += heardWords[i].hypWord.toLowerCase() + ":" + heardWords[i].iSentenceWord + " | ";
+               else logString += "VIRTUAL | ";
            }

            while ((mCurrWord < wordsToSpeak.length) && (mHeardWord < heardWords.length)) {
+               if (wordsToSpeak[mCurrWord].equals(heardWords[mHeardWord].hypWord) || ("START_" + wordsToSpeak[mCurrWord]).equals(heardWords[mHeardWord].hypWord)) {
+                   Log.i("ASR", "RIGHT");

-               if (wordsToSpeak[mCurrWord].equals(heardWords[mHeardWord].hypWord)) {
-
+                   skippedWord = false;
                    nextWord();
                    mHeardWord++;
-                   mListener.updateNextWordIndex(mHeardWord);
+                   attemptNum = 0;
+                   result = true;
+                   mParent.updateContext(rawSentence, mCurrLine, wordsToSpeak, mCurrWord - 1, heardWords[mHeardWord - 1].hypWord, attemptNum, heardWords[mHeardWord - 1].utteranceId == "", true);
+               } else if (skippingWords && !skippedWord && attemptNum == 0 && mCurrWord + 1 < wordsToSpeak.length && (wordsToSpeak[mCurrWord + 1].equals(heardWords[mHeardWord].hypWord) || ("START_" + wordsToSpeak[mCurrWord + 1]).equals(heardWords[mHeardWord].hypWord))) {
+                   Log.i("ASR", "SKIPPED");

-                   Log.i("ASR", "RIGHT");
+                   skippedWord = true;
+                   nextWord();
                    attemptNum = 0;
                    result = true;
-                   mParent.updateContext(rawSentence, mCurrLine, wordsToSpeak, mCurrWord - 1, heardWords[mHeardWord - 1].hypWord, attemptNum, heardWords[mHeardWord - 1].utteranceId == "", result);
+                   mParent.updateContext(rawSentence, mCurrLine, wordsToSpeak, mCurrWord - 1, "SKIPPED", attemptNum, heardWords[mHeardWord].utteranceId == "", false);
+               } else if (skippingWords && !skippedWord && attemptNum == 0) {
+                   Log.i("ASR", "SKIPPED WRONG");
+                   skippedWord = true;
+                   nextWord();
+                   mHeardWord++;
+                   mListener.updateNextWordIndex(mHeardWord);
+                   attemptNum = 0;
+                   result = true;
+                   mParent.updateContext(rawSentence, mCurrLine, wordsToSpeak, mCurrWord - 1, "SKIPPED_" + heardWords[mHeardWord - 1].hypWord, attemptNum, heardWords[mHeardWord - 1].utteranceId == "", false);
                } else {
+                   Log.i("ASR", "WRONG");
+                   skippedWord = false;
                    mListener.setPauseListener(true);
-
-                   Log.i("ASR", "WRONG");
+                   restartListener = true;
                    attemptNum++;
                    result = false;
-                   mParent.updateContext(rawSentence, mCurrLine, wordsToSpeak, mCurrWord, heardWords[mHeardWord].hypWord, attemptNum, heardWords[mHeardWord].utteranceId == "", result);
+                   mParent.updateContext(rawSentence, mCurrLine, wordsToSpeak, mCurrWord, heardWords[mHeardWord].hypWord, attemptNum, heardWords[mHeardWord].utteranceId == "", false);
                    break;
                }
            }
@@ -1575,29 +1306,22 @@ public void onUpdate(ListenerBase.HeardWord[] heardWords, boolean finalResult) {
            mParent.UpdateValue(result);

            mParent.onASREvent(TCONST.RECOGNITION_EVENT);
-
        } catch (Exception e) {
-
            Log.e("ASR", "onUpdate Fault: " + e);
        }
    }
-
    public void generateVirtualASRWord() {
-
        mListener.setPauseListener(true);

        ListenerBase.HeardWord words[] = new ListenerBase.HeardWord[mHeardWord+1];
-
        words[mHeardWord] = new ListenerBase.HeardWord(wordsToSpeak[mCurrWord]);
        onUpdate(words, false);
-
        mListener.setPauseListener(false);
        // startListening();
    }
-
    /**
     * This is where incoming JSGF ASR data would be processed.
     *
@@ -1605,22 +1329,15 @@ public void generateVirtualASRWord() {
     */
    @Override
    public void onUpdate(String[] heardWords, boolean finalResult) {
-
//        String logString = "";
//
-//        for (String hypWord : heardWords) {
-//            logString += hypWord.toLowerCase() + ":" ;
-//        }
-//        Log.i("ASR", "New JSGF HypSet: " + logString);
-//
+//        for (String hypWord : heardWords) logString += hypWord.toLowerCase() + ":";
+//        Log.i("ASR", "New JSGF HypSet: " + logString);
//
//        mParent.publishValue(TCONST.RTC_VAR_ATTEMPT, attemptNum++);
-//
//        mParent.onASREvent(TCONST.RECOGNITION_EVENT);
-
    }
-
    @Override
    public boolean endOfData() {
        return false;
@@ -1629,7 +1346,6 @@ public boolean endOfData() {

    //************ Serialization
-
    /**
     * Load the data source
     *
@@ -1637,7 +1353,6 @@ public boolean endOfData() {
     */
    @Override
    public void loadJSON(JSONObject jsonData, IScope scope) {
-
        JSON_Helper.parseSelf(jsonData, this, CClassMap.classMap, scope);
    }
}
diff --git a/util/src/main/java/cmu/xprize/util/TCONST.java b/util/src/main/java/cmu/xprize/util/TCONST.java
index 20958dded..b35706c98 100644
--- a/util/src/main/java/cmu/xprize/util/TCONST.java
+++ b/util/src/main/java/cmu/xprize/util/TCONST.java
@@ -20,8 +20,6 @@
 // global tutor constants

-import android.os.Environment;
-
 import java.util.HashMap;

 public class TCONST {
@@ -39,14 +37,12 @@ public class TCONST {
     public static final String FTR_GOODBYE = "GOODBYE";
     public static final int NUM_GOODBYE_SOUND_CLIPS = 9; // make sure this matches the number of sound clips in
                                                          // the "EXIT_BUTTON_BEHAVIOR" object in activity_selector/animator_graph.json
-
     public static final String SKILL_WRITING = "letters";
     public static final String SKILL_STORIES = "stories";
     public static final String SKILL_MATH = "numbers";

     public static final String FINISH = "FINISH";
-
     // RoboTutor Version spec Index meaning 0.1.2.3
     // Given 4.23.9.8
     // Major release 4 | Feature release 23 | Fix release 9 | compatible Asset Version 8
@@ -56,7 +52,6 @@ public class TCONST {
     public static final int FIX_RELEASE = 2;
     public static final int ASSET_VERSION = 3;
-
     // Spec elements for asset zip files releases
     // For assets to be compatible with Robotutor the ASSET_CODE_VERSION must match the ASSET_VERSION
     //
@@ -64,7 +59,6 @@ public class TCONST {
     public static final int ASSET_RELEASE_VERSION = 1;
     public static final int ASSET_UPDATE_VERSION = 2;
-
     // These represent the base name for assets delivered in Zip files and loaded
     // through calls to updateZipAsset
@@ -106,7 +100,6 @@ public class TCONST {
     public static final String SAY_STIMULUS = "FTR_SAY";
     public static final String SHOW_STIMULUS = "FTR_SHOW";
-
     public static final String ASM_DIGIT_OR_OVERHEAD_CORRECT = "ASM_DIGIT_OR_OVERHEAD_CORRECT";
     public static final String ASM_DIGIT_OR_OVERHEAD_WRONG = "ASM_DIGIT_OR_OVERHEAD_WRONG";
     public static final String ASM_CLICK_ON_DOT = "ASM_CLICK_ON_DOT";
@@ -137,7 +130,6 @@ public class TCONST {
     public static final String SET_BANNER_COLOR = "SET_BANNER_COLOR";

     public static final String LAST_ATTEMPT = "FTR_LASTATTEMPT";
-
     public static final String FONT_FOLDER = "fonts/";
     public static final String SHOWICONS = "SHOWICONS";
     public static final String SHOWNUM = "SHOWNUM";
@@ -221,6 +213,7 @@ public class TCONST {
     public static final String FTR_USER_HIDE = "FTR_USER_HIDE";
     public static final String FTR_USER_REVEAL = "FTR_USER_REVEAL";
     public static final String FTR_USER_PARROT = "FTR_USER_PARROT";
+    public static final String FTR_USER_PROMPT = "FTR_USER_PROMPT";
     public static final String FTR_USER_READING = "FTR_USER_READING";
     // UHQ
     public static final String FTR_GEN = "FTR_GEN";
@@ -245,8 +238,6 @@ public class TCONST {
     public static final int MAX_AKDATA = 10;

     public static final String FTR_COMPLETE = "FTR_COMPLETE";
-    public static final String FTR_PROMPT = "FTR_PROMPT";
-    public static final String FTR_PAGE_PROMPT = "FTR_PAGE_PROMPT";

     public static final String START_PROGRESSIVE_UPDATE = "START_PROGRESSIVE_UPDATE";
     public static final String START_INDETERMINATE_UPDATE = "START_INDETERMINATE_UPDATE";
@@ -269,7 +260,6 @@ public class TCONST {
     public static final String NEXT_SCENE = "NEXT_SCENE";
     public static final String NEXT_WORD = "NEXT_WORD";
-
     // Core log message types - anumation scenegraph and queued scenegraph
     //
     public static final String TUTOR_STATE_MSG = "TSTag";
@@ -296,7 +286,6 @@ public class TCONST {
     public static final String SESSION_ID_VAR = "sessionId";

     public static final String LAST_TUTOR = "LAST_TUTOR_PLAYED";
-
     static public HashMap colorMap = new HashMap();
     //
     // This is used to map "states" to colors
@@ -320,7 +309,6 @@ public class TCONST {

     public static final int STROKE_STIM_UNDERLINE = 5;
-
     static public HashMap fontMap = new HashMap();

     static {
@@ -329,7 +317,6 @@ public class TCONST {
         fontMap.put("grundschrift-punkt", FONT_FOLDER + "Grundschrift-Punkt.otf");
     }
-
     //*** Reading Tutor compatible string combinations

     static public HashMap numberMap = new HashMap();
@@ -339,7 +326,6 @@ public class TCONST {
         numberMap.put("LANG_SW", "NA,SIFURI,MOJA,MBILI,TATU,NNE,TANO,SITA,SABA,NANE,TISA,KUMI,ISHIRINI,THELATHINI,AROBAINI,HAMSINI,SITINI,SABINI,THEMANINI,TISINI,MIA,ELFU,MILIONI,BILIONI,TRILIONI,KWADRILIONI");
     }
-
     // This is used to map "language features" to the story resources
     // these are located in the assets/
     // Note: on Android these are case sensitive filenames
@@ -361,7 +347,6 @@ public class TCONST {

     // JSON parameter constants
-
     // Loader Constants

     static final public String TUTORROOT = "tutors";
@@ -422,7 +407,6 @@ public class TCONST {
     public static final String FIRST_SCENE = "GOTO_FIRST_SCENE";
     public static final String REC_GLYPH = "REC_GLYPH";
-
     // CActionTrack track types
     // Note these must case-match the layer names in the Flash
     // timeline specification from which CActionTrack is derived
@@ -477,7 +461,6 @@ public class TCONST {
     public static final String SUBGRAPH_RETURN_AND_GO = "SUBGRAPH_RETURN_AND_GO";
     public static final String SUBGRAPH_RETURN_AND_WAIT = "SUBGRAPH_RETURN_AND_WAIT";
-
     // Condition parser FSM states
     public static final int STARTSTATE = 0;
     public static final int PARSESTATE = 1;
@@ -504,7 +487,6 @@ public class TCONST {

     public static final char EOT = '\04';
-
     // type_action - command types
     public static final String CMD_DEBUG = "DEBUG";
     public static final String CMD_WAIT = "WAIT";
@@ -512,7 +494,6 @@ public class TCONST {
     public static final String CMD_NEXT = "NEXT";
     public static final String CMD_LAUNCH = "LAUNCH-TUTOR";
-
     // Intrinsic types

     public static final String TREFERENCE = "TReference";
@@ -529,13 +510,16 @@ public class TCONST {
     public static final String STARE_STOP = "STARE_STOP";

     public static final String FTR_STORY_STARTING = "FTR_STORY_STARTING";
+    public static final String FTR_PLAY_PROMPT = "FTR_PLAY_PROMPT";
+    public static final String FTR_PROMPT = "FTR_PROMPT";
+    public static final String FTR_PAGE_PROMPT = "FTR_PAGE_PROMPT";
     public static final String FWCORRECT = "FTR_RIGHT";
     public static final String FWINCORRECT = "FTR_WRONG";
     public static final String FWUNKNOWN = "FTR_UNRECOGNIZED";
     public static final String FTR_EOI = "FTR_NOWORDS";
     public static final String FTR_EOD = "FTR_EOD";
-    public static final String CONTINUE = "CONTINUE";
+    public static final String CONTINUE = "CONTINUE";

     public static final String ALL_CORRECT = "ALL_CORRECT";
     public static final String LOG_CORRECT = "CORRECT";
@@ -548,7 +532,6 @@ public class TCONST {
     public static final String RAND = "random";

     public static final String MINUSONE = "-1";
-
     // PocketSphinx Recognizer Constants

     public static final int UNKNOWNEVENT_TYPE = 0;
@@ -570,7 +553,6 @@ public class TCONST {

     public static final int ALL_EVENTS = 0xFFFFFFFF;
-
     public static final String ASR_TIMED_START_EVENT = "ASR_TIMED_START_EVENT";
     public static final String ASR_RECOGNITION_EVENT = "ASR_RECOGNITION_EVENT";
     public static final String ASR_ERROR_EVENT = "ASR_ERROR_EVENT";
@@ -604,8 +586,6 @@ public class TCONST {
         ASREventMap.put(ASR_ALL_EVENTS,TCONST.ALL_EVENTS);
     }
-
-
     public static final int NOINTERVENTION = 0;
     public static final int INSPEECH = 1;
     public static final int SAYWORD = 2;
@@ -616,13 +596,11 @@ public class TCONST {

     public static final String SOURCEFILE = "[file]";
     public static final String ASSETFILE = "[asset]";
-
     public static final String TTS = "TTS";
     public static final String ASR = "ASR";

     public static final String GLYPH_DATA = "GLYPH_DATA";
-
     // LTK messaging constants
     public static final String FW_STIMULUS = "FW_UPDATED";
     public static final String FW_VALUE = "FW_VALUE";
@@ -639,17 +617,14 @@ public class TCONST {
     public static final String ROBOTUTOR_ASSET_FOLDER = "/robotutor_assets/";
     public static final String GLYPHS_FOLDER = "/glyphs/";
-
     // Listener Control message types
     public static final String LISTENER_RESPONSE = "LISTENER_RESPONSE";
-
     // TTS command constants
     public static final String SAY = "SAY";
     public static final String SET_RATE = "SET_RATE";
-
-    // Number Listeneing Component
+    // Number Listening Component
     private static final String[] placeValue = {".ones",".tens",".hundreds",".thousands",".millions",".billions"};

     public static final String ERR_SINGLEDIGIT = "Single Digit Error";
@@ -684,13 +659,11 @@ public class TCONST {
     public static final boolean FALSE_NOERROR = false;
     public static final boolean FALSE_ERROR = false;
-
     // MediaManager constants.
     public static final String MEDIA_AUDIO = "MEDIA_AUDIO";
     public static final String MEDIA_TIMELINE = "MEDIA_TIMELINE";

     public static final String DEFAULT_SOUND_PACKAGE = "default";
-
     // ASR (automated speech recognition) constants
     public static final Long STABLE_TIME = 300L; // Time a word has to be stable before it is emitted.
     public static final String FINAL_HYPOTHESIS = "FINALHYPOTHESIS";
@@ -746,7 +719,6 @@ public class TCONST {
     public static final int ERRW2N_INVALID_HUNDREDS = 36; // e.g. saying "thousand hundred"
     public static final int ERRW2N_INVALID_POWER = 37;    // e.g. saying "and thousand"
-
     public static final String[] W2N_ERRORMSG = {"NO_ERROR","NO_DATA","LEADING_CONJ","NONTERM_ZERO","NONSOLITARY_ZERO","MISSING_CONJ","MISSING_HUNDRED_MULTI","HUNDRED_ADDED_CONJ","INCREASING_POWER","REPEATED_POWER","POWER_CONJ","INCREASING_MULTIPLIER","REPEAT_CONJ","INTERNAL","ZERO_HUNDRED_MULTI","INVALID_TEXT"};

     // W2N Warning types
@@ -767,7 +739,6 @@ public class TCONST {

     public static final int NEXTPAGE = 1;
     public static final int PREVPAGE = -1;
-
     // READING Tutor State names -- RTC Reading Tutor Component

     public static final String PAGEFLIP_BUTTON = "PAGE_FLIP_CLICK";
@@ -810,14 +781,14 @@ public class TCONST {
     public static final String RTC_CLEAR = "";

     public static final String RTC_VAR_PROMPT = ".prompt";
-    public static final String RTC_VAR_PAGE_PROMPT = ".page_prompt";
+    public static final String RTC_VAR_PAGE_PROMPT = ".pagePrompt";
     public static final String RTC_VAR_WORDVALUE = ".currentWord";
     public static final String RTC_VAR_INDEX = ".wordindex";
     public static final String RTC_VAR_REMAINING = ".remainingWords";
     public static final String RTC_VAR_SENTENCE = ".sentence";
     public static final String RTC_VAR_UTTERANCE = ".utterance";

-    //Akira Game Prompt Situation
+    // Akira Game Prompt Situation
     public static final String PROMPT_1LEFT = "PROMPT_1LEFT";
     public static final String PROMPT_1MID = "PROMPT_1MID";
     public static final String PROMPT_1RIGHT = "PROMPT_1RIGHT";
@@ -827,14 +798,12 @@ public class TCONST {
     public static final String PROMPT_3 = "PROMPT_3";
     public static final String PROMPT_3V = "PROMPT_3V";
-
     // Writing behavior...
     public static final int WRITING_DATA_LIMIT = 10;

     // Counting
     public static final String COUNTING_DEBUG_LOG = "COUNTING_DEBUG_LOG";
-
     // Data source debugger

     public static final String TAG_DEBUG_AKIRA = "akira";
@@ -852,7 +821,6 @@ public class TCONST {
     public static final String ROBO_DEBUG_FILE_ASM = "math_test.json";
     public static final String ROBO_DEBUG_FILE_BPOP = "bpop.json";
-
     // Debugger Thumb key words
     // NEW_THUMBS trace me
     public enum Thumb {