Added voice control

Former-commit-id: 6f69079bf44f0d8f9ae40de6b0f1638d103464c2
This commit is contained in:
Ziver Koc 2015-05-13 21:14:10 +00:00
parent 35c92407a3
commit 53da641909
863 changed files with 192681 additions and 0 deletions

74
external/marytts-5.1.2/LICENSE.txt vendored Normal file
View file

@ -0,0 +1,74 @@
MARY Software User Agreement
11 April 2011
MARY is licensed under the following terms.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Applicable Licenses
MARY is built upon a number of other open source technologies and products.
Here is a list of those products with links to their licenses.
hts_engine: the HMM-based speech synthesis code in MARY TTS is based on HTS, ported to Java by DFKI. The original HTS can be obtained from
http://hts-engine.sourceforge.net/ -- it is released under the New and
Simplified BSD License.
freetts: MARY uses code from FreeTTS (http://freetts.sf.net) for various
processing modules and as the source of one method for waveform synthesis.
FreeTTS is licensed under the (BSD-style) FreeTTS license, see
doc/licenses/freetts-license.txt.
JTok: The JTok tokenizer from http://heartofgold.dfki.de is distributed
under the GNU Lesser General Public License, see http://www.gnu.org or
doc/licenses/LGPL.txt.
jsresources.jar: A few utility classes from http://www.jsresources.org
are distributed under the terms of the jsresources license, see
doc/licenses/jsresources-license.txt.
log4j: MARY uses log4j (http://logging.apache.org/log4j) as a logging
mechanism. log4j is distributed under the Apache Software License, see
http://www.apache.org or doc/licenses/apache-software-license.txt
JUnit: For unit testing of the java source, mary uses JUnit
(http://junit.org). JUnit is licensed under the Common Public License, see
http://junit.org or doc/licenses/CPL.txt.
java-diff: A java diff implementation from http://www.incava.org/projects/java-diff for input-output-comparisons in the
Mary Expert Interface. java-diff is licensed under the GNU Lesser General
Public License, see http://www.gnu.org or doc/licenses/LGPL.txt.
fast-md5: A fast md5 checksum implementation from http://www.twmacinta.com/myjava/fast_md5.php
used for computing checksums after downloading voices. fast-md5 is licensed under
the GNU Lesser General Public License, see http://www.gnu.org or doc/licenses/LGPL.txt.
JavaOpenAIR: MARY can optionally be used as an OpenAIR component,
building on the JavaOpenAIR reference implementation from
http://www.mindmakers.org, which is licensed under the
(BSD-style) JavaOpenAIR license, see doc/licenses/JavaOpenAIR-license.txt
(files concerned: JavaOpenAIR.jar)
mwdumper: A tool for extracting sets of pages from a MediaWiki dump file.
mwdumper is licensed under an MIT-style license; see http://www.mediawiki.org/wiki/Mwdumper
and, for the license text, http://en.wikipedia.org/wiki/MIT_License.
(files concerned: mwdumper-2008-04-13.jar)
sgt: The Scientific Graphics Toolkit (sgt) is provided by the NOAA/PMEL/EPIC group (see http://www.epic.noaa.gov/java/sgt/) under the BSD-style EPIC license, see doc/licenses/epic-license.txt.
IT IS YOUR OBLIGATION TO READ AND ACCEPT ALL SUCH TERMS
AND CONDITIONS PRIOR TO USE OF THIS CONTENT.

View file

@ -0,0 +1,10 @@
#!/bin/bash
##########################################################################
# MARY TTS client
##########################################################################
# Resolve the directory containing this script, then derive the MARY
# installation root one level above it.  $(...) is used instead of the
# legacy backtick form: it nests and quotes safely.
BINDIR="$(dirname "$0")"
export MARY_BASE="$(cd "$BINDIR"/.. ; pwd)"
# -showversion prints the JVM version for support diagnostics; -ea enables
# assertions.  server.host/server.port select the MARY server to contact.
java -showversion -ea -Dserver.host=localhost -Dserver.port=59125 -jar "$MARY_BASE/lib/marytts-client-5.1.2-jar-with-dependencies.jar"

View file

@ -0,0 +1,10 @@
@echo off
rem MARY TTS client launcher for Windows.
rem %~dp0 expands to the drive and path of this script (trailing backslash).
set BINDIR=%~dp0
rem Turn "%BINDIR%\.." into an absolute path stored in MARY_BASE.
call :RESOLVE "%BINDIR%\.." MARY_BASE
rem -showversion prints the JVM version; -ea enables assertions.
java -showversion -ea -Dserver.host=localhost -Dserver.port=59125 -jar "%MARY_BASE%\lib\marytts-client-5.1.2-jar-with-dependencies.jar"
goto :EOF
rem Subroutine RESOLVE <path> <varname>: stores the fully qualified form of
rem <path> (%~f1) into the environment variable named by <varname>.
:RESOLVE
set %2=%~f1
goto :EOF

View file

@ -0,0 +1,5 @@
#!/bin/sh
# Launch the MARY TTS voice/component installer GUI.
BINDIR="$(dirname "$0")"
export MARY_BASE="$(cd "$BINDIR"/.. ; pwd)"
# BUG FIX: the original forwarded caller arguments with an unquoted $*,
# which word-splits any argument containing whitespace; "$@" preserves
# each argument verbatim.
java -showversion -ea -Dmary.base="$MARY_BASE" "$@" -cp "$MARY_BASE/lib/*" marytts.tools.install.InstallerGUI

View file

@ -0,0 +1,9 @@
@echo off
rem MARY TTS component installer GUI launcher for Windows.
rem %~dp0 = drive and path of this script (trailing backslash included).
set BINDIR=%~dp0
rem Turn "%BINDIR%\.." into an absolute path stored in MARY_BASE.
call :RESOLVE "%BINDIR%\.." MARY_BASE
rem Run the installer GUI with assertions enabled (-ea); mary.base tells
rem the application where the installation root lives.
java -showversion -ea -Dmary.base="%MARY_BASE%" -cp ".;%MARY_BASE%\lib\*" marytts.tools.install.InstallerGUI
goto :EOF
rem Subroutine RESOLVE <path> <varname>: stores the fully qualified form of
rem <path> (%~f1) into the environment variable named by <varname>.
:RESOLVE
set %2=%~f1
goto :EOF

View file

@ -0,0 +1,11 @@
#!/bin/bash
##########################################################################
# MARY TTS server
##########################################################################
# Set the Mary base installation directory in an environment variable:
BINDIR="$(dirname "$0")"
export MARY_BASE="$(cd "$BINDIR"/.. ; pwd)"
# BUG FIX: forward caller arguments with "$@" instead of the unquoted $*,
# which word-splits arguments containing whitespace.
# -Xms40m/-Xmx1g: heap sizing; -ea: enable assertions.
java -showversion -ea -Xms40m -Xmx1g -cp "$MARY_BASE/lib/*" -Dmary.base="$MARY_BASE" "$@" marytts.server.Mary

View file

@ -0,0 +1,14 @@
@echo off
rem Set the Mary base installation directory in an environment variable:
set BINDIR=%~dp0
call :RESOLVE "%BINDIR%\.." MARY_BASE
rem NOTE(review): the quotes become part of CLASSPATH's value here; cmd
rem tolerates this, but it differs from the quoting style of the other
rem .bat launchers -- confirm behavior on all supported Windows versions.
set CLASSPATH=".;%MARY_BASE%\lib\*"
rem Start the MARY server: 40 MB initial / 1 GB max heap, assertions on.
java -showversion -ea -Xms40m -Xmx1g -cp %CLASSPATH% "-Dmary.base=%MARY_BASE%" marytts.server.Mary
goto :EOF
rem Subroutine RESOLVE <path> <varname>: stores the fully qualified form of
rem <path> (%~f1) into the environment variable named by <varname>.
:RESOLVE
set %2=%~f1
goto :EOF

View file

@ -0,0 +1,367 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
import socket, sys, types, getopt
# Human-readable display names for the locale codes announced by the
# server; locales missing from this map fall back to the raw code.
languageNames = {'de':'German',
                 'en':'English',
                 'en_US':'US English',
                 'tib':'Tibetan'}
class MaryClient:
    # Version of the client/server protocol this client implements.
    specificationVersion = "0.1"
    # NOTE(review): the string below is not the class docstring -- it is not
    # the first statement in the class body, so it does not populate __doc__.
    """Python implementation of a MARY TTS client"""
def __init__( self, host="cling.dfki.uni-sb.de", port=59125, profile=False, quiet=False ):
    # Connection parameters for the MARY server.
    self.host = host
    self.port = port
    self.profile = profile
    self.quiet = quiet
    # Lazily-filled caches: None / {} means "not yet queried from the server".
    self.allVoices = None # array of Voice objects
    self.voicesByLocaleMap = {} # Map locale strings to arrays of Voice objects
    self.allDataTypes = None # array of DataType objects
    self.inputDataTypes = None # array of DataType objects
    self.outputDataTypes = None # array of DataType objects
    self.serverExampleTexts = {}
    self.voiceExampleTexts = {}
    self.serverVersionInfo = u''
    # Unless quiet, print a banner and probe the server once so that
    # connection problems surface immediately on construction.
    if not self.quiet:
        sys.stderr.write( "MARY TTS Python Client %s\n" % ( self.specificationVersion ) )
        try:
            info = self.getServerVersionInfo()
        except:
            sys.stderr.write( "Problem connecting to mary server at %s:%i\n" % ( self.host, self.port ) )
            raise
        sys.stderr.write( "Connected to %s:%i, " % ( self.host, self.port ) )
        sys.stderr.write( info )
        sys.stderr.write( '\n' )
def __getServerInfo( self, request="", marySocket=None ):
    """Get answer to request from mary server. Returns a list of unicode strings,
    each representing a line without the line break.
    """
    # If the caller did not hand in an open socket, create one for this
    # request only and remember to close it before returning.
    closeSocket = False
    if marySocket is None:
        closeSocket = True
        marySocket = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
        marySocket.connect( ( self.host, self.port ) )
    assert isinstance(marySocket, socket.SocketType)
    maryFile = marySocket.makefile( 'rwb', 1 ) # read-write, line-buffered
    # NOTE(review): unicode()/has_key-style APIs make this module Python-2
    # only; it will not run unmodified on Python 3.
    maryFile.write( unicode( request+"\n" ).encode( 'utf-8' ) )
    result = []
    while True:
        got = unicode( maryFile.readline().strip(), 'utf-8' )
        # read until end of file or an empty line is read:
        if not got: break
        result.append(got)
    if closeSocket:
        marySocket.close()
    return result
def getServerVersionInfo( self ):
"Get version info from server. Returns a unicode string"
if self.serverVersionInfo == u'':
# need to get it from server
self.serverVersionInfo = u'\n'.join(self.__getServerInfo("MARY VERSION"))
return self.serverVersionInfo
def getAllDataTypes(self, locale=None):
    """Obtain a list of all data types known to the server. If the information is not
    yet available, the server is queried. This is optional information
    which is not required for the normal operation of the client, but
    may help to avoid incompatibilities.
    Returns an array of DataType objects
    """
    # Lazily populate the cache from the server on first use.
    if self.allDataTypes is None:
        self.__fillDataTypes()
    assert self.allDataTypes is not None and len( self.allDataTypes ) > 0
    if locale is None:
        return self.allDataTypes
    else:
        # NOTE(review): types.UnicodeType is Python-2-only; on Python 3 this
        # line raises AttributeError instead of performing the type check.
        assert isinstance(locale, types.UnicodeType), "Unexpected type for locale: '%s'" % (type(locale))
        # Locale-independent types (locale is None) match every locale.
        return [d for d in self.allDataTypes if d.locale is None or d.locale == locale]
def getInputDataTypes(self,locale=None):
    """Obtain a list of input data types known to the server. If the information is not
    yet available, the server is queried. This is optional information
    which is not required for the normal operation of the client, but
    may help to avoid incompatibilities.
    Returns an array of DataType objects
    """
    # Lazily populate the cache from the server on first use.
    if self.inputDataTypes is None:
        self.__fillDataTypes()
    assert self.inputDataTypes is not None and len( self.inputDataTypes ) > 0
    if locale is None:
        return self.inputDataTypes
    else:
        # NOTE(review): types.UnicodeType is Python-2-only (see getAllDataTypes).
        assert isinstance(locale, types.UnicodeType), "Unexpected type for locale: '%s'" % (type(locale))
        # Locale-independent types (locale is None) match every locale.
        return [d for d in self.inputDataTypes if d.locale is None or d.locale == locale]
def getOutputDataTypes(self, locale=None):
    """Obtain a list of output data types known to the server. If the information is not
    yet available, the server is queried. This is optional information
    which is not required for the normal operation of the client, but
    may help to avoid incompatibilities.
    Returns an array of DataType objects
    """
    # Lazily populate the cache from the server on first use.
    if self.outputDataTypes is None:
        self.__fillDataTypes()
    assert self.outputDataTypes is not None and len( self.outputDataTypes ) > 0
    if locale is None:
        return self.outputDataTypes
    else:
        # NOTE(review): types.UnicodeType is Python-2-only (see getAllDataTypes).
        assert isinstance(locale, types.UnicodeType), "Unexpected type for locale: '%s'" % (type(locale))
        # Locale-independent types (locale is None) match every locale.
        return [d for d in self.outputDataTypes if d.locale is None or d.locale == locale]
def __fillDataTypes( self ):
    """Query the server for its data types and populate allDataTypes,
    inputDataTypes and outputDataTypes.
    Raises IOError if the server returns no data types.
    """
    self.allDataTypes = []
    self.inputDataTypes = []
    self.outputDataTypes = []
    marySocket = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
    marySocket.connect( ( self.host, self.port ) )
    # Expect a variable number of lines of the kind
    #   RAWMARYXML INPUT OUTPUT
    #   TEXT_DE LOCALE=de INPUT
    #   AUDIO OUTPUT
    try:
        typeStrings = self.__getServerInfo( "MARY LIST DATATYPES", marySocket )
    finally:
        # BUG FIX: the original closed the socket only on the success path,
        # leaking it when the query failed or returned nothing.
        marySocket.close()
    if not typeStrings:
        raise IOError( "Could not get list of data types from Mary server" )
    for typeString in typeStrings:
        parts = typeString.split()
        if len( parts ) == 0:
            continue
        name = parts[0]
        isInputType = False
        isOutputType = False
        locale = None
        for part in parts[1:]:
            if part[:7] == "LOCALE=":
                locale = part[7:]
            elif part == "INPUT":
                isInputType = True
            elif part == "OUTPUT":
                isOutputType = True
        dt = DataType( name, locale, isInputType, isOutputType )
        self.allDataTypes.append( dt )
        if dt.isInputType:
            self.inputDataTypes.append( dt )
        if dt.isOutputType:
            self.outputDataTypes.append( dt )
def getVoices( self, locale=None ):
"""Obtain a list of voices known to the server. If the information is not
yet available, the server is queried. This is optional information
which is not required for the normal operation of the client, but
may help to avoid incompatibilities.
Returns an array of Voice objects
"""
if self.allVoices is None:
self.__fillVoices()
assert self.allVoices is not None and len( self.allVoices ) > 0
if locale is None:
return self.allVoices
else:
assert isinstance(locale, types.UnicodeType), "Unexpected type for locale: '%s'" % (type(locale))
if self.voicesByLocaleMap.has_key(locale):
return self.voicesByLocaleMap[locale]
else:
raise Exception("No voices for locale '%s'" % (locale))
def __fillVoices( self ):
    """Query the server for its voices and populate allVoices and
    voicesByLocaleMap (locale string -> list of Voice).
    Raises IOError if the server returns no voices.
    """
    self.allVoices = []
    self.voicesByLocaleMap = {}
    marySocket = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
    marySocket.connect( ( self.host, self.port ) )
    # Expect a variable number of lines of the kind
    #   de7 de female
    #   us2 en male
    #   dfki-stadium-emo de male limited
    try:
        voiceStrings = self.__getServerInfo( "MARY LIST VOICES", marySocket )
    finally:
        # BUG FIX: the original closed the socket only on the success path,
        # leaking it when the query failed or returned nothing.
        marySocket.close()
    if not voiceStrings:
        raise IOError( "Could not get list of voices from Mary server" )
    for voiceString in voiceStrings:
        parts = voiceString.split()
        if len( parts ) < 3:
            continue
        name = parts[0]
        locale = parts[1]
        gender = parts[2]
        # Optional fourth field marks a limited-domain voice.
        domain = parts[3] if len( parts ) > 3 else None
        voice = Voice( name, locale, gender, domain )
        self.allVoices.append( voice )
        # dict.setdefault replaces the Python-2-only has_key() dance.
        self.voicesByLocaleMap.setdefault( locale, [] ).append( voice )
def getGeneralDomainVoices( self, locale=None ):
"""Obtain a list of general domain voices known to the server. If the information is not
yet available, the server is queried. This is optional information
which is not required for the normal operation of the client, but
may help to avoid incompatibilities.
Returns an array of Voice objects
"""
return [v for v in self.getVoices( locale ) if not v.isLimitedDomain]
def getLimitedDomainVoices( self, locale=None ):
"""Obtain a list of limited domain voices known to the server. If the information is not
yet available, the server is queried. This is optional information
which is not required for the normal operation of the client, but
may help to avoid incompatibilities.
Returns an array of Voice objects
"""
return [v for v in self.getVoices( locale ) if v.isLimitedDomain]
def getAvailableLanguages(self):
    """ Check available voices and return a list of tuples (abbrev, name)
    representing the available languages -- e.g. [('en', 'English'),('de', 'German')].
    Locales without an entry in languageNames fall back to the raw code.
    """
    if self.allVoices is None:
        self.__fillVoices()
    assert self.allVoices is not None and len( self.allVoices ) > 0
    languages = []
    # BUG FIX: dict.has_key() was removed in Python 3; dict.get with a
    # default is equivalent on both Python 2 and 3.
    for l in self.voicesByLocaleMap.keys():
        languages.append((l, languageNames.get(l, l)))
    return languages
def getServerExampleText( self, dataType ):
"""Request an example text for a given data type from the server.
dataType the string representation of the data type,
e.g. "RAWMARYXML". This is optional information
which is not required for the normal operation of the client, but
may help to avoid incompatibilities."""
if not self.serverExampleTexts.has_key( dataType ):
exampleTexts = self.__getServerInfo( "MARY EXAMPLETEXT %s" % ( dataType ) )
if not exampleTexts or len(exampleTexts) == 0:
raise IOError( "Could not get example text for type '%s' from Mary server" % (dataType))
exampleText = u'\n'.join(exampleTexts)
self.serverExampleTexts[dataType] = exampleText
return self.serverExampleTexts[dataType]
def process( self, input, inputType, outputType, audioType=None, defaultVoiceName=None, output=sys.stdout ):
    """Send one processing request to the server and stream the result into
    `output` (any object with a write() method).

    Uses the two-socket MARY protocol: the "info" socket carries the request
    header, receives a request id and any error messages; the payload itself
    is exchanged on a second "data" socket identified by that id.
    """
    assert type( input ) in types.StringTypes
    assert type( inputType ) in types.StringTypes
    assert type( outputType ) in types.StringTypes
    assert audioType is None or type( audioType ) in types.StringTypes
    assert defaultVoiceName is None or type( defaultVoiceName ) in types.StringTypes
    assert callable( getattr( output, 'write' ) )
    # Normalize the input to unicode (Python-2-only API).
    if type( input ) != types.UnicodeType:
        input = unicode( input, 'utf-8' )
    maryInfoSocket = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
    maryInfoSocket.connect( ( self.host, self.port ) )
    assert type( maryInfoSocket ) is socket.SocketType
    maryInfo = maryInfoSocket.makefile( 'rwb', 1 ) # read-write, line-buffered
    # Build the header line piecewise; AUDIO/VOICE fields are optional.
    maryInfo.write( unicode( "MARY IN=%s OUT=%s" % ( inputType, outputType ), 'utf-8' ) )
    if audioType:
        maryInfo.write( unicode( " AUDIO=%s" % ( audioType ), 'utf-8' ) )
    if defaultVoiceName:
        maryInfo.write( unicode( " VOICE=%s" % ( defaultVoiceName ), 'utf-8' ) )
    maryInfo.write( "\r\n" )
    # Receive a request ID:
    id = maryInfo.readline()
    # Open the data connection and identify it with the request id.
    maryDataSocket = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
    maryDataSocket.connect( ( self.host, self.port ) )
    assert type( maryDataSocket ) is socket.SocketType
    maryDataSocket.sendall( id ) # includes newline
    maryDataSocket.sendall( input.encode( 'utf-8' ) )
    maryDataSocket.shutdown( 1 ) # shutdown writing
    # Set mary info socket to non-blocking, so we only read somthing
    # if there is something to read:
    maryInfoSocket.setblocking( 0 )
    while True:
        try:
            # Forward any server-side error output as it arrives.
            err = maryInfoSocket.recv( 8192 )
            if err: sys.stderr.write( err )
        except:
            # Non-blocking recv raises when nothing is available; this is
            # the expected common case, hence the deliberate swallow.
            pass
        got = maryDataSocket.recv( 8192 )
        if not got: break
        output.write( got )
    # Drain any remaining error output, now blocking until EOF.
    maryInfoSocket.setblocking( 1 )
    while True:
        err = maryInfoSocket.recv( 8192 )
        if not err: break
        sys.stderr.write( err )
################ data representation classes ##################
class DataType:
    """Describes one MARY data type: its name, an optional locale, and
    whether the server accepts it as input and/or produces it as output."""
    def __init__( self, name, locale=None, isInputType=False, isOutputType=False ):
        self.name = name
        self.locale = locale
        self.isInputType = isInputType
        self.isOutputType = isOutputType
    def isTextType( self ):
        """True for every data type except raw audio."""
        return not (self.name == "AUDIO")
class Voice:
    """Describes one MARY voice: name, locale, gender and domain. A voice is
    "limited domain" unless its domain is unset or "general"."""
    def __init__( self, name, locale, gender, domain="general" ):
        self.name = name
        self.locale = locale
        self.gender = gender
        self.domain = domain
        # Equivalent to the original `not domain or domain == "general"` split.
        self.isLimitedDomain = bool(domain) and domain != "general"
    def __str__(self):
        # BUG FIX: dict.has_key() was removed in Python 3; dict.get with a
        # fallback to the raw locale code is equivalent on Python 2 and 3.
        langName = languageNames.get(self.locale, self.locale)
        if self.isLimitedDomain:
            return "%s (%s, %s %s)" % (self.name, self.domain, langName, self.gender)
        return "%s (%s %s)" % (self.name, langName, self.gender)
##################### Main #########################
if __name__ == '__main__':
    # Command-line driver: read text from a file (or stdin), synthesize it
    # on the given server, write the result to a file (or stdout).
    serverHost = "cling.dfki.uni-sb.de"
    serverPort = 59125
    inputType = "TEXT"
    outputType = "AUDIO"
    audioType = "WAVE"
    defaultVoice = None
    inputEncoding = 'utf-8'
    ( options, rest ) = getopt.getopt( sys.argv[1:], '', \
        ['server.host=', 'server.port=', 'input.type=', 'output.type=', \
         'audio.type=', 'voice.default=', 'input.encoding='] )
    for ( option, value ) in options:
        if option == '--server.host': serverHost = value
        elif option == '--server.port': serverPort = int( value )
        elif option == '--input.type': inputType = value
        elif option == '--output.type': outputType = value
        elif option == '--audio.type': audioType = value
        elif option == '--voice.default': defaultVoice = value
        elif option == '--input.encoding': inputEncoding = value
    if len( rest ) > 0: # have input file
        # open() instead of the Python-2-only file() builtin.
        inputFile = open( rest[0] )
    else:
        inputFile = sys.stdin
    input = unicode( ''.join( inputFile.readlines() ), inputEncoding )
    if len( rest ) > 1: # also have output file
        # BUG FIX: the original opened the output file with file(rest[1]),
        # i.e. read-only -- writing the synthesized (binary) result to it
        # failed.  Open for binary writing instead.
        outputFile = open( rest[1], 'wb' )
    else:
        outputFile = sys.stdout
    maryClient = MaryClient( serverHost, serverPort )
    maryClient.process( input, inputType, outputType, audioType, defaultVoice, outputFile )

View file

@ -0,0 +1,102 @@
/**
* Copyright 2000-2006 DFKI GmbH.
* All Rights Reserved. Use is subject to license terms.
*
* Permission is hereby granted, free of charge, to use and distribute
* this software and its documentation without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of this work, and to
* permit persons to whom this work is furnished to do so, subject to
* the following conditions:
*
* 1. The code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Any modifications must be clearly marked as such.
* 3. Original authors' names are not deleted.
* 4. The authors' names are not used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* DFKI GMBH AND THE CONTRIBUTORS TO THIS WORK DISCLAIM ALL WARRANTIES WITH
* REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL DFKI GMBH NOR THE
* CONTRIBUTORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
* THIS SOFTWARE.
*/
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.net.UnknownHostException;
import java.util.Locale;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.LineEvent;
import javax.sound.sampled.LineListener;
import javax.sound.sampled.UnsupportedAudioFileException;
import marytts.util.data.audio.AudioPlayer;
import marytts.client.MaryClient;
import marytts.util.http.Address;
/**
* A demo class illustrating how to use the MaryClient class.
* This will connect to a MARY server, version 4.x.
* It requires maryclient.jar from MARY 4.0.
* This works transparently with MARY servers in both http and socket server mode.
*
* Compile this as follows:
* <code>javac -cp maryclient.jar MaryClientUser.java</code>
*
* And run as:
* <code>java -cp .:maryclient.jar MaryClientUser</code>
*
* @author marc
*
*/
public class MaryClientUser {

    public static void main(String[] args)
    throws IOException, UnknownHostException, UnsupportedAudioFileException,
    InterruptedException
    {
        // Server address is configurable via -Dserver.host / -Dserver.port;
        // the defaults point at DFKI's public demo server.
        String serverHost = System.getProperty("server.host", "cling.dfki.uni-sb.de");
        int serverPort = Integer.getInteger("server.port", 59125).intValue();
        MaryClient mary = MaryClient.getMaryClient(new Address(serverHost, serverPort));
        String text = "Willkommen in der Welt der Sprachsynthese!";
        // If the given locale is not supported by the server, it returns
        // an ambigous exception: "Problem processing the data."
        String locale = "de"; // or US English (en-US), Telugu (te), Turkish (tr), ...
        String inputType = "TEXT";
        String outputType = "AUDIO";
        String audioType = "WAVE";
        String defaultVoiceName = null; // null = let the server pick a voice for the locale
        // The server streams the synthesized audio into this in-memory buffer.
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        mary.process(text, inputType, outputType, locale, audioType, defaultVoiceName, baos);
        // The byte array constitutes a full wave file, including the headers.
        // And now, play the audio data:
        AudioInputStream ais = AudioSystem.getAudioInputStream(
                new ByteArrayInputStream(baos.toByteArray()));
        // Log line-state transitions so the demo shows playback progress.
        LineListener lineListener = new LineListener() {
            public void update(LineEvent event) {
                if (event.getType() == LineEvent.Type.START) {
                    System.err.println("Audio started playing.");
                } else if (event.getType() == LineEvent.Type.STOP) {
                    System.err.println("Audio stopped playing.");
                } else if (event.getType() == LineEvent.Type.OPEN) {
                    System.err.println("Audio line opened.");
                } else if (event.getType() == LineEvent.Type.CLOSE) {
                    System.err.println("Audio line closed.");
                }
            }
        };
        // NOTE(review): AudioPlayer.start() returns immediately; nothing
        // joins the player, so playback relies on AudioPlayer not being a
        // daemon thread -- confirm against the marytts AudioPlayer class.
        AudioPlayer ap = new AudioPlayer(ais, lineListener);
        ap.start();
    }
}

View file

@ -0,0 +1,45 @@
##########################################################################
# Copyright (C) 2000-2006 DFKI GmbH.
# All rights reserved. Use is subject to license terms.
#
# Permission is hereby granted, free of charge, to use and distribute
# this software and its documentation without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of this work, and to
# permit persons to whom this work is furnished to do so, subject to
# the following conditions:
#
# 1. The code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
# 2. Any modifications must be clearly marked as such.
# 3. Original authors' names are not deleted.
# 4. The authors' names are not used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# DFKI GMBH AND THE CONTRIBUTORS TO THIS WORK DISCLAIM ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL DFKI GMBH NOR THE
# CONTRIBUTORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL
# DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
# PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
# THIS SOFTWARE.
##########################################################################
# Toolchain and flags.  NOTE(review): CC is conventionally the C compiler
# (CXX for C++); kept as-is to avoid churning every rule.
CC=g++
CFLAGS=-Wall -w -O3 -g
# ICU installation root and linker flags (runtime path, library path, libs).
ICUDIR=/usr/local/icu
ICULIBS=-Wl,-R,$(ICUDIR)/lib -L$(ICUDIR)/lib -licuuc -licui18n -ldl
# BUG FIX: the link rule below uses $(LIBS), but only ICULIBS was defined,
# so the ICU flags were silently never passed to the linker.
LIBS=$(ICULIBS)

all: MaryDemo

MaryDemo: MaryClient.o MaryDemo.o
	$(CC) $(CFLAGS) *.o -o MaryDemo $(LIBS)

# Implicit rule: compile each .cc translation unit to an object file.
%.o: %.cc
	$(CC) $(CFLAGS) $(RFLAGS) -o $@ -c $<

clean:
	rm -rf *.o ./MaryDemo

View file

@ -0,0 +1,277 @@
/**
* Copyright 2000-2006 DFKI GmbH.
* All Rights Reserved. Use is subject to license terms.
*
* Permission is hereby granted, free of charge, to use and distribute
* this software and its documentation without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of this work, and to
* permit persons to whom this work is furnished to do so, subject to
* the following conditions:
*
* 1. The code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Any modifications must be clearly marked as such.
* 3. Original authors' names are not deleted.
* 4. The authors' names are not used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* DFKI GMBH AND THE CONTRIBUTORS TO THIS WORK DISCLAIM ALL WARRANTIES WITH
* REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL DFKI GMBH NOR THE
* CONTRIBUTORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
* THIS SOFTWARE.
*/
#include <netdb.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <iostream>

#include "MaryClient.h"
using namespace std;
/**
* A C++ implementation of a simple client to the MARY TTS system.
* result: an empty string serving as the container for the output.
* It will return text or audio data; text data will be encoded as UTF-8.
* inputText: the UTF-8 encoded text (or XML document) to send as a request
* maryInFormat: the input type of the data in inputText, e.g. TEXT
* maryOutFormat: the output type to produce, e.g. MBROLA, AUDIO
* locale: the language of the input, e.g. EN-US, DE
* audioType: for AUDIO output, the type of audio data to produce,
* e.g. WAVE or MP3.
* voice: the voice to be used, e.g. cmu-slt-hsmm, bits3.
* effects: the list of effects to be generated.
* return value: 0 on success, negative on failure.
*/
int
MaryClient::maryQuery( int server_port,
string server_host,
string& result,
string inputText,
string maryInFormat,
string maryOutFormat,
string locale,
string audioType,
string voice,
string effects ) {
// prepare the request
string query = "MARY";
query += " IN=" + maryInFormat;
query += " OUT=" + maryOutFormat;
query += " LOCALE=" + locale; // remove this line, if using an older version than MARY 4.0
query += " AUDIO=" + audioType;
query += " VOICE=" + voice;
if (effects != "") {
query += " EFFECTS=" + effects;
}
query += "\012\015";
//cout << "Constructed query: " << query << endl;
// declare connection stuff
struct sockaddr_in maryServer;
struct sockaddr_in maryClient;
struct hostent* hostInfo;
// declare variables
int maryInfoSocket;
int maryDataSocket;
// set configuration parameters
// get host information
hostInfo = gethostbyname (server_host.c_str());
if (hostInfo == NULL)
{
return -2;
}
// create a tcp connection to the mary server
maryInfoSocket = socket (AF_INET, SOCK_STREAM, 0);
// verify that the socket could be opened successfully
if (maryInfoSocket == -1)
{
return -2;
}
else
// autoflush stdout, bind and connect
{
maryClient.sin_family = AF_INET;
maryClient.sin_port = htons (0);
maryClient.sin_addr.s_addr = INADDR_ANY;
int status = bind (maryInfoSocket, (struct sockaddr*) &maryClient, sizeof (maryClient));
if (status != 0)
{
return -2;
}
maryServer.sin_family = AF_INET;
maryServer.sin_port = htons (server_port);
memcpy ((char*) &maryServer.sin_addr.s_addr, hostInfo->h_addr_list [0], hostInfo->h_length);
status = connect (maryInfoSocket, (struct sockaddr*) &maryServer, sizeof (maryServer));
if (status != 0)
{
return -2;
}
}
// send request to the Mary server
if (send (maryInfoSocket, query.c_str (), query.size (), 0) == -1)
{
return -2;
}
// receive the request id
char id [32] = "";
if (recv (maryInfoSocket, id, 32, 0) == -1)
{
return -2;
}
//cout << "Read id: " << id << endl;
// create a tcp connection to the mary server
maryDataSocket = socket (AF_INET, SOCK_STREAM, 0);
// verify that the socket could be opened successfully
if (maryDataSocket == -1)
{
return -2;
}
else
// autoflush stdout, bind and connect
{
maryClient.sin_family = AF_INET;
maryClient.sin_port = htons (0);
maryClient.sin_addr.s_addr = INADDR_ANY;
int status = bind (maryDataSocket, (struct sockaddr*) &maryClient, sizeof (maryClient));
if (status != 0)
{
return -2;
}
maryServer.sin_family = AF_INET;
maryServer.sin_port = htons (server_port);
memcpy ((char*) &maryServer.sin_addr.s_addr, hostInfo->h_addr_list [0], hostInfo->h_length);
status = connect (maryDataSocket, (struct sockaddr*) &maryServer, sizeof (maryServer));
if (status != 0)
{
return -2;
}
}
// send the request id to the Mary server
if (send (maryDataSocket, id, strlen (id), 0) == -1)
{
return -2;
}
//cout << "Sending request: " << inputText << endl;
// send the query to the Mary server
if (send (maryDataSocket, inputText.c_str (), inputText.size (), 0) == -1)
{
return -2;
}
if (send (maryDataSocket, "\012\015", 2, 0) == -1)
{
return -2;
}
// shutdown data socket
shutdown (maryDataSocket, 1);
//cout << "Reading result" << endl;
unsigned int total_bytes = 0;
int recv_bytes = 0;
char data [1024] = "";
result [0] = '\0';
// receive the request result
do
{
data [0] = '\0';
recv_bytes = recv (maryDataSocket, data, 1024, 0);
if (recv_bytes == -1)
{
return -2;
}
else if (recv_bytes > 0)
{
//cout << "("<<recv_bytes<<")" << endl;
total_bytes += recv_bytes;
data [recv_bytes] = '\0';
if (maryOutFormat == "AUDIO")
{
for (unsigned int i=0; i<recv_bytes; i++)
{
result += data [i];
}
}
else
{
result += data;
}
}
} while (recv_bytes != 0);
if (result.size () != total_bytes)
{
cerr << "error: total bytes received != result bytes!" << endl;
cerr << " total bytes received = " << total_bytes << endl;
cerr << " result bytes = " << result.size () << endl;
}
// receive the request error
do
{
data [0] = '\0';
recv_bytes = recv (maryInfoSocket, data, 1024, 0);
if (recv_bytes == -1)
{
return -2;
}
else if (recv_bytes > 0)
{
cerr << endl << "Mary error code: " << data << endl;
return -3;
}
} while (recv_bytes != 0);
// close all open sockets
close (maryInfoSocket);
close (maryDataSocket);
return 0;
}

View file

@ -0,0 +1,290 @@
/**
* Copyright 2000-2006 DFKI GmbH.
* All Rights Reserved. Use is subject to license terms.
*
* Permission is hereby granted, free of charge, to use and distribute
* this software and its documentation without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of this work, and to
* permit persons to whom this work is furnished to do so, subject to
* the following conditions:
*
* 1. The code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Any modifications must be clearly marked as such.
* 3. Original authors' names are not deleted.
* 4. The authors' names are not used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* DFKI GMBH AND THE CONTRIBUTORS TO THIS WORK DISCLAIM ALL WARRANTIES WITH
* REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL DFKI GMBH NOR THE
* CONTRIBUTORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
* THIS SOFTWARE.
*/
#ifdef _WIN32
// use compiler option -L/<path to>/libwsock32.a
#include <winsock.h>
#else
#include <netdb.h>
#endif
#include <stdlib.h>
#include <iostream>
#include <string.h>
#include "MaryClient.h"
using namespace std;
/**
* A C++ implementation of a simple client to the MARY TTS system.
* result: an empty string serving as the container for the output.
* It will return text or audio data; text data will be encoded as UTF-8.
* inputText: the UTF-8 encoded text (or XML document) to send as a request
* maryInFormat: the input type of the data in inputText, e.g. TEXT
* maryOutFormat: the output type to produce, e.g. MBROLA, AUDIO
* locale: the language of the input, e.g. EN-US, DE
* audioType: for AUDIO output, the type of audio data to produce,
* e.g. WAVE or MP3.
* voice: the voice to be used, e.g. cmu-slt-hsmm, bits3.
* effects: the list of effects to be generated.
* return value: 0 on success, negative on failure.
*/
// Close a socket with the platform-appropriate call (winsock has no close()).
static void closeMarySocket (int sock)
{
#ifdef _WIN32
    closesocket (sock);
#else
    close (sock);
#endif
}

int
MaryClient::maryQuery( int server_port,
                       string server_host,
                       string& result,
                       string inputText,
                       string maryInFormat,
                       string maryOutFormat,
                       string locale,
                       string audioType,
                       string voice,
                       string effects ) {
    // ----- build the protocol request line understood by the MARY server -----
    string query = "MARY";
    query += " IN=" + maryInFormat;
    query += " OUT=" + maryOutFormat;
    query += " LOCALE=" + locale; // remove this line, if using an older version than MARY 4.0
    query += " AUDIO=" + audioType;
    query += " VOICE=" + voice;
    if (effects != "") {
        query += " EFFECTS=" + effects;
    }
    // NOTE(review): this sends LF CR ("\012\015") while the project's Perl
    // clients terminate requests with CR LF ("\015\012").  Kept as-is since
    // this client evidently works, but worth confirming against the protocol.
    query += "\012\015";

    // connection endpoints
    struct sockaddr_in maryServer;
    struct sockaddr_in maryClient;
    struct hostent* hostInfo;
    int maryInfoSocket;   // control connection: request line, id, error report
    int maryDataSocket;   // data connection: input text and synthesis result

    // resolve the server host (gethostbyname is legacy/IPv4-only, but matches
    // the original code's scope)
    hostInfo = gethostbyname (server_host.c_str());
    if (hostInfo == NULL)
    {
        return -2;
    }

    // ----- open and connect the control socket -----
    maryInfoSocket = socket (AF_INET, SOCK_STREAM, 0);
    if (maryInfoSocket == -1)
    {
        return -2;
    }
    maryClient.sin_family = AF_INET;
    maryClient.sin_port = htons (0);          // any free local port
    maryClient.sin_addr.s_addr = INADDR_ANY;  // any local interface
    int status = bind (maryInfoSocket, (struct sockaddr*) &maryClient, sizeof (maryClient));
    if (status != 0)
    {
        closeMarySocket (maryInfoSocket);  // fix: socket was leaked on this path
        return -2;
    }
    maryServer.sin_family = AF_INET;
    maryServer.sin_port = htons (server_port);
    memcpy ((char*) &maryServer.sin_addr.s_addr, hostInfo->h_addr_list [0], hostInfo->h_length);
    status = connect (maryInfoSocket, (struct sockaddr*) &maryServer, sizeof (maryServer));
    if (status != 0)
    {
        closeMarySocket (maryInfoSocket);
        return -2;
    }

    // send the request line to the Mary server
    if (send (maryInfoSocket, query.c_str (), query.size (), 0) == -1)
    {
        closeMarySocket (maryInfoSocket);
        return -2;
    }

    // receive the request id assigned by the server.  Read at most
    // sizeof(id)-1 bytes so the zero-initialised array stays NUL-terminated
    // for the strlen() call below.
    char id [32] = "";
    if (recv (maryInfoSocket, id, sizeof (id) - 1, 0) == -1)
    {
        closeMarySocket (maryInfoSocket);
        return -2;
    }

    // ----- open and connect the data socket -----
    maryDataSocket = socket (AF_INET, SOCK_STREAM, 0);
    if (maryDataSocket == -1)
    {
        closeMarySocket (maryInfoSocket);
        return -2;
    }
    maryClient.sin_family = AF_INET;
    maryClient.sin_port = htons (0);
    maryClient.sin_addr.s_addr = INADDR_ANY;
    status = bind (maryDataSocket, (struct sockaddr*) &maryClient, sizeof (maryClient));
    if (status != 0)
    {
        closeMarySocket (maryInfoSocket);
        closeMarySocket (maryDataSocket);
        return -2;
    }
    maryServer.sin_family = AF_INET;
    maryServer.sin_port = htons (server_port);
    memcpy ((char*) &maryServer.sin_addr.s_addr, hostInfo->h_addr_list [0], hostInfo->h_length);
    status = connect (maryDataSocket, (struct sockaddr*) &maryServer, sizeof (maryServer));
    if (status != 0)
    {
        closeMarySocket (maryInfoSocket);
        closeMarySocket (maryDataSocket);
        return -2;
    }

    // identify this data connection with the request id, send the input,
    // then the end-of-request marker
    if (send (maryDataSocket, id, strlen (id), 0) == -1
        || send (maryDataSocket, inputText.c_str (), inputText.size (), 0) == -1
        || send (maryDataSocket, "\012\015", 2, 0) == -1)
    {
        closeMarySocket (maryInfoSocket);
        closeMarySocket (maryDataSocket);
        return -2;
    }
    // we are done writing: signal EOF on the data connection
    shutdown (maryDataSocket, 1);

    // ----- read the synthesis result from the data socket -----
    unsigned int total_bytes = 0;
    int recv_bytes = 0;
    char data [1024];
    result.clear ();  // fix: was `result [0] = '\0'`, undefined on an empty string
    do
    {
        data [0] = '\0';
        // fix: read at most sizeof(data)-1 bytes; the original read 1024 and
        // then wrote data[recv_bytes] = '\0', one byte past the buffer when a
        // full chunk arrived
        recv_bytes = recv (maryDataSocket, data, sizeof (data) - 1, 0);
        if (recv_bytes == -1)
        {
            closeMarySocket (maryInfoSocket);
            closeMarySocket (maryDataSocket);
            return -2;
        }
        else if (recv_bytes > 0)
        {
            total_bytes += recv_bytes;
            data [recv_bytes] = '\0';
            if (maryOutFormat == "AUDIO")
            {
                // audio is binary and may contain NUL bytes: append byte-wise
                for (int i = 0; i < recv_bytes; i++)
                {
                    result += data [i];
                }
            }
            else
            {
                result += data;
            }
        }
    } while (recv_bytes != 0);
    if (result.size () != total_bytes)
    {
        cerr << "error: total bytes received != result bytes!" << endl;
        cerr << " total bytes received = " << total_bytes << endl;
        cerr << " result bytes = " << result.size () << endl;
    }

    // ----- read any error report from the control connection -----
    do
    {
        data [0] = '\0';
        recv_bytes = recv (maryInfoSocket, data, sizeof (data) - 1, 0);
        if (recv_bytes == -1)
        {
            closeMarySocket (maryInfoSocket);
            closeMarySocket (maryDataSocket);
            return -2;
        }
        else if (recv_bytes > 0)
        {
            data [recv_bytes] = '\0';  // ensure the message prints safely
            cerr << endl << "Mary error code: " << data << endl;
            closeMarySocket (maryInfoSocket);
            closeMarySocket (maryDataSocket);
            return -3;
        }
    } while (recv_bytes != 0);

    // close all open sockets
    closeMarySocket (maryInfoSocket);
    closeMarySocket (maryDataSocket);
    return 0;
}

View file

@ -0,0 +1,49 @@
/**
* Copyright 2000-2006 DFKI GmbH.
* All Rights Reserved. Use is subject to license terms.
*
* Permission is hereby granted, free of charge, to use and distribute
* this software and its documentation without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of this work, and to
* permit persons to whom this work is furnished to do so, subject to
* the following conditions:
*
* 1. The code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Any modifications must be clearly marked as such.
* 3. Original authors' names are not deleted.
* 4. The authors' names are not used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* DFKI GMBH AND THE CONTRIBUTORS TO THIS WORK DISCLAIM ALL WARRANTIES WITH
* REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL DFKI GMBH NOR THE
* CONTRIBUTORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
* THIS SOFTWARE.
*/
#ifndef MARYCLIENT_H_
#define MARYCLIENT_H_

// Fix: the interface below uses std::string, but the header previously
// relied on the including file to pull in <string> first.
#include <string>

/**
 * Simple C++ client for the MARY TTS socket server.
 */
class MaryClient {
public:
    /**
     * Send a synthesis request to the MaryServer.
     *
     * server_port/server_host locate the server; result receives the
     * output (text as UTF-8, or raw audio bytes for AUDIO output);
     * inputText is the UTF-8 request; the remaining parameters select
     * input/output formats, locale, audio type, voice and effects.
     *
     * Returns 0 on success, a negative value on failure.
     */
    int maryQuery( int server_port,
                   std::string server_host,
                   std::string& result,
                   std::string inputText,
                   std::string maryInFormat,
                   std::string maryOutFormat,
                   std::string locale,
                   std::string audioType,
                   std::string voice,
                   std::string effects = "" );
};

#endif // MARYCLIENT_H_

View file

@ -0,0 +1,87 @@
/**
* Copyright 2000-2006 DFKI GmbH.
* All Rights Reserved. Use is subject to license terms.
*
* Permission is hereby granted, free of charge, to use and distribute
* this software and its documentation without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of this work, and to
* permit persons to whom this work is furnished to do so, subject to
* the following conditions:
*
* 1. The code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Any modifications must be clearly marked as such.
* 3. Original authors' names are not deleted.
* 4. The authors' names are not used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* DFKI GMBH AND THE CONTRIBUTORS TO THIS WORK DISCLAIM ALL WARRANTIES WITH
* REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL DFKI GMBH NOR THE
* CONTRIBUTORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
* THIS SOFTWARE.
*/
// This version, adapted to MARY 4.0, provided by Sebastian Ptock.
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include "MaryClient.h"
using namespace std;
/**
* Demonstration code for using the MaryClient.
 * Call this as:
* ./MaryDemo
* or
* ./MaryDemo > output.wav
*/
// Demonstrates MaryClient: synthesizes a fixed sentence and writes the
// audio to output.wav (or prints non-audio results to stdout).
int main() {
    int server_port = 59125;
    string server_host = "localhost";
    string inputText = "Welcome to the world of speech synthesis!";
    string maryInFormat = "TEXT";
    string maryOutFormat = "AUDIO";
    //string maryOutFormat = "REALISED_DURATIONS";
    string locale = "en-US";
    string audioType = "WAV_FILE";
    string voice = "cmu-slt-hsmm";
    string effects;
    // effects += "Volume(amount:5.0;)+";
    // effects += "TractScaler(amount:1.5;)+";
    // effects += "F0Scale(f0Scale:2.0;)+";
    // effects += "F0Add(f0Add:50.0;)+";
    // effects += "Rate(durScale:1.5;)+";
    // effects += "Robot(amount:100.0;)+";
    // effects += "Whisper(amount:100.0;)+";
    // effects += "Stadium(amount:100.0)+";
    // effects += "Chorus(delay1:466;amp1:0.54;delay2:600;amp2:-0.10;delay3:250;amp3:0.30)+";
    // effects += "FIRFilter(type:3;fc1:500.0;fc2:2000.0)+";
    // effects += "JetPilot";
    string result;
    MaryClient maryClient;
    // Fix: the return value was previously ignored, so failures produced an
    // empty/corrupt output file without any diagnostic.
    int status = maryClient.maryQuery( server_port, server_host, result, inputText, maryInFormat, maryOutFormat, locale, audioType, voice, effects);
    if (status != 0) {
        cerr << "maryQuery failed with code " << status << endl;
        return 1;
    }
    if (maryOutFormat == "AUDIO") {
        // write result into a file
        const char *filename = "output.wav";
        // Fix: open in binary mode so audio bytes are not mangled by
        // newline translation on Windows.
        ofstream file( filename, ios::out | ios::binary );
        if (!file) {
            cerr << "cannot open " << filename << " for writing" << endl;
            return 1;
        }
        file << result;
        // play output
        //system("play output.wav");
    } else {
        cout << "RESULT: " << endl << result << endl;
    }
    return 0;
}

View file

@ -0,0 +1,4 @@
Start MARY as a socket server:
maryserver -Dserver=socket
(or change entry 'server' in conf/marybase.config)

View file

@ -0,0 +1,185 @@
#!/usr/bin/env python
import httplib, urllib
# A basic mary client in Python,
# kindly donated to the MARY TTS project
# by Hugh Sasse. Thanks Hugh!
# A very basic Python class for accessing
# the MARY TTS system using the modern
# HTTP server.
# Warning, this is probably ghastly Python,
# most of my time of late has been with
# other languages, so I'm not up to date
# with all the stylistic conventions of
# modern Python.
# This does seem to work OK though.
class maryclient:
    """A basic handler for MARY-TTS HTTP clients

    At present, there is no checking for
    allowed voices, locales, and so on.

    Most of the useful parameters can be
    accessed by get_ and set_ methods.

    Relying on winsound, this is Windows
    specific.
    """

    def __init__(self):
        """Set up useful defaults (for
        people in England, anyway)"""
        self.host = "127.0.0.1"
        self.port = 59125
        self.input_type = "TEXT"
        self.output_type = "AUDIO"
        self.audio = "WAVE_FILE"
        self.locale = "en_GB"
        self.voice = "dfki-prudence-hsmm"

    def set_host(self, a_host):
        """Set the host for the TTS server."""
        self.host = a_host

    def get_host(self):
        """Get the host for the TTS server."""
        # Fix: all the get_* methods were missing "return" and so
        # always returned None.
        return self.host

    def set_port(self, a_port):
        """Set the port for the TTS server."""
        self.port = a_port

    def get_port(self):
        """Get the port for the TTS server."""
        return self.port

    def set_input_type(self, type):
        """Set the type of input being
        supplied to the TTS server
        (such as 'TEXT')."""
        self.input_type = type

    def get_input_type(self):
        """Get the type of input being
        supplied to the TTS server
        (such as 'TEXT')."""
        return self.input_type

    def set_output_type(self, type):
        """Set the type of output being
        requested from the TTS server
        (such as 'AUDIO')."""
        self.output_type = type

    def get_output_type(self):
        """Get the type of output being
        requested from the TTS server
        (such as "AUDIO")."""
        return self.output_type

    def set_locale(self, a_locale):
        """Set the locale
        (such as "en_GB")."""
        self.locale = a_locale

    def get_locale(self):
        """Get the locale
        (such as "en_GB")."""
        return self.locale

    def set_audio(self, audio_type):
        """Set the audio type for playback
        (such as "WAVE_FILE")."""
        self.audio = audio_type

    def get_audio(self):
        """Get the audio type for playback
        (such as "WAVE_FILE")."""
        return self.audio

    def set_voice(self, a_voice):
        """Set the voice to speak with
        (such as "dfki-prudence-hsmm")."""
        self.voice = a_voice

    def get_voice(self):
        """Get the voice to speak with
        (such as "dfki-prudence-hsmm")."""
        return self.voice

    def generate(self, message):
        """Given a message in message,
        return a response in the appropriate
        format.

        Raises RuntimeError when the server answers
        with a non-200 HTTP status."""
        raw_params = {"INPUT_TEXT": message,
                      "INPUT_TYPE": self.input_type,
                      "OUTPUT_TYPE": self.output_type,
                      "LOCALE": self.locale,
                      "AUDIO": self.audio,
                      "VOICE": self.voice,
                      }
        params = urllib.urlencode(raw_params)
        headers = {}

        # Open connection to self.host, self.port.
        conn = httplib.HTTPConnection(self.host, self.port)

        # conn.set_debuglevel(5)

        conn.request("POST", "/process", params, headers)
        response = conn.getresponse()
        if response.status != 200:
            # Parenthesised print: valid in both Python 2 and 3 (the
            # original print statement was a SyntaxError under Python 3).
            print(response.getheaders())
            raise RuntimeError("{0}: {1}".format(response.status,
                                                 response.reason))
        return response.read()
# If this is invoked as a program, just give
# a greeting to show it is working.
# The platform specific code is moved to this
# part so that this file may be imported without
# bringing platform specific code in.
if __name__ == "__main__":
    # For handling command line arguments:
    import sys
    import platform

    # Choose an audio player implementation for this platform; the
    # network client itself is platform independent.
    os_name = platform.system().lower()
    if os_name == "windows":
        import winsound

        class Player:
            """Plays in-memory audio via winsound."""
            def __init__(self):
                pass

            def play(self, a_sound):
                winsound.PlaySound(a_sound, winsound.SND_MEMORY)
    #if ("cygwin" in system):
    else:
        # No portable way to play audio here is known, so fall back to
        # a null-object player that only announces what it would do.
        class Player:
            """Placeholder player for platforms without audio support."""
            def __init__(self):
                pass

            def play(self, a_sound):
                print("Here I would play a sound if I knew how")
                pass

    # Probably want to parse arguments to
    # set the voice, etc., here
    tts_client = maryclient()
    tts_client.set_audio("WAVE_FILE")  # for example
    sound_player = Player()
    greeting_audio = tts_client.generate("hello from Mary Text to Speech, with Python.")
    if tts_client.output_type == "AUDIO":
        sound_player.play(greeting_audio)

View file

@ -0,0 +1,177 @@
#!/usr/bin/perl -T
# -*- Mode: Perl -*-
# MARY Text-to-Speech System
# CGI Script implementing a simple mary client,
# can be used for web pages.
##########################################################################
# Copyright (C) 2000-2006 DFKI GmbH.
# All rights reserved. Use is subject to license terms.
#
# Permission is hereby granted, free of charge, to use and distribute
# this software and its documentation without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of this work, and to
# permit persons to whom this work is furnished to do so, subject to
# the following conditions:
#
# 1. The code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
# 2. Any modifications must be clearly marked as such.
# 3. Original authors' names are not deleted.
# 4. The authors' names are not used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# DFKI GMBH AND THE CONTRIBUTORS TO THIS WORK DISCLAIM ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL DFKI GMBH NOR THE
# CONTRIBUTORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL
# DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
# PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
# THIS SOFTWARE.
##########################################################################
# Author: Marc Schroeder
use strict;
use IO::Socket;
use CGI;

# variables getting their values from form:
my ($inputtext, $in, $out, $audiotype, $voice);
# little helpers:
my ($var, $tmp);
# contacting the mary server:
my ($host, $port, $maryInfoSocket, $maryDataSocket, $id);
# helping with audio output:
my ($save_to_disk, $audiosubtype, $filename);

my $cgi = new CGI;
my @param = $cgi->param();
# NOTE(review): the script runs under -T (taint mode) but these CGI values
# are interpolated into the response headers and the server request without
# explicit untainting/validation -- confirm this is acceptable.
$inputtext = $cgi->param('inputtext');
$in = $cgi->param('in');
$out = $cgi->param('out');
$audiotype = $cgi->param('audiotype');
$save_to_disk = $cgi->param('save_to_disk');
$voice = $cgi->param('voice');

# Log the request, with a timestamp, to the web server's error log.
my ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst)=localtime(time);
$year += 1900;
# NOTE(review): $mon from localtime() is 0-based (0..11); the timestamp
# below prints it without adding 1, so logged months are off by one.
printf STDERR "[%04i-%02i-%02i %02i:%02i:%02i] ", $year, $mon, $mday, $hour, $min, $sec;
print STDERR "Request from ",$cgi->remote_user(),"@",$cgi->remote_host(),": \n";
print STDERR " in=",$in;
print STDERR " out=",$out;
print STDERR " audiotype=",$audiotype;
print STDERR " voice=",$voice;
print STDERR " save_to_disk=",$save_to_disk,"\n";
print STDERR " inputtext: ";
print STDERR $inputtext,"\n";

# Limit inputtext length to 5000 bytes:
if (length $inputtext > 5000) {
    $inputtext = substr $inputtext, 0, 5000;
}

# set audio subtype: map the requested audio type to a MIME subtype and a
# suggested download filename (defaulting to WAVE for unknown types)
if ($out eq "AUDIO") {
    if ($audiotype eq "AU") {
        $audiosubtype = "basic";
        $filename = "mary.au";
    } elsif ($audiotype eq "AIFF") {
        $audiosubtype = "x-aiff";
        $filename = "mary.aiff";
    } elsif ($audiotype eq "WAVE") {
        $audiosubtype = "x-wav";
        $filename = "mary.wav";
    } elsif ($audiotype eq "MP3") {
        $audiosubtype = "mp3";
        $filename = "mary.mp3";
    } else {
        $audiosubtype = "x-wav";
        $filename = "mary.wav";
    }
}

# announce data type on stdout
# NOTE(review): for non-AUDIO output, $audiosubtype and $filename are never
# set, so the headers below interpolate empty values in that case.
if ($save_to_disk) {
    print "Content-Type: application/octet-stream";
} else {
    print "Content-Type: audio/$audiosubtype";
}
print "\nContent-Disposition: filename=\"$filename\"\n\n";

# contact mary server
$host = "cling.dfki.uni-sb.de";  # NOTE(review): hard-coded DFKI server
$port = 59125;
# create a tcp connection to the specified host and port
$maryInfoSocket = IO::Socket::INET->new(Proto => "tcp",
                                        PeerAddr => $host,
                                        PeerPort => $port)
    or die "can't connect to port $port on $host: $!";
# avoid buffering when writing to server:
$maryInfoSocket->autoflush(1); # so output gets there right away

########## Write input to server: ##########
# formulate the request:
print $maryInfoSocket "MARY IN=$in OUT=$out AUDIO=$audiotype";
if ($voice && $voice ne 'v') { print $maryInfoSocket " VOICE=$voice"; }
print $maryInfoSocket " LOG=\"REMOTE_HOST=$ENV{'REMOTE_HOST'}",
    ", REMOTE_ADDR=$ENV{'REMOTE_ADDR'}\"";
print $maryInfoSocket "\015\012";
# receive a request ID:
$id = <$maryInfoSocket>;
# open second socket for the data:
$maryDataSocket = IO::Socket::INET->new(Proto => "tcp",
                                        PeerAddr => $host,
                                        PeerPort => $port)
    or die "can't connect to port $port on $host: $!";
# identify with request number:
print $maryDataSocket $id; # $id contains a newline character
# copy $inputtext to mary data socket
print $maryDataSocket $inputtext;
# mark end-of-request:
print $maryDataSocket "\015\012"; # that is a \n, actually
$maryDataSocket->shutdown(1); # we have stopped writing data

########## Read output from server: ##########
# copy the data socket to standard output
if ($out ne "AUDIO") { # text output: line-oriented copy
    my $line;
    while (defined ($line = <$maryDataSocket>)) {
        print STDOUT $line;
    }
} else { # audio data output: binary block copy
    my $nr; # number of bytes read
    my $buf; # buffer to read into
    my $outnr; # number of bytes written
    while($nr = read($maryDataSocket, $buf, 8192)) {
        # (read returns no. of bytes read, 0 at eof)
        print STDOUT $buf
            or die "Write error on stdout";
    } # while read something from socket
} # audio output

### Read complaints from server:
my $line;
while (defined ($line = <$maryInfoSocket>)) {
    print STDERR $line;
}

View file

@ -0,0 +1,136 @@
#!/usr/bin/env perl
#
# MARY Text-to-Speech System
# Minimal Socket client (for demonstration)
##########################################################################
# Copyright (C) 2000-2006 DFKI GmbH.
# All rights reserved. Use is subject to license terms.
#
# Permission is hereby granted, free of charge, to use and distribute
# this software and its documentation without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of this work, and to
# permit persons to whom this work is furnished to do so, subject to
# the following conditions:
#
# 1. The code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
# 2. Any modifications must be clearly marked as such.
# 3. Original authors' names are not deleted.
# 4. The authors' names are not used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# DFKI GMBH AND THE CONTRIBUTORS TO THIS WORK DISCLAIM ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL DFKI GMBH NOR THE
# CONTRIBUTORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL
# DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
# PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
# THIS SOFTWARE.
##########################################################################
# Author: Marc Schroeder
# This is a minimal version of a socket client for the mary TtS system.
# It is intended to be used as a model for writing socket clients for
# particular applications. All input verification, command line options,
# and other luxury have been omitted.
#
# Usage:
# maryclient.pl infile.txt > outfile.wav
#
# Input/output formats and other options must be set in the perl code directly.
# See also Protocol.html for a description of the Protocol.
#
use strict;
use IO::Socket;

############################
# Package-global variables #
############################
# global settings:
my $maryInfoSocket; # handle to socket server (control connection)
my $maryDataSocket; # handle to socket server (data connection)
my $host; # string containing host address
my $port; # socket port on which we listen
my ($in, $out, $audiotype); # requested input / output format
my $voice; # default voice
my $id; # request ID

######################################################################
################################ main ################################
######################################################################
STDOUT->autoflush(1);
# NOTE(review): server address and formats are hard-coded; edit these to
# target a different MARY installation or input/output type.
$host = "cling.dfki.uni-sb.de";
$port = 59125;
$in = "TEXT_DE";
$out = "AUDIO";
$audiotype = "MP3";
#$audiotype = "WAVE";
#$voice = "male";
$voice = "de3";

# create a tcp connection to the specified host and port
$maryInfoSocket = IO::Socket::INET->new(Proto => "tcp",
                                        PeerAddr => $host,
                                        PeerPort => $port)
    or die "can't connect to port $port on $host: $!";
# avoid buffering when writing to server:
$maryInfoSocket->autoflush(1); # so output gets there right away

########## Write input to server: ##########
# formulate the request (a single CRLF-terminated line):
print $maryInfoSocket "MARY IN=$in OUT=$out AUDIO=$audiotype";
if ($voice) { print $maryInfoSocket " VOICE=$voice"; }
print $maryInfoSocket "\015\012";
# receive a request ID:
$id = <$maryInfoSocket>;
chomp $id; chomp $id; # twice: the ID line arrives CRLF-terminated
# open second socket for the data:
$maryDataSocket = IO::Socket::INET->new(Proto => "tcp",
                                        PeerAddr => $host,
                                        PeerPort => $port)
    or die "can't connect to port $port on $host: $!";
# identify with request number:
print $maryDataSocket $id, "\015\012";
# copy standard input and/or files given on the command line to the socket
while (defined (my $line = <>)) {
    print $maryDataSocket $line;
}
# mark end-of-request:
print $maryDataSocket "\015\012"; # that is a \n, actually
shutdown($maryDataSocket, 1); # we have stopped writing data

########## Read output from server: ##########
# copy the data socket to standard output
if ($out ne "AUDIO") { # text output: line-oriented copy
    my $line;
    while (defined ($line = <$maryDataSocket>)) {
        print STDOUT $line;
    }
} else { # audio data output: binary block copy
    my $nr; # number of bytes read
    my $buf; # buffer to read into
    my $outnr; # number of bytes written
    while($nr = read($maryDataSocket, $buf, 100000)) {
        # (read returns no. of bytes read, 0 at eof)
        print STDOUT $buf
            or die "Write error on stdout";
    } # while read something from socket
} # audio output

### Read complaints from server:
my $line;
while (defined ($line = <$maryInfoSocket>)) {
    print STDERR $line;
}

View file

@ -0,0 +1,261 @@
#!/usr/bin/env ruby
#
# A basic mary client in Ruby,
# kindly donated to the MARY TTS project
# by Hugh Sasse. Thanks Hugh!
# Ruby client for the MARY TTS HTTP server.
# This is for Windows only, and relies on
# the Win32-Sound gem to access the audio.
#
#
require 'rubygems'
require 'net/http'
require 'uri'
# A fairly minimal client class for the
# MARY TTS system. This uses the modern
# HTTP interface to access the server.
# At present, this doesn't wrap the methods
# which provide documentation or lists of
# voices or features.
# A minimal client for the MARY TTS HTTP server, using the modern
# HTTP interface.  It does not wrap the methods that list voices,
# features or documentation.
#
# Defaults assume a server on the local machine with British voices
# installed; every setting can be changed through the accessors.
class MaryClient
  attr_accessor :host, :port,
                :input_type, :output_type,
                :locale, :audio, :voice

  # Defaults:
  #   host        "127.0.0.1"
  #   port        59125
  #   input_type  "TEXT"
  #   output_type "AUDIO"
  #   locale      "en_GB"
  #   audio       "WAVE_FILE"
  #   voice       "dfki-prudence-hsmm"
  def initialize
    @host        = "127.0.0.1"
    @port        = 59125
    @input_type  = "TEXT"
    @output_type = "AUDIO"
    @locale      = "en_GB"
    @audio       = "WAVE_FILE"
    @voice       = "dfki-prudence-hsmm"
  end

  # Send +message+ to the server and return the response body in the
  # configured output format (raw audio bytes for AUDIO output).
  # Raises on any non-success HTTP status.
  def generate(message)
    request_params = {
      "INPUT_TEXT"  => message,
      "INPUT_TYPE"  => @input_type,
      "OUTPUT_TYPE" => @output_type,
      "LOCALE"      => @locale,
      "AUDIO"       => @audio,
      "VOICE"       => @voice,
    }
    response = Net::HTTP.post_form(URI.parse("http://#{@host}:#{@port}/process"), request_params)
    response.value # raises an exception unless the status is 2xx
    response.body
  end
end
# If this invoked as a program with no
# arguments, just give a greeting to show
# that it is working. If arguments are
# supplied, process options to work out
# what to do with the arguments.
if __FILE__ == $0
  # These files are only loaded when this is
  # invoked as a program.
  require 'rbconfig'
  require 'getoptlong'

  # PLATFORM SPECIFIC CODE.
  # Needs more work [!]
  # NOTE(review): Config::CONFIG has been deprecated in modern Ruby in
  # favour of RbConfig::CONFIG; kept as-is for the Ruby version this was
  # written against.
  case Config::CONFIG['host_os']
  when /darwin/i
    raise NotImplementedError.new("Don't know how to play audio on a Mac")
  when /linux/i
    raise NotImplementedError.new("Far too many ways to play audio on Linux, you'll need to choose something")
  when /sunos|solaris/i
    raise NotImplementedError.new("Have not played audio on Suns for too long to implement this.")
  when /java/i
    raise NotImplementedError.new("Don't know how to play audio from Java ")
  when /win32|cygwin|mingw32/i
    # The various things that can use the Win32
    # sound gem
    require 'win32/sound'
    # Create a player class that will play the
    # sound that the Mary TTS system returns
    class Player
      # Play the audio passed in.
      # Possibly this should receive the audio
      # type so we can check that we can play it,
      # but at the moment that is the
      # responsibility of the user.
      def self.play(sound)
        Win32::Sound.play(sound, Win32::Sound::MEMORY)
      end
    end
  else
    raise NotImplementedError.new("Haven't thought how to support this OS yet")
  end

  client = nil
  split = ""   # record separator used in :files mode ("" = paragraph mode)

  if ARGV.size.zero?
    # No arguments: just demonstrate that the client works.
    client = MaryClient.new()
    sound = client.generate("Hello from Mary Text to Speech with Ruby.")
    Player.play(sound)
  else
    args_mode = :words     # :words = speak ARGV; :files = read ARGV as files
    stdout_mode = :absorb  # :emit = also copy the spoken text to stdout
    opts = GetoptLong::new(
      ["--audio", "-a", GetoptLong::REQUIRED_ARGUMENT],
      ["--echo", "-e", GetoptLong::NO_ARGUMENT],
      ["--help", "-h", GetoptLong::NO_ARGUMENT],
      ["--host", "-H", GetoptLong::REQUIRED_ARGUMENT],
      ["--input-type", "-i", GetoptLong::REQUIRED_ARGUMENT],
      ["--locale", "-l", GetoptLong::REQUIRED_ARGUMENT],
      ["--read", "-r", GetoptLong::NO_ARGUMENT],
      ["--split", "-s", GetoptLong::REQUIRED_ARGUMENT],
      ["--output-type", "-o", GetoptLong::REQUIRED_ARGUMENT],
      ["--port", "-P", GetoptLong::REQUIRED_ARGUMENT],
      ["--tee", "-t", GetoptLong::NO_ARGUMENT],
      ["--voice", "-v", GetoptLong::REQUIRED_ARGUMENT]
    )
    opts.each do |opt, arg|
      unless ["--help", "-h"].include?(opt)
        # skip if we are only getting help
        client ||= MaryClient.new()
      end
      case opt
      when "--help", "-h"
        # NOTE(review): "Defualt" below is a typo in user-visible help
        # text; left untouched in this documentation-only pass.
        puts <<-EOHELP
Usage: #{$0} [options] [arguments]
--audio -a
Audio format. Defualt: WAVE_FILE
--echo -e
Act as an echo command and send output
arguments to the synthesizer only (not
to standard output.
Turns off --read|-r
--help -h
Print this help, then exit.
--host -H
The host which is the server.
Default: 127.0.0.1
--input-type -i
The type of the input supplied to the
TTS system. Default: TEXT
--locale -l
The locale of the input. Default: en_GB
--output-type -o
The output type from the TTS system.
Default: AUDIO
--port -P
The port for the TTS server
Default: 59125
--read -r
Read the files passed as arguments.
Turns off --echo|-e
--split -s (lines|paragraphs)
When reading files, split the input
into lines or paragraphs. Paragraphs
mean reading up to the next double
newline. Note, the argument is literally
"lines" or "paragraphs" (or some
abbreviation of those) without the
quotes.
Default is paragraphs.
--tee -t
Act as tee: send the output to the TTS
system, and to standard output.
--voice -v
The voice to use.
Default: dfki-prudence-hsmm
        EOHELP
        exit(0)
      when "--audio", "-a"
        client.audio = arg
      when "--echo", "-e"
        args_mode = :words
      when "--host", "-H"
        client.host = arg
      when "--input-type", "-i"
        client.input_type = arg
      when "--locale", "-l"
        client.locale = arg
      when "--output-type", "-o"
        client.output_type = arg
      when "--port", "-P"
        client.port = arg.to_i
      when "--read", "-r"
        args_mode = :files
      when "--split", "-s"
        # Accept any abbreviation of "paragraphs" or "lines".
        case arg
        when /^p/i
          split = ""
        when /^l/i
          split = $/
        end
      when "--tee", "-t"
        stdout_mode = :emit
      when "--voice", "-v"
        client.voice = arg
      end
    end
    client ||= MaryClient.new()
    case args_mode
    when :words
      # Speak the remaining command-line words as one utterance.
      input_text = ARGV.join(" ")
      unless input_text =~ /\A\s*\Z/m
        sound = client.generate(input_text)
        if client.output_type == "AUDIO"
          Player.play(sound)
        end
      end
      if stdout_mode == :emit
        puts input_text
      end
    when :files
      # Slurp in paragraphs so sentences
      # don't get broken in stupid places.
      $/ = split # paragraph mode
      ARGF.each do |paragraph|
        begin
          unless paragraph =~ /\A\s*\Z/m
            sound = client.generate(paragraph)
            if client.output_type == "AUDIO"
              # and client.audio == "WAVE_FILE"
              Player.play(sound)
            end
          end
        rescue Exception => e
          puts "got error #{e} while trying to say #{paragraph.inspect}"
          raise
        end
        if stdout_mode == :emit
          puts paragraph
        end # end if
      end # end ARGF.each
    end # end case
  end # if ARGV.size.zero?
end

View file

@ -0,0 +1,705 @@
# Tcl/Tk MARY TTS client.
# This has been tested on Windows, and because
# of the use of sound there will be portability
# issues. However, there should be enough here
# for a reasonable start at a client, for any
# platform that supports Tcl/Tk. The platform
# specific code has, as far as possible, been
# isolated in the part of the code that detects
# whether this is being run as a program.
# Notes:
# More work will need to be done with this,
# in order to make the code clean. It should
# probably be wrapped in a package, to solve
# any namespace issues. There are a lot of
# global variables. It seems that some of
# these are necessary for the menus to work.
# Handling of temporary files could be improved.
# TODO:
# Create modifier sliders, for the effects.
# Extend the query proc to make use of them.
# Turn the Help menu into something more useful.
# Debug the actions for the Edit menu.
# Provide a means of getting example inputs
# from the server.
# Provide a means of re-loading all the
# dynamically collected information when the
# server is changed from the menu. This means
# that we need to delete the existing menu
# entries in order to add them correctly.
# How do we ensure temporary files are removed
# in the event of a problem? if {catch {}} ...?
# Maybe leaving them around is diagnostic info?
# Make that an option?
# Add error handling code for network and disk
# failures likely to beset such clients.
# Add sensible defaults for things the user must
# always set at startup, but these will be
# platform spacific. Always default to Audio
# output for example, or is it possible that
# people have no voices installed?
# This is a GUI, so:
package require Tk
# We are communicating with the Mary server
# with HTTP.
package require http
# Use the local machine in preference to the
# one in Germany.
# Default MARY TTS server location; 59125 is the MARY
# server's standard HTTP port.
set mary_tts_default_host "127.0.0.1"
set mary_tts_default_port 59125
# Actual host and port, and global old
# copies to allow revert on cancel in the
# dialogues. Apparently upvar #0 is the
# norm for that sort of thing [Tcl Wiki]
set mary_tts_host $mary_tts_default_host
set old_mary_tts_host $mary_tts_host
set mary_tts_port $mary_tts_default_port
set old_mary_tts_port $mary_tts_port
# Informational URLs
# Relative URLs of the server's static information pages;
# each can be fetched with get_page (no query parameters).
set informational_urls [ list \
version datatypes voices \
audioformats audioeffects ]
#######
# Obtain a static page from the server, i.e.
# no parameters are needed to get it.
# relative_url: path relative to the server root, e.g. "voices".
# Returns the response body as a string.
proc get_page { relative_url } {
    global mary_tts_host mary_tts_port
    set url http://$mary_tts_host:$mary_tts_port/$relative_url
    set token [::http::geturl $url]
    set data [::http::data $token]
    # Free the state array held by the http package; the
    # original leaked one token per request.
    ::http::cleanup $token
    return $data
}
# Break a multi-line string into a Tcl list with one
# element per line.
proc list_of_lines {str} {
    set lines [split $str "\n"]
    return $lines
}
# We will need to collect this information
# when we have the server and port chosen.
# Each helper fetches one of the server's capability pages
# and returns it as a list of lines.
proc get_audioeffects {} {
    list_of_lines [get_page audioeffects]
}
proc get_audioformats {} {
    list_of_lines [get_page audioformats]
}
proc get_datatypes {} {
    list_of_lines [get_page datatypes]
}
proc get_voices {} {
    list_of_lines [get_page voices]
}
# Handling post queries.
# Submit the query to the server, using the
# http POST method.
# url: absolute URL of the endpoint.
# encoded_params: string already produced by ::http::formatQuery.
# Returns the raw response body.
proc make_query {url encoded_params} {
    set token [::http::geturl $url -query $encoded_params]
    set result [::http::data $token]
    # Release the http token; the original leaked one state
    # array per request.
    ::http::cleanup $token
    return $result
}
# Fetch the full contents of the input text widget.
proc get_input_text {} {
    .io.inp.input_area get 1.0 end
}
# Fetch the full contents of the output text widget.
proc get_output_text {} {
    .io.out.output_area get 1.0 end
}
# Collect the audio data from the server.
# Sends text_to_process to the /process endpoint, using the
# currently selected input type, output type, voice (and its
# locale) and audio format -- all globals maintained by the
# menus. Returns the raw response body: audio bytes when
# outputtype is AUDIO, marked-up text otherwise.
proc collect_audio_data {text_to_process} {
global mary_tts_host mary_tts_port
global inputtype outputtype locales
global audioformat voice
set url "http://$mary_tts_host:$mary_tts_port/process"
# ::http::formatQuery converts a list of
# key value pairs into the correct format
# for http POST.
set params [::http::formatQuery INPUT_TEXT $text_to_process INPUT_TYPE $inputtype OUTPUT_TYPE $outputtype LOCALE $locales($voice) AUDIO $audioformat VOICE $voice ]
set result [make_query $url $params]
return $result
}
# Pushes the query to the server and gets
# the results back, displaying or playing
# them.
# When the selected output type is AUDIO the response is
# handed to the platform-dependent [play] proc; otherwise it
# replaces the contents of the output window.
proc generate_output {text_to_process} {
global outputtype
set result [collect_audio_data $text_to_process]
if {$outputtype eq "AUDIO"} {
# call the platform dependent implementation.
play $result
} else {
clear_output
add_message $result
}
# Return the result so we can save it if
# the user requires it.
return $result
}
# These next procs are for handling the
# lists of data one gets back from the server
# which possibly have several words per line,
# separated by spaces.
# If the first word of each listed line is
# significant, extract the list of first words.
# Returns an empty list for an empty input; the original
# errored there because "words" was never initialised.
proc collect_first_words_of_phrase_list {a_list} {
    set words {}
    foreach line $a_list {
        lappend words [lindex [split $line " "] 0]
    }
    return $words
}
# If the second word of each listed line is
# significant, extract the list of second words.
# Returns an empty list for an empty input; the original
# errored there because "words" was never initialised.
proc collect_second_words_of_phrase_list {a_list} {
    set words {}
    foreach line $a_list {
        lappend words [lindex [split $line " "] 1]
    }
    return $words
}
# The list of datatypes must be separated into
# input data types and output data types so that
# interactions with the server make sense.
# This handles the inputs.
# Returns the first word of every line containing "input"
# (case-insensitive); an empty list when nothing matches --
# the original raised "can't read words" in that case.
proc collect_first_words_of_input_types {a_list} {
    set words {}
    foreach line $a_list {
        if {[string match -nocase "*input*" $line]} {
            lappend words [lindex [split $line " "] 0]
        }
    }
    return $words
}
# The list of datatypes must be separated into
# input data types and output data types so that
# interactions with the server make sense.
# This handles the outputs.
# Returns the first word of every line containing "output"
# (case-insensitive); an empty list when nothing matches --
# the original raised "can't read words" in that case.
proc collect_first_words_of_output_types {a_list} {
    set words {}
    foreach line $a_list {
        if {[string match -nocase "*output*" $line]} {
            lappend words [lindex [split $line " "] 0]
        }
    }
    return $words
}
# setup all the variables to hold voices,
# audio options, etc., based on what the
# server can do.
# Queries the server and fills in the globals the radio
# menus use: the available audio formats, input/output
# datatypes and voices, a locales() array mapping each voice
# name to its locale, and a default selection (the first
# entry) for each category. Must be called only when the
# server host/port are set and the server is reachable.
proc setup_globals {} {
global audioeffects audioformats voices
global inputtypes outputtypes audioformat voice
global inputtype outputtype locales
set audioeffects [get_audioeffects]
set audioformats [get_audioformats]
set audioformat [lindex $audioformats 0 ]
set datatypes_data [get_datatypes]
set inputtypes [collect_first_words_of_input_types $datatypes_data]
set inputtype [lindex $inputtypes 0]
set outputtypes [collect_first_words_of_output_types $datatypes_data]
set outputtype [lindex $outputtypes 0]
set voices_data [get_voices]
set voices [collect_first_words_of_phrase_list $voices_data]
set locales_list [collect_second_words_of_phrase_list $voices_data ]
# Index each voice's locale by voice name so the /process
# query can look it up directly.
for {set i 0} {$i < [llength $voices]} {incr i} {
set locales([lindex $voices $i]) [lindex $locales_list $i]
}
set voice [lindex $voices 0]
}
# A general procedure for filling in the
# elements of a listbox from a list.
# At present this is unused, but it could
# be useful later. [It took a while to
# figure out so I'm not ready to kill it
# with YAGNI.]
# a_var names a list variable in the caller's scope; every
# element is appended to the given listbox widget.
proc add_listbox_items {a_var a_widget} {
    upvar $a_var items
    foreach element $items {
        $a_widget insert end $element
    }
}
# Create the menubuttons along the top.
# Usual File, Edit and Help menus plus
# those to set attributes.
# Each pair in the list below creates a menubutton
# .menus.<tag> wired to an (initially empty) menu
# .menus.<tag>.menu; the attribute menus (input type,
# output type, voice, audio format) are populated later
# from server data.
proc create_menubuttons {} {
set buttons [ list file File edit Edit \
server "Server" \
inputtype "Input type" outputtype "Output type" \
voice Voice \
audioformat "Audio format" \
textstyle "Text style" help Help ]
set count 1
# buttons is a flat {tag label ...} list; lay the buttons
# out left to right along grid row 1.
foreach { menu_tag string_tag} $buttons {
menubutton .menus.$menu_tag -text $string_tag \
-menu .menus.${menu_tag}.menu -underline 0 -font ClientFont
menu .menus.${menu_tag}.menu -tearoff true
grid .menus.$menu_tag -in .menus -row 1 -column $count -sticky w
incr count
}
}
# Get the contents of a text file for reading
# or loading into a text widget, etc.
# what_for is shown as the title of the Open dialogue.
# Returns the whole file contents, or "" when the user
# cancels the dialogue.
proc text_file_contents {what_for} {
set a_file [tk_getOpenFile -title $what_for ]
set the_text ""
if {$a_file != ""} {
set a_stream [open $a_file r ]
set the_text [read $a_stream]
close $a_stream
}
return $the_text
}
# Save the_text to a text file specified
# by the user, for the given reason (what_for).
# At the moment there is no error handling
# for this (disk full, write protected, etc).
# what_for is shown as the title of the Save dialogue;
# nothing happens if the user cancels it.
proc save_text_file {the_text what_for} {
set a_file [tk_getSaveFile -title $what_for -parent .]
if {$a_file != ""} {
set a_stream [open $a_file w ]
puts $a_stream $the_text
close $a_stream
}
}
# Save the_data to a binary file specified
# by the user, for the given reason (what_for),
# a text string.
# At the moment there is no error handling
# for this (disk full, write protected, etc).
# Nothing happens if the user cancels the dialogue.
proc save_binary_file {the_data what_for} {
set a_file [tk_getSaveFile -title $what_for -parent .]
if {$a_file != ""} {
set a_stream [open $a_file w ]
# Binary translation: write the bytes untouched (no
# newline or encoding conversion).
fconfigure $a_stream -translation binary
puts -nonewline $a_stream $the_data
close $a_stream
}
}
# Create the menu for File operations
# New clears the input area; Open replaces it with a file's
# contents; Read speaks a file directly without loading it;
# the Save entries write the respective text areas to disk.
proc create_menu_file {} {
set fmenu .menus.file.menu
$fmenu add command -label "New" \
-font ClientFont -command {
.io.inp.input_area delete 1.0 end
}
# Replace the contents of the input text
# widget by the data from the open file.
# <FIXME>YAGNI, but is there any reason
# to allow inserting a file, rather than
# replacing the text with file contents?
# </FIXME>
$fmenu add command -label "Open" \
-font ClientFont -command {
set the_text [text_file_contents "File to load"]
if {$the_text != ""} {
.io.inp.input_area delete 1.0 end
.io.inp.input_area insert end $the_text
}
}
$fmenu add command -label "Read" \
-font ClientFont -command {
generate_output [text_file_contents "File to read"]
}
# How to make these disabled for now?
$fmenu add command -label "Save Input" \
-font ClientFont -command {
set the_text [get_input_text]
save_text_file $the_text "Save Input"
}
$fmenu add command -label "Save Output" \
-font ClientFont -command {
set the_text [get_output_text]
save_text_file $the_text "Save Output"
}
}
# Create the menu for edit operations.
# Fix over the original: Tk virtual event names are case
# sensitive, so <<copy>> never matched anything -- the
# standard clipboard events are <<Copy>> and <<Paste>>.
proc create_menu_edit {} {
    set emenu .menus.edit.menu
    $emenu add command -label "Select All from Input Area" \
        -font ClientFont -command {
            # This code says copy the selection as well.
            # May be wrong for some platforms, but is
            # it more useful?
            .io.inp.input_area tag add sel 1.0 end
            event generate .io.inp.input_area <<Copy>>
        }
    $emenu add command -label "Select All from Output Area" \
        -font ClientFont -command {
            # This code says copy the selection as well.
            # May be wrong for some platforms, but is
            # it more useful?
            .io.out.output_area tag add sel 1.0 end
            event generate .io.out.output_area <<Copy>>
        }
    $emenu add command -label "Copy from Input Area" \
        -font ClientFont -command {
            # this appears not to work. FIXME
            event generate .io.inp.input_area <<Copy>>
        }
    $emenu add command -label "Copy from Output Area" \
        -font ClientFont -command {
            # Was <<copy>>; corrected to the standard event.
            event generate .io.out.output_area <<Copy>>
        }
    $emenu add command -label "Paste into Input Area" \
        -font ClientFont -command {
            # this appears not to work. FIXME
            event generate .io.inp.input_area <<Paste>>
        }
    # Placeholder: insert example text fetched from the server.
    $emenu add command \
        -font ClientFont -label "Insert example text into Input Area"\
        -command {
        }
    # Add specific editing commands here later.
    # For example, we would like to be able to
    # add whole tags to the XML based formats,
    # wrap matching tags around selected text.
    # Also we need to find out what happens with
    # copy cut and paste, given that X Windows
    # is different from MS Windows.
    # Allow example text to be inserted.
    # However, my thinking is that this should not
    # overwrite as it is in the Java application,
    # because this rubs out edits when switching
    # voices, and this can be annoying when
    # exploring the system.
}
# Set the server properties, mostly just
# host and port. Maybe later protocol will
# be possible for https connections?
proc create_menu_server {} {
    set smenu .menus.server.menu
    $smenu add command -label "host" -font ClientFont -command {
        create_entry_dialog "MARY TTS server name" "hostname/IP Address" mary_tts_host
    }
    # Entry label fixed: the original read "pott number".
    $smenu add command -label "port" -font ClientFont -command {
        create_entry_dialog "MARY TTS server port" "port number" mary_tts_port
    }
}
# Set the family and size of the shared ClientFont, which
# rescales the whole display: every widget was created with
# -font ClientFont, so a single [font configure] is enough.
# The original looped over window names without ever using
# the loop variable, repeating the identical global
# configure call once per window.
proc setup_font {family size} {
    font configure ClientFont -family $family -size $size
}
# Create the menu for changing the text size.
# Three cascades (one per font family), each listing point
# sizes; choosing an entry calls setup_font to re-configure
# the shared ClientFont, rescaling the whole interface.
proc create_menu_textstyle {} {
set tmenu .menus.textstyle.menu
$tmenu add cascade -label "Courier" -underline 0 -menu \
$tmenu.courier -font ClientFont
$tmenu add cascade -label "Times" -underline 0 -menu \
$tmenu.times -font ClientFont
$tmenu add cascade -label "Helvetica" -underline 0 -menu \
$tmenu.helvetica -font ClientFont
# Build each cascade's size entries; [list ...] freezes the
# family and size into the callback at creation time.
foreach {name family} [list $tmenu.courier Courier \
$tmenu.times Times $tmenu.helvetica Helvetica ] {
set m1 [menu $name]
foreach pts {6 7 8 9 10 12 14 16 18 20 24 28 32 36} {
$m1 add command -label "$pts" -font ClientFont\
-command [list setup_font $family $pts ]
}
}
}
# Create the menu for Help
# Introduction pops up a message box with usage notes;
# About is still an empty stub.
proc create_menu_help {} {
# This is all pretty much "wet paint"
# Is there enough to merit separate menus?
set hmenu .menus.help.menu
$hmenu add command -label "Introduction" -font ClientFont\
-command {
tk_messageBox -message "This is a basic Tcl/Tk
client for the MARY TTS system. Most of the options
are reached through the menus on the top. Some
facilities are presently lacking.
Most of the interface should be self-explanatory.
In the File menu, Read will read a given file aloud
(or at least take it as input for the present
form of processing), whereas Open will load it
into the input area. Save input and Save output
refer to the contents of the text windows. The
save button next to the play button will save
the output to a file; this is assumed to be a
text file, unless the output is audio, in which
case it is a binary file.
The Edit menu has cut and paste facilities,
but these don't seem to work reliably. The
default key bindings for text areas should
be useable.
You will need to set the input and output types
and the audio format before pressing play.
Code does not yet exist to figure out sensible
defaults for your platform.
This does not have support for the effects, yet.
Contributions from developers welcome." -type ok
}
$hmenu add command -label "About" -command {} -font ClientFont
}
# We need to create menus for the available
# voices and audio formats, etc.
# When we have the data for these menus from
# the server, create them by using the global
# lists of information.
# "what" names both the selection global (e.g. "voice") and,
# with an "s" appended, the list of choices (e.g. "voices").
# NOTE(review): the choices list is resolved with [upvar 1],
# i.e. in the caller's stack frame, so this proc must be
# called from global scope -- as it is, at startup.
proc create_radio_menu_from_list {what} {
global $what
set plural "${what}s"
upvar 1 $plural var
foreach item $var {
.menus.${what}.menu add radiobutton -label $item -variable $what \
-value $item -font ClientFont
}
}
# Cancel action for the entry dialogue: restore the edited
# global from its saved old_* copy, then dismiss the
# dialogue window.
proc reset_entry_and_var {a_variable} {
    upvar #0 $a_variable current
    upvar #0 old_$a_variable saved
    set current $saved
    destroy .entry_dialogue
}
# Create the toplevel for choosing a host
# or port, something taken from an entry.
# a_message: dialogue heading; a_label: label for the entry;
# a_variable: name of the global being edited.
proc create_entry_dialog {a_message a_label a_variable} {
    upvar #0 $a_variable var
    upvar #0 old_$a_variable old_var
    # Snapshot the current value so Cancel reverts to the
    # value in force when this dialogue opened. The original
    # only ever saved the startup default, so cancelling
    # after an earlier accepted edit reverted too far.
    set old_var $var
    toplevel .entry_dialogue
    label .entry_dialogue.the_message -text $a_message \
        -font ClientFont
    label .entry_dialogue.the_label -text $a_label -font ClientFont
    entry .entry_dialogue.the_entry -textvariable $a_variable \
        -font ClientFont
    button .entry_dialogue.ok -text "OK" -font ClientFont -command {
        destroy .entry_dialogue
    }
    button .entry_dialogue.cancel -text "Cancel" -font ClientFont \
        -command "reset_entry_and_var $a_variable"
    grid .entry_dialogue.the_message -row 1 -column 1
    grid .entry_dialogue.the_label -row 2 -column 1
    grid .entry_dialogue.the_entry -row 2 -column 2
    grid .entry_dialogue.ok -row 3 -column 1
    grid .entry_dialogue.cancel -row 3 -column 2
}
# Append a_message to the read-only output text widget.
# The widget is normally disabled so the user cannot type
# into it; enable it just long enough to insert.
proc add_message {a_message} {
    set out .io.out.output_area
    $out configure -state normal
    $out insert end $a_message
    $out configure -state disabled
}
# Empty the read-only output text widget, re-disabling it
# afterwards.
proc clear_output {} {
    set out .io.out.output_area
    $out configure -state normal
    $out delete 1.0 end
    $out configure -state disabled
}
# Sound generation is platform dependent. This default
# implementation is the "abstract" fallback: supported
# platforms override it at startup, and otherwise it just
# reports in the output window that nothing will be played.
proc play {sound} {
    add_message "play sound not implemented on this platform apparently"
}
# Graphical stuff.
# In order to be able to scale the font, define a font.
# ClientFont is shared by every widget; the Text style menu
# re-configures it, which rescales the whole interface.
font create ClientFont -family [font actual TkDefaultFont -family] \
-size [font actual TkDefaultFont -size]
frame .menus
create_menubuttons
create_menu_file
create_menu_edit
create_menu_server
create_menu_textstyle
create_menu_help
# Fill in the other menus at runtime.
# .io communicates text with the user,
# through an input and output window.
frame .io
frame .io.inp
frame .io.out
# .controls will hold the play button and
# the effects controls.
frame .controls
# Draw the controls in .io
label .io.inp.input_label -text "Input Area" -font ClientFont
text .io.inp.input_area -height 10 -width 40 \
-xscrollcommand ".io.inp.input_x set" \
-yscrollcommand ".io.inp.input_y set" -font ClientFont
scrollbar .io.inp.input_x -orient horizontal \
-command ".io.inp.input_area xview"
scrollbar .io.inp.input_y -orient vertical \
-command ".io.inp.input_area yview"
label .io.out.output_label -text "Output Area" -font ClientFont
text .io.out.output_area -height 10 -width 40 -state disabled \
-xscrollcommand ".io.out.output_x set" \
-yscrollcommand ".io.out.output_y set" -font ClientFont
scrollbar .io.out.output_x -orient horizontal \
-command ".io.out.output_area xview"
scrollbar .io.out.output_y -orient vertical \
-command ".io.out.output_area yview"
# Input area on the left, output on the right; each has its
# label above and scrollbars beside and below.
grid .io.inp -in .io -row 1 -column 1
grid .io.out -in .io -row 1 -column 2
grid .io.inp.input_label -in .io.inp -row 1 -column 1
grid .io.inp.input_area -in .io.inp -row 2 -column 1
grid .io.inp.input_y -in .io.inp -row 2 -column 2 -sticky ns
grid .io.inp.input_x -in .io.inp -row 3 -column 1 -sticky ew
grid .io.out.output_label -in .io.out -row 1 -column 1
grid .io.out.output_area -in .io.out -row 2 -column 1
grid .io.out.output_y -in .io.out -row 2 -column 2 -sticky ns
grid .io.out.output_x -in .io.out -row 3 -column 1 -sticky ew
# "play" sends the input text to the server and plays or
# displays the result, depending on the output type.
button .controls.play -text "play" -font ClientFont -command {
generate_output [get_input_text]
}
grid .controls.play -in .controls -row 1 -column 1
# "save" re-queries the server and writes the result to a
# file: binary when the output type is AUDIO, text otherwise.
button .controls.save -text "save" -font ClientFont -command {
global outputtype
set input_text [get_input_text]
if { $outputtype eq "AUDIO" } {
save_binary_file [collect_audio_data $input_text ] "Save audio file"
} else {
save_text_file [collect_audio_data $input_text ] "Save output to file"
}
}
grid .controls.save -in .controls -row 1 -column 2
pack .menus .io .controls -in . -side top
# Detect whether this is the main program
# This test was taken from the Tcl Wiki, and
# seems to work OK.
if {[info exists argv0] && [file tail [info script]] eq [file tail $argv0]} {
# Try to find the temporary files directory.
# Each catch silently keeps the previous value when an env
# variable is missing, so the effective precedence is TEMP,
# then TMP, then TRASH_FOLDER, then the "/tmp" fallback
# (the last successful set wins).
catch { set tmpdir "/tmp" }
catch { set tmpdir $::env(TRASH_FOLDER) }
catch { set tmpdir $::env(TMP) }
catch { set tmpdir $::env(TEMP) }
# <FIXME>This needs better handling of
# possible alternatives</FIXME>
# This is needed for Windows sound only.
# Do the platform dependent things.
if {$tcl_platform(platform) eq "windows"} {
package require twapi
# Override the stub [play]: write the audio bytes to a
# temporary file, play it through twapi, then delete it.
proc play {sound} {
global tmpdir
# Write sound to a temporary file
set sndfile [file join $tmpdir "MARYTTS_sound.[pid].wav" ]
set stream [open $sndfile w]
# Make sure the file is binary:
fconfigure $stream -translation binary
puts -nonewline $stream $sound
close $stream
# Play the file.
::twapi::play_sound $sndfile
# Remove the file.
file delete $sndfile
}
}
# Put other platforms here.
# Setup the globals with reference to the
# server, which is assumed to be working.
# Since we have options to alter this with
# menu items, there probably needs to be
# some way to reload all this. But we need
# to know how to delete the existing menu
# entries to do that.
setup_globals
create_radio_menu_from_list inputtype
create_radio_menu_from_list outputtype
create_radio_menu_from_list voice
create_radio_menu_from_list audioformat
# Note, at the moment voices holds locales,
# gender, and voice type
# At the moment this is just diagnostic:
## add_message [ join $voices "\n" ]
# it tells us we have a basically working
# system and the list of voices has been
# picked up and manipulated correctly.
# So it is commented out now.
}

View file

@ -0,0 +1,81 @@
<html>
<!--
Copyright 2000-2006 DFKI GmbH.
All Rights Reserved. Use is subject to license terms.
Permission is hereby granted, free of charge, to use and distribute
this software and its documentation without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of this work, and to
permit persons to whom this work is furnished to do so, subject to
the following conditions:
1. The code must retain the above copyright notice, this list of
conditions and the following disclaimer.
2. Any modifications must be clearly marked as such.
3. Original authors' names are not deleted.
4. The authors' names are not used to endorse or promote products
derived from this software without specific prior written
permission.
DFKI GMBH AND THE CONTRIBUTORS TO THIS WORK DISCLAIM ALL WARRANTIES WITH
REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL DFKI GMBH NOR THE
CONTRIBUTORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL
DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
THIS SOFTWARE.
-->
<head>
<meta http-equiv="content-type" content="text/html; charset=UTF-8">
<link rel=stylesheet type="text/css" href="mary.css">
<script type="text/javascript"></script>
</head>
<body>
<h1 align="center">The MARY Text-to-Speech System: Online Demo</h1>
<div align="center">
<p>Synthesise any text online (max. 5000 characters):
</p>
<form name="InputForm" method="get" action="maryclient.cgi">
<!-- Set the language of your input here: TEXT_DE=German, TEXT_EN=English... -->
<input type="hidden" name="in" value="TEXT"/>
<input type="hidden" name="out" value="AUDIO"/>
<p>Input text:</p>
<textarea name="inputtext" rows=8 cols=50 wrap=virtual>
Welcome to the world of speech synthesis!
</textarea><br>
<table align="center">
<tr>
<td>
Voice:
<select name="voice">
<option value="us1">us1 (American English, female)</option>
<option value="us2">us2 (American English, male)</option>
</select>
</td><td>
Audio format:
<select name="audiotype">
<option value="AU">Sun Audio, 16 bit</option>
<option value="AIFF">AIFF, 16 bit</option>
<option value="WAVE" selected>WAV, 16 bit</option>
<option value="MP3">mp3</option>
</select>
</td>
</tr>
<tr>
<td align="right">
<input type=submit value="Speak">
</td><td align="left">
<input type=reset value="Reset">
</td>
</tr>
</table>
<p>
<input type=radio name="save_to_disk" value="0" checked>Hear directly
&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
<input type=radio name="save_to_disk" value="1">Save to disk
</p>
</form>
</div>
</body>
</html>

View file

@ -0,0 +1,20 @@
<HTML>
<HEAD>
<TITLE>EmoSpeak Applet</TITLE>
</HEAD>
<BODY>
<H3><HR WIDTH="100%">EmoSpeak Applet<HR WIDTH="100%"></H3>
To properly run the emospeak applet, the following conditions must be fulfilled:
<br>
1. The emospeakapplet.jar and maryclient.jar files must be in the same directory as this html file.
<br>
2. On the server machine from which the html file is loaded, a MARY server with German voices must be running.
<br>
<P>
<APPLET archive="emospeakapplet.jar, maryclient.jar" code="marytts.tools.emospeak.EmoSpeakApplet.class" width=550 height=600></APPLET>
</P>
</BODY>
</HTML>

View file

@ -0,0 +1,94 @@
#!/bin/sh -e
#
# marytts This init.d script is used to start a MARY TTS server.
# Run process as same user as this script:
SUDO=""
# Run process as user 'mary':
#SUDO="sudo -u mary"
# JVM used to launch the server.
JAVA=/usr/bin/java
# Installation directory (note the embedded space: keep it quoted).
MARY_BASE="/usr/local/MARY TTS"
# Records the PID of the server started by this script.
MARY_PIDFILE=/var/run/marytts.pid
# Echo the PID of the running MARY server and return 0, or
# return 1 (echoing nothing) when no matching process exists.
# A java PID only counts when it matches the one recorded in
# $MARY_PIDFILE, so unrelated JVMs are ignored.
pidof_marytts() {
PIDOF=`which pidof` || true
if [ -x "$PIDOF" ]; then # command exists
PIDS=`pidof java` || true
else # no pidof command
PIDS=`ps axc|awk "{if (\\$5==\\"java\\") print \\$1}"`
fi
[ -e $MARY_PIDFILE ] && PIDS2=`cat $MARY_PIDFILE`
# if there is a pid we need to verify that belongs to MARY
# for real
for i in $PIDS; do
if [ "$i" = "$PIDS2" ]; then
# in this case the pid stored in the
# pidfile matches one of the pidof java
# so a simple kill will make it
echo $i
return 0
fi
done
return 1
}
# Launch the server in the background and record its PID in
# $MARY_PIDFILE. The start is not verified; the function's
# exit status is that of the pidfile write.
marytts_start() {
CLASSPATH="$MARY_BASE/java/mary-common.jar:$MARY_BASE/java/log4j-1.2.15.jar"
$SUDO $JAVA -ea -Xms40m -Xmx1g -cp "$CLASSPATH" -Dmary.base="$MARY_BASE" marytts.server.Mary &
PID="$!"
echo $PID > $MARY_PIDFILE
}
# Stop the server if it is running, and clean up the pidfile.
# pidof_marytts exits non-zero when no matching process
# exists; the "|| true" keeps that from aborting the whole
# script under "sh -e" -- without it, the bare marytts_stop
# call in the restart branch killed the script before the
# server could be started again.
marytts_stop() {
PID=$(pidof_marytts) || true
if [ "${PID}" ]; then
kill $PID
rm -f $MARY_PIDFILE
fi
}
# Dispatch on the init.d action argument.
case $1 in
start)
echo "Starting TTS server"
if marytts_start; then
exit 0
else
exit 1
fi
;;
stop)
echo "Stopping TTS server"
if marytts_stop; then
exit 0
else
exit 1
fi
;;
restart)
echo "Restarting TTS server"
marytts_stop
# Give the JVM time to shut down and free its port
# before starting a new instance.
sleep 10
if marytts_start; then
exit 0
else
exit 1
fi
;;
status)
# pidof_marytts exits 1 when not running; "|| true" keeps
# "sh -e" from aborting the status query.
PID=$(pidof_marytts) || true
if [ "${PID}" ]; then
echo "MARY TTS is running (pid $PID)"
else
echo "MARY TTS is not running"
fi
exit 0
;;
*)
echo "Usage: /etc/init.d/marytts {start|stop|restart|status}"
exit 1
;;
esac

View file

@ -0,0 +1,242 @@
<?xml version="1.0" encoding="UTF-8"?><marytts-install xmlns="http://mary.dfki.de/installer">
<voice locale="de" name="bits1-hsmm" gender="female" type="hsmm" version="5.1">
<description>A female German hidden semi-Markov model voice, built from voice recordings provided by the BITS project at the Bavarian Archive of Speech Signals</description>
<license href="http://mary.dfki.de/download/by-nd-3.0.html"/>
<package md5sum="69aeb40bc27492d887f7d60bd8afc2aa" filename="voice-bits1-hsmm-5.1.zip" size="1360761">
<location folder="true" href="http://mary.dfki.de/download/5.1/"/>
</package>
<depends language="de" version="5.1"/>
</voice>
<voice locale="de" name="bits3" gender="male" type="unit selection" version="5.1">
<description>A male German unit selection voice, built from voice recordings provided by the BITS project at the Bavarian Archive of Speech Signals</description>
<license href="http://mary.dfki.de/download/by-nd-3.0.html"/>
<package md5sum="e11f0d1057dd80d5ab307eaf3c2a2e81" filename="voice-bits3-5.1.zip" size="278237075">
<location folder="true" href="http://mary.dfki.de/download/5.1/"/>
</package>
<depends language="de" version="5.1"/>
</voice>
<voice locale="de" name="bits3-hsmm" gender="male" type="hsmm" version="5.1">
<description>A male German hidden semi-Markov model voice, built from voice recordings provided by the BITS project at the Bavarian Archive of Speech Signals</description>
<license href="http://mary.dfki.de/download/by-nd-3.0.html"/>
<package md5sum="d535206d88da7f5b066a4624c321ee73" filename="voice-bits3-hsmm-5.1.zip" size="1557124">
<location folder="true" href="http://mary.dfki.de/download/5.1/"/>
</package>
<depends language="de" version="5.1"/>
</voice>
<voice locale="de" name="dfki-pavoque-neutral" gender="male" type="unit selection" version="5.1">
<description>A male German unit selection voice</description>
<license href="http://mary.dfki.de/download/by-nd-3.0.html"/>
<package md5sum="b2067addeb337de0e31c1e0057ccaf93" filename="voice-dfki-pavoque-neutral-5.1.zip" size="446054145">
<location folder="true" href="http://mary.dfki.de/download/5.1/"/>
</package>
<depends language="de" version="5.1"/>
</voice>
<voice locale="de" name="dfki-pavoque-neutral-hsmm" gender="male" type="hsmm" version="5.1">
<description>A male German hidden semi-Markov model voice</description>
<license href="http://mary.dfki.de/download/by-nd-3.0.html"/>
<package md5sum="c064426f03689cc0681514818495b347" filename="voice-dfki-pavoque-neutral-hsmm-5.1.zip" size="2835023">
<location folder="true" href="http://mary.dfki.de/download/5.1/"/>
</package>
<depends language="de" version="5.1"/>
</voice>
<voice locale="de" name="dfki-pavoque-styles" gender="male" type="unit selection" version="5.1">
<description>A male German unit selection voice with expressive styles "happy", "sad", "angry", and "poker"</description>
<license href="http://mary.dfki.de/download/by-nd-3.0.html"/>
<package md5sum="f497dd93f78fa4a2350adea1196ccd7a" filename="voice-dfki-pavoque-styles-5.1.zip" size="692113207">
<location folder="true" href="http://mary.dfki.de/download/5.1/"/>
</package>
<depends language="de" version="5.1"/>
</voice>
<voice locale="en_GB" name="dfki-poppy" gender="female" type="unit selection" version="5.1">
<description>A female British English expressive unit selection voice: Cheerful Poppy</description>
<license href="http://mary.dfki.de/download/by-nd-3.0.html"/>
<package md5sum="8a9bd4c4c888e99f8c367cdac614af9c" filename="voice-dfki-poppy-5.1.zip" size="99318417">
<location folder="true" href="http://mary.dfki.de/download/5.1/"/>
</package>
<depends language="en-GB" version="5.1"/>
</voice>
<voice locale="en_GB" name="dfki-poppy-hsmm" gender="female" type="hsmm" version="5.1">
<description>A female British English hidden semi-Markov model voice</description>
<license href="http://mary.dfki.de/download/by-nd-3.0.html"/>
<package md5sum="04f1b36ee7113b13e01d3b131c6af75f" filename="voice-dfki-poppy-hsmm-5.1.zip" size="1015901">
<location folder="true" href="http://mary.dfki.de/download/5.1/"/>
</package>
<depends language="en-GB" version="5.1"/>
</voice>
<voice locale="en_GB" name="dfki-prudence" gender="female" type="unit selection" version="5.1">
<description>A female British English expressive unit selection voice: Pragmatic Prudence</description>
<license href="http://mary.dfki.de/download/by-nd-3.0.html"/>
<package md5sum="7c81cbc736450219ea1597b217ea9b8f" filename="voice-dfki-prudence-5.1.zip" size="250841190">
<location folder="true" href="http://mary.dfki.de/download/5.1/"/>
</package>
<depends language="en-GB" version="5.1"/>
</voice>
<voice locale="en_GB" name="dfki-prudence-hsmm" gender="female" type="hsmm" version="5.1">
<description>A female British English hidden semi-Markov model voice</description>
<license href="http://mary.dfki.de/download/by-nd-3.0.html"/>
<package md5sum="87a27d0ae3eed71d1d34bd3337f06501" filename="voice-dfki-prudence-hsmm-5.1.zip" size="1560473">
<location folder="true" href="http://mary.dfki.de/download/5.1/"/>
</package>
<depends language="en-GB" version="5.1"/>
</voice>
<voice locale="en_GB" name="dfki-obadiah" gender="male" type="unit selection" version="5.1">
<description>A male British English expressive unit selection voice: Gloomy Obadiah</description>
<license href="http://mary.dfki.de/download/by-nd-3.0.html"/>
<package md5sum="dde06a7a4849b922bc45b123a5b7b504" filename="voice-dfki-obadiah-5.1.zip" size="146431509">
<location folder="true" href="http://mary.dfki.de/download/5.1/"/>
</package>
<depends language="en-GB" version="5.1"/>
</voice>
<voice locale="en_GB" name="dfki-obadiah-hsmm" gender="male" type="hsmm" version="5.1">
<description>A male British English hidden semi-Markov model voice</description>
<license href="http://mary.dfki.de/download/by-nd-3.0.html"/>
<package md5sum="eb9872af54a1f944f191b2b7237f8d25" filename="voice-dfki-obadiah-hsmm-5.1.zip" size="1216409">
<location folder="true" href="http://mary.dfki.de/download/5.1/"/>
</package>
<depends language="en-GB" version="5.1"/>
</voice>
<voice locale="en_GB" name="dfki-spike" gender="male" type="unit selection" version="5.1">
<description>A male British English expressive unit selection voice: Aggressive Spike</description>
<license href="http://mary.dfki.de/download/by-nd-3.0.html"/>
<package md5sum="f1161fd1da6306cc65d054cd8ff75f4e" filename="voice-dfki-spike-5.1.zip" size="136165028">
<location folder="true" href="http://mary.dfki.de/download/5.1/"/>
</package>
<depends language="en-GB" version="5.1"/>
</voice>
<voice locale="en_GB" name="dfki-spike-hsmm" gender="male" type="hsmm" version="5.1">
<description>A male British English hidden semi-Markov model voice</description>
<license href="http://mary.dfki.de/download/by-nd-3.0.html"/>
<package md5sum="a0bfd497423bf504dc3c985476b4e669" filename="voice-dfki-spike-hsmm-5.1.zip" size="1083544">
<location folder="true" href="http://mary.dfki.de/download/5.1/"/>
</package>
<depends language="en-GB" version="5.1"/>
</voice>
<voice locale="en_US" name="cmu-slt" gender="female" type="unit selection" version="5.1">
<description>A female US English unit selection voice, built from recordings provided by Carnegie Mellon University</description>
<license href="http://mary.dfki.de/download/arctic-license.html"/>
<package md5sum="0fe2293ba1f9a0e373b54d79f0a701cc" filename="voice-cmu-slt-5.1.zip" size="105909149">
<location folder="true" href="http://mary.dfki.de/download/5.1/"/>
</package>
<depends language="en-US" version="5.1"/>
</voice>
<voice locale="en_US" name="cmu-bdl-hsmm" gender="male" type="hsmm" version="5.1">
<description>A male US English hidden semi-Markov model voice, built from recordings provided by Carnegie Mellon University</description>
<license href="http://mary.dfki.de/download/arctic-license.html"/>
<package md5sum="b192a878fb782b48c2ea37deed769c28" filename="voice-cmu-bdl-hsmm-5.1.zip" size="1017477">
<location folder="true" href="http://mary.dfki.de/download/5.1/"/>
</package>
<depends language="en-US" version="5.1"/>
</voice>
<voice locale="en_US" name="cmu-rms-hsmm" gender="male" type="hsmm" version="5.1">
<description>A male US English hidden semi-Markov model voice, built from recordings provided by Carnegie Mellon University</description>
<license href="http://mary.dfki.de/download/arctic-license.html"/>
<package md5sum="fc06214cf245ecba0c2ddf437149bd89" filename="voice-cmu-rms-hsmm-5.1.zip" size="1028060">
<location folder="true" href="http://mary.dfki.de/download/5.1/"/>
</package>
<depends language="en-US" version="5.1"/>
</voice>
<voice locale="fr" name="enst-camille" gender="female" type="unit selection" version="5.1">
<description>A female French unit selection voice, built at Télécom ParisTech (ENST) using data recorded by Camille Dianoux</description>
<license href="http://mary.dfki.de/download/by-sa-3.0.html"/>
<package md5sum="ba18cb28526de107a9b6b8c76d5404ff" filename="voice-enst-camille-5.1.zip" size="183466604">
<location folder="true" href="http://mary.dfki.de/download/5.1/"/>
</package>
<depends language="fr" version="5.1"/>
</voice>
<voice locale="fr" name="enst-camille-hsmm" gender="female" type="hsmm" version="5.1">
<description>A female French hidden semi-Markov model voice, built at Télécom ParisTech (ENST) using data recorded by Camille Dianoux</description>
<license href="http://mary.dfki.de/download/by-sa-3.0.html"/>
<package md5sum="19cb43b7fed60b732aa1c2e0e86dff03" filename="voice-enst-camille-hsmm-5.1.zip" size="1518635">
<location folder="true" href="http://mary.dfki.de/download/5.1/"/>
</package>
<depends language="fr" version="5.1"/>
</voice>
<voice locale="fr" name="upmc-jessica" gender="female" type="unit selection" version="5.1">
<description>A female French unit selection voice, built at ISIR (UPMC) using data recorded by Jessica Durand</description>
<license href="http://mary.dfki.de/download/by-sa-3.0.html"/>
<package md5sum="ba2daa2a0c69164932ff833817fa4b0e" filename="voice-upmc-jessica-5.1.zip" size="126834351">
<location folder="true" href="http://mary.dfki.de/download/5.1/"/>
</package>
<depends language="fr" version="5.1"/>
</voice>
<voice locale="fr" name="upmc-jessica-hsmm" gender="female" type="hsmm" version="5.1">
<description>A female French hidden semi-Markov model voice, built at ISIR (UPMC) using data recorded by Jessica Durand</description>
<license href="http://mary.dfki.de/download/by-sa-3.0.html"/>
<package md5sum="0e95f86b9297f2c60fa6286432d20d84" filename="voice-upmc-jessica-hsmm-5.1.zip" size="1118972">
<location folder="true" href="http://mary.dfki.de/download/5.1/"/>
</package>
<depends language="fr" version="5.1"/>
</voice>
<voice locale="fr" name="enst-dennys-hsmm" gender="male" type="hsmm" version="5.1">
<description>A male Québécois French hidden semi-Markov model voice, built at Télécom ParisTech (ENST)</description>
<license href="http://mary.dfki.de/download/by-nd-3.0.html"/>
<package md5sum="ee95a5257ea4aba46591cfe7c4429d84" filename="voice-enst-dennys-hsmm-5.1.zip" size="1676376">
<location folder="true" href="http://mary.dfki.de/download/5.1/"/>
</package>
<depends language="fr" version="5.1"/>
</voice>
<voice locale="fr" name="upmc-pierre" gender="male" type="unit selection" version="5.1">
<description>A male French unit selection voice, built at ISIR (UPMC) using data recorded by Pierre Chauvin</description>
<license href="http://mary.dfki.de/download/by-sa-3.0.html"/>
<package md5sum="3120b9dadfb56046e86e64c124d1fa82" filename="voice-upmc-pierre-5.1.zip" size="171764059">
<location folder="true" href="http://mary.dfki.de/download/5.1/"/>
</package>
<depends language="fr" version="5.1"/>
</voice>
<voice locale="fr" name="upmc-pierre-hsmm" gender="male" type="hsmm" version="5.1">
<description>A male French hidden semi-Markov model voice, built at ISIR (UPMC) using data recorded by Pierre Chauvin</description>
<license href="http://mary.dfki.de/download/by-sa-3.0.html"/>
<package md5sum="064e272e3e3283134b44df8eb768dd71" filename="voice-upmc-pierre-hsmm-5.1.zip" size="1557436">
<location folder="true" href="http://mary.dfki.de/download/5.1/"/>
</package>
<depends language="fr" version="5.1"/>
</voice>
<voice locale="it" name="istc-lucia-hsmm" gender="female" type="hsmm" version="5.1">
<description>Italian female Hidden semi-Markov model voice kindly made available by Fabio Tesser</description>
<license href="http://mary.dfki.de/download/by-nd-3.0.html"/>
<package md5sum="68258f5b94c596ac961e9bc45be0e521" filename="voice-istc-lucia-hsmm-5.1.zip" size="1466943">
<location folder="true" href="http://mary.dfki.de/download/5.1/"/>
</package>
<depends language="it" version="5.1"/>
</voice>
<voice locale="ru" name="voxforge-ru-nsh" gender="male" type="unit selection" version="5.1">
<description>Russian male voice kindly made available by Nickolay V. Shmyrev</description>
<license href="http://mary.dfki.de/download/by-nd-3.0.html"/>
<package md5sum="b0ec23e41a2e9c2485b55e640ef32fb3" filename="voice-voxforge-ru-nsh-5.1.zip" size="175120753">
<location folder="true" href="http://mary.dfki.de/download/5.1/"/>
</package>
<depends language="ru" version="5.1"/>
</voice>
<voice locale="te" name="cmu-nk" gender="female" type="unit selection" version="5.1">
<description>A female Telugu unit selection voice built from voice recordings provided by IIIT Hyderabad and Carnegie Mellon University</description>
<license href="http://mary.dfki.de/download/by-nd-3.0.html"/>
<package md5sum="2fb7b6b6c9e9eb12ea2ebcba4f356505" filename="voice-cmu-nk-5.1.zip" size="495885808">
<location folder="true" href="http://mary.dfki.de/download/5.1/"/>
</package>
<depends language="te" version="5.1"/>
</voice>
<voice locale="te" name="cmu-nk-hsmm" gender="female" type="hsmm" version="5.1">
<description>A female Telugu hidden semi-Markov model voice built from voice recordings provided by IIIT Hyderabad and Carnegie Mellon University</description>
<license href="http://mary.dfki.de/download/by-nd-3.0.html"/>
<package md5sum="8dd7545a096dd87275b51028bc7623c3" filename="voice-cmu-nk-hsmm-5.1.zip" size="3397557">
<location folder="true" href="http://mary.dfki.de/download/5.1/"/>
</package>
<depends language="te" version="5.1"/>
</voice>
<voice locale="tr" name="dfki-ot" gender="male" type="unit selection" version="5.1">
<description>A male Turkish unit selection voice</description>
<license href="http://mary.dfki.de/download/by-nd-3.0.html"/>
<package md5sum="c9940e0a58578fc02113b61ac86a6ebf" filename="voice-dfki-ot-5.1.zip" size="157783972">
<location folder="true" href="http://mary.dfki.de/download/5.1/"/>
</package>
<depends language="tr" version="5.1"/>
</voice>
<voice locale="tr" name="dfki-ot-hsmm" gender="male" type="hsmm" version="5.1">
<description>A male Turkish hidden semi-Markov model voice</description>
<license href="http://mary.dfki.de/download/by-nd-3.0.html"/>
<package md5sum="211f3d322f19295b5a7671020d76552e" filename="voice-dfki-ot-hsmm-5.1.zip" size="1366536">
<location folder="true" href="http://mary.dfki.de/download/5.1/"/>
</package>
<depends language="tr" version="5.1"/>
</voice>
</marytts-install>

675
external/marytts-5.1.2/gpl-3.0.txt vendored Normal file
View file

@ -0,0 +1,675 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<http://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<http://www.gnu.org/philosophy/why-not-lgpl.html>.

View file

@ -0,0 +1,10 @@
<marytts-install xmlns="http://mary.dfki.de/installer">
<language locale="de" name="de" version="5.1.2">
<description>de language component</description>
<license href="http://www.gnu.org/licenses/lgpl-3.0-standalone.html"/>
<package filename="marytts-lang-de-5.1.2.zip" md5sum="dummy" size="0">
<location href="http://mary.dfki.de/download/5.1.2/"/>
</package>
<files>lib/marytts-lang-de-5.1.2.jar</files>
</language>
</marytts-install>

View file

@ -0,0 +1,10 @@
<marytts-install xmlns="http://mary.dfki.de/installer">
<language locale="en-GB" name="en-GB" version="5.1.2">
<description>en-GB language component</description>
<license href="http://www.gnu.org/licenses/lgpl-3.0-standalone.html"/>
<package filename="marytts-lang-en-GB-5.1.2.zip" md5sum="dummy" size="0">
<location href="http://mary.dfki.de/download/5.1.2/"/>
</package>
<files>lib/marytts-lang-en-GB-5.1.2.jar</files>
</language>
</marytts-install>

View file

@ -0,0 +1,10 @@
<marytts-install xmlns="http://mary.dfki.de/installer">
<language locale="en-US" name="en-US" version="5.1.2">
<description>en-US language component</description>
<license href="http://www.gnu.org/licenses/lgpl-3.0-standalone.html"/>
<package filename="marytts-lang-en-US-5.1.2.zip" md5sum="dummy" size="0">
<location href="http://mary.dfki.de/download/5.1.2/"/>
</package>
<files>lib/marytts-lang-en-US-5.1.2.jar</files>
</language>
</marytts-install>

View file

@ -0,0 +1,10 @@
<marytts-install xmlns="http://mary.dfki.de/installer">
<language locale="fr" name="fr" version="5.1.2">
<description>fr language component</description>
<license href="http://www.gnu.org/licenses/lgpl-3.0-standalone.html"/>
<package filename="marytts-lang-fr-5.1.2.zip" md5sum="dummy" size="0">
<location href="http://mary.dfki.de/download/5.1.2/"/>
</package>
<files>lib/marytts-lang-fr-5.1.2.jar</files>
</language>
</marytts-install>

View file

@ -0,0 +1,10 @@
<marytts-install xmlns="http://mary.dfki.de/installer">
<language locale="it" name="it" version="5.1.2">
<description>it language component</description>
<license href="http://www.gnu.org/licenses/lgpl-3.0-standalone.html"/>
<package filename="marytts-lang-it-5.1.2.zip" md5sum="dummy" size="0">
<location href="http://mary.dfki.de/download/5.1.2/"/>
</package>
<files>lib/marytts-lang-it-5.1.2.jar</files>
</language>
</marytts-install>

View file

@ -0,0 +1,10 @@
<marytts-install xmlns="http://mary.dfki.de/installer">
<language locale="ru" name="ru" version="5.1.2">
<description>ru language component</description>
<license href="http://www.gnu.org/licenses/lgpl-3.0-standalone.html"/>
<package filename="marytts-lang-ru-5.1.2.zip" md5sum="dummy" size="0">
<location href="http://mary.dfki.de/download/5.1.2/"/>
</package>
<files>lib/marytts-lang-ru-5.1.2.jar</files>
</language>
</marytts-install>

View file

@ -0,0 +1,10 @@
<marytts-install xmlns="http://mary.dfki.de/installer">
<language locale="sv" name="sv" version="5.1.2">
<description>sv language component</description>
<license href="http://www.gnu.org/licenses/lgpl-3.0-standalone.html"/>
<package filename="marytts-lang-sv-5.1.2.zip" md5sum="dummy" size="0">
<location href="http://mary.dfki.de/download/5.1.2/"/>
</package>
<files>lib/marytts-lang-sv-5.1.2.jar</files>
</language>
</marytts-install>

View file

@ -0,0 +1,10 @@
<marytts-install xmlns="http://mary.dfki.de/installer">
<language locale="te" name="te" version="5.1.2">
<description>te language component</description>
<license href="http://www.gnu.org/licenses/lgpl-3.0-standalone.html"/>
<package filename="marytts-lang-te-5.1.2.zip" md5sum="dummy" size="0">
<location href="http://mary.dfki.de/download/5.1.2/"/>
</package>
<files>lib/marytts-lang-te-5.1.2.jar</files>
</language>
</marytts-install>

View file

@ -0,0 +1,10 @@
<marytts-install xmlns="http://mary.dfki.de/installer">
<language locale="tr" name="tr" version="5.1.2">
<description>tr language component</description>
<license href="http://www.gnu.org/licenses/lgpl-3.0-standalone.html"/>
<package filename="marytts-lang-tr-5.1.2.zip" md5sum="dummy" size="0">
<location href="http://mary.dfki.de/download/5.1.2/"/>
</package>
<files>lib/marytts-lang-tr-5.1.2.jar</files>
</language>
</marytts-install>

View file

@ -0,0 +1,14 @@
<?xml version="1.0" encoding="UTF-8"?>
<marytts-install xmlns="http://mary.dfki.de/installer">
<voice gender="female" locale="en-US" name="cmu-slt-hsmm"
type="hsmm" version="5.1.2">
<description>A female US English Hidden semi-Markov model voice, built from voice recordings provided by Carnegie Mellon University.</description>
<license href="http://mary.dfki.de/download/voices/arctic-license.html"/>
<package filename="voice-cmu-slt-hsmm-5.1.2.zip"
md5sum="${zip_md5}" size="${zip_size}">
<location href="http://mary.dfki.de/download/5.1.2/"/>
</package>
<files>lib/voice-cmu-slt-hsmm-5.1.2.jar</files>
<depends language="en-US" version="5.1.2"/>
</voice>
</marytts-install>

166
external/marytts-5.1.2/lgpl-3.0.txt vendored Normal file
View file

@ -0,0 +1,166 @@
GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.
0. Additional Definitions.
As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.
"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.
An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.
A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".
The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.
The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
1. Exception to Section 3 of the GNU GPL.
You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.
2. Conveying Modified Versions.
If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:
a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or
b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.
3. Object Code Incorporating Material from Library Header Files.
The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:
a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the object code with a copy of the GNU GPL and this license
document.
4. Combined Works.
You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:
a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.
c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.
d) Do one of the following:
0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.
1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.
e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)
5. Combined Libraries.
You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:
a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.
b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.
6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.
If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.

View file

@ -0,0 +1 @@
ded7cebf8d1a3d9589f7a3efb6a74f485cb0c715

View file

@ -0,0 +1 @@
6c5cbb3b643d1781ac649994fc6c3e61260423fc

View file

@ -0,0 +1 @@
32839ee3251b54fd3de78666baeb8b7e112e3bbf

View file

@ -0,0 +1 @@
3672f1978fbcff06ddf522d0df7eed79e1654a87

Binary file not shown.

View file

@ -0,0 +1 @@
77f2d7a6e9cb2d0dd594af20f52f5033e89aaf8d

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View file

@ -0,0 +1 @@
47c8c0d8509f015c1529d687fc44ddba6f9e792d

View file

@ -0,0 +1 @@
c64179cfd0bf177354da0a9ca262399f9208da23

View file

@ -0,0 +1 @@
de9c1039516bb708acf02ab7157f9d617d20a91f

View file

@ -0,0 +1,2 @@
Sathish | ' s a - t I S

View file

@ -0,0 +1,8 @@
Sathish | ' s A - t I S
Eire | ' E - r @
speech | ' s p i tS
hasnt | ' h { z n t
havent | ' h { v n t
mustnt | ' m V s n t
isnt | ' I z n t
does | ' d V z

View file

@ -0,0 +1,10 @@
Sathish | ' s A - t I S
Eire | ' E - r @
speech | ' s p i tS
hasnt | ' h { z n t
havent | ' h { v n t
mustnt | ' m V s n t
isnt | ' I z n t
Greta | ' g r { - t A
minute | ' m I - n I t
does | ' d V z

View file

@ -0,0 +1 @@
synthèse|s e~ - t E z

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1 @@
kyrka | C ,Y r - k %a

View file

@ -0,0 +1 @@
db99f8c288c731c6bd44400072813a7945d94bfa

View file

@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<classpath>
<classpathentry kind="src" path="src"/>
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
<classpathentry kind="output" path="bin"/>
</classpath>

2
lib/java-speech-api-master/.gitignore vendored Normal file
View file

@ -0,0 +1,2 @@
/bin
.classpath

View file

@ -0,0 +1,16 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>java-speech-api-git</name>
<comment></comment>
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>org.eclipse.jdt.core.javabuilder</name>
<arguments> </arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.eclipse.jdt.core.javanature</nature>
</natures>
</projectDescription>

View file

@ -0,0 +1,37 @@
# Java-Speech-API Changelog
## Changelog
Changelog corresponds with a tagged and signed Git commit. This marks the changes.
A tagged commit may or may not have a corresponding binary version available.
Format: Tag: `<Corresponding Tag>`
* Version 1.15
* Optimized synthesiser class. Massive speed improvements on long input strings!
* Added experimental Duplex API in preparation for version 1.2 .
* Version 1.11 (Tag V1.100)
* Fixed major bug in Recognizer
* Version 1.10 (Tag v1.100)
* Added new Microphone Analyzer class.
* Added volume and frequency detection and frame work for (Voice Activity Detection)
* Microphone API updated to make it more usable.
* API re-branded as J.A.R.V.I.S. (Just A Reliable Vocal Interpreter & Synthesiser)
* Version 1.06 (Tag v1.016)
* Added support for synthesiser for strings longer than 100 characters (Credits to @Skylion007)
* Added support for synthesiser for multiple languages, accents, and voices. (Credits to @Skylion007)
* Added support for auto-detection of language within synthesiser. (Credits to @Skylion007)
* Version 1.05 (Tag: v1.015)
* Improved language support for recognizer (Credits to @duncanj)
* Add support for multiple responses for recognizer (Credits to @duncanj)
* Add profanity filter toggle support for recognizer (Credits to @duncanj)
* Version 1.01 (Tag: v1.01)
* Fixed state functions for Microphones
* Fixed encoding single byte frames
* Support Multiple Languages
* Version 1.00 (Tag: v1.00)
* Initial Release

View file

@ -0,0 +1,23 @@
# J.A.R.V.I.S. Speech API (Java-Speech API) Credits
## Credits
The following people/organizations have helped provide functionality for the API:
* JavaFlacEncoder Project
* Provided functionality to convert Wave files to FLAC format
* This allowed for the FLAC audio to be sent to Google to be "recognized"
* Created by Preston Lacey
* Homepage: http://sourceforge.net/projects/javaflacencoder/
* Google
* Provided functionality for two main API functions
* Recognizer
* Allows for speech audio to be recognized to text
* Synthesiser
* Allows for text to speech translation
* Homepage: http://google.com
* Princeton University
* The implemented FFT algorithm is derived from one on the university's website.
* Homepage: http://www.princeton.edu
We would like to thank the above so much for their work; this wrapper/API could not have been
created without it.

View file

@ -0,0 +1,674 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
{one line to give the program's name and a brief idea of what it does.}
Copyright (C) {year} {name of author}
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
{project} Copyright (C) {year} {fullname}
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<http://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<http://www.gnu.org/philosophy/why-not-lgpl.html>.

View file

@ -0,0 +1,30 @@
# J.A.R.V.I.S. (Java-Speech-API)
J.A.R.V.I.S. Java Speech API: Just A Reliable Vocal Interpreter & Synthesizer.
This is a project for the Java Speech API. The program interprets vocal inputs into text and synthesizes voices from text input.
The program supports dozens of languages and even has the ability to auto-detect languages!
## Description
The J.A.R.V.I.S. Speech API is designed to be simple and efficient, using the speech engines created by Google
to provide functionality for parts of the API. Essentially, it is an API written in Java,
including a recognizer, synthesizer, and a microphone capture utility. The project uses
Google services for the synthesizer and recognizer. While this requires an Internet
connection, it provides a complete, modern, and fully functional speech API in Java.
## Features
The API currently provides the following functionality,
* Microphone Capture API (Wrapped around the current Java API for simplicity)
* A speech recognizer using Google's recognizer service
* Converts WAVE files from microphone input to FLAC (using existing API, see CREDITS)
* Retrieves Response from Google, including confidence score and text
* A speech synthesizer using Google's synthesizer service
* Retrieves synthesized text in an InputStream (MP3 data ready to be played)
* Wave to FLAC API (Wrapped around the used API in the project, javaFlacEncoder, see CREDITS)
* A translator using Google Translate (courtesy of Skylion's Google Toolkit)
## Changelog
See CHANGELOG.markdown for Version History/Changelog
## Credits
See CREDITS.markdown for Credits

View file

@ -0,0 +1,13 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="JAVA_MODULE" version="4">
<component name="NewModuleRootManager" inherit-compiler-output="true">
<exclude-output />
<content url="file://$MODULE_DIR$">
<sourceFolder url="file://$MODULE_DIR$/src" isTestSource="false" />
</content>
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="library" name="javaFlacEncoder-0.2" level="project" />
</component>
</module>

View file

@ -0,0 +1,2 @@
Manifest-Version: 1.0

View file

@ -0,0 +1,224 @@
package com.darkprograms.speech.microphone;
import javax.sound.sampled.*;
import java.io.Closeable;
import java.io.File;
/***************************************************************************
* Microphone class that contains methods to capture audio from microphone
*
* @author Luke Kuza, Aaron Gokaslan
***************************************************************************/
public class Microphone implements Closeable {

    /**
     * TargetDataLine that receives audio data from the microphone.
     */
    private TargetDataLine targetDataLine;

    /**
     * Enum for the current Microphone state.
     */
    public enum CaptureState {
        PROCESSING_AUDIO, STARTING_CAPTURE, CLOSED
    }

    /**
     * Current capture state of this microphone.
     */
    CaptureState state;

    /**
     * File type the captured audio is saved as (e.g. WAVE).
     */
    private AudioFileFormat.Type fileType;

    /**
     * File the captured audio is written to.
     */
    private File audioFile;

    /**
     * Gets the current state of Microphone
     *
     * @return PROCESSING_AUDIO is returned when the Thread is recording Audio and/or saving it to a file<br>
     *         STARTING_CAPTURE is returned if the Thread is setting variables<br>
     *         CLOSED is returned if the Thread is not doing anything/not capturing audio
     */
    public CaptureState getState() {
        return state;
    }

    /**
     * Sets the current state of Microphone
     *
     * @param state State from enum
     */
    private void setState(CaptureState state) {
        this.state = state;
    }

    /**
     * @return The file the captured audio is saved to, or null if none was set.
     */
    public File getAudioFile() {
        return audioFile;
    }

    /**
     * @param audioFile The file to save captured audio to.
     */
    public void setAudioFile(File audioFile) {
        this.audioFile = audioFile;
    }

    /**
     * @return The audio file type used when saving (e.g. AudioFileFormat.Type.WAVE).
     */
    public AudioFileFormat.Type getFileType() {
        return fileType;
    }

    /**
     * @param fileType The audio file type to use when saving.
     */
    public void setFileType(AudioFileFormat.Type fileType) {
        this.fileType = fileType;
    }

    /**
     * @return The TargetDataLine audio is read from; may be null if the line could not be obtained.
     */
    public TargetDataLine getTargetDataLine() {
        return targetDataLine;
    }

    /**
     * @param targetDataLine The TargetDataLine to read audio from.
     */
    public void setTargetDataLine(TargetDataLine targetDataLine) {
        this.targetDataLine = targetDataLine;
    }

    /**
     * Constructor
     *
     * @param fileType File type to save the audio in<br>
     *                 Example, to save as WAVE use AudioFileFormat.Type.WAVE
     */
    public Microphone(AudioFileFormat.Type fileType) {
        setState(CaptureState.CLOSED);
        setFileType(fileType);
        initTargetDataLine();
    }

    /**
     * Initializes the target data line from the system's audio mixer.
     * On failure the line is left null; callers re-attempt via open()/captureAudioToFile().
     */
    private void initTargetDataLine() {
        DataLine.Info dataLineInfo = new DataLine.Info(TargetDataLine.class, getAudioFormat());
        try {
            setTargetDataLine((TargetDataLine) AudioSystem.getLine(dataLineInfo));
        } catch (LineUnavailableException e) {
            // Line could not be obtained (e.g. no microphone); leave targetDataLine null.
            e.printStackTrace();
        }
    }

    /**
     * Captures audio from the microphone and saves it to a file.
     * Recording runs on a background thread until close() is called.
     *
     * @param audioFile The File to save the audio to
     * @throws LineUnavailableException Thrown if the data line cannot be acquired
     */
    public void captureAudioToFile(File audioFile) throws LineUnavailableException {
        setState(CaptureState.STARTING_CAPTURE);
        setAudioFile(audioFile);

        if (getTargetDataLine() == null) {
            initTargetDataLine();
        }

        // Capture runs asynchronously; AudioSystem.write blocks until the line is closed.
        new Thread(new CaptureThread()).start();
    }

    /**
     * Captures audio from the microphone and saves it to a file.
     *
     * @param audioFile The full path (String) to a file you want to save the audio in
     * @throws LineUnavailableException Thrown if the data line cannot be acquired
     */
    public void captureAudioToFile(String audioFile) throws LineUnavailableException {
        captureAudioToFile(new File(audioFile));
    }

    /**
     * The audio format to save in.
     *
     * @return Returns AudioFormat to be used later when capturing audio from microphone
     */
    public AudioFormat getAudioFormat() {
        float sampleRate = 8000.0F;     // 8000, 11025, 16000, 22050, 44100
        int sampleSizeInBits = 16;      // 8, 16
        int channels = 1;               // 1 (mono), 2 (stereo)
        boolean signed = true;
        boolean bigEndian = false;      // little-endian, as expected by downstream encoders
        return new AudioFormat(sampleRate, sampleSizeInBits, channels, signed, bigEndian);
    }

    /**
     * Opens the microphone, starting the targetDataLine.
     * If it's already open, it does nothing.
     */
    public void open() {
        if (getTargetDataLine() == null) {
            initTargetDataLine();
        }
        if (!getTargetDataLine().isOpen() && !getTargetDataLine().isRunning() && !getTargetDataLine().isActive()) {
            try {
                setState(CaptureState.PROCESSING_AUDIO);
                getTargetDataLine().open(getAudioFormat());
                getTargetDataLine().start();
            } catch (LineUnavailableException e) {
                // Line exists but cannot be opened right now; state remains PROCESSING_AUDIO
                // and a later open() attempt may succeed.
                e.printStackTrace();
            }
        }
    }

    /**
     * Close the microphone capture, saving all processed audio to the specified file.<br>
     * If already closed, this does nothing.
     */
    public void close() {
        if (getState() == CaptureState.CLOSED) {
            return; // Already closed; nothing to do.
        }
        // Guard against a line that was never successfully initialized.
        if (getTargetDataLine() != null) {
            getTargetDataLine().stop();
            getTargetDataLine().close();
        }
        setState(CaptureState.CLOSED);
    }

    /**
     * Thread to capture the audio from the microphone and save it to a file.
     */
    private class CaptureThread implements Runnable {

        /**
         * Opens the line and streams its audio into the configured file.
         * AudioSystem.write blocks until the line is closed via close().
         */
        public void run() {
            try {
                AudioFileFormat.Type fileType = getFileType();
                File audioFile = getAudioFile();
                open();
                AudioSystem.write(new AudioInputStream(getTargetDataLine()), fileType, audioFile);
                // Will write to File until the line is closed.
            } catch (Exception ex) {
                ex.printStackTrace();
            }
        }
    }
}

View file

@ -0,0 +1,288 @@
package com.darkprograms.speech.microphone;
import javax.sound.sampled.AudioFileFormat;
import com.darkprograms.speech.util.*;
/********************************************************************************************
* Microphone Analyzer class, detects pitch and volume while extending the microphone class.
* Implemented as a precursor to a Voice Activity Detection (VAD) algorithm.
* Currently can be used for audio data analysis.
* Dependencies: FFT.java & Complex.java. Both found in the utility package.
* @author Aaron Gokaslan
********************************************************************************************/
public class MicrophoneAnalyzer extends Microphone {

    /**
     * Constructor
     * @param fileType The file type you want to save in. FLAC recommended.
     */
    public MicrophoneAnalyzer(AudioFileFormat.Type fileType) {
        super(fileType);
    }

    /**
     * Gets the volume of the microphone input.
     * Interval is 100ms so allow 100ms for this method to run in your code or specify smaller interval.
     * @return The volume of the microphone input or -1 if data-line is not available
     */
    public int getAudioVolume() {
        return getAudioVolume(100);
    }

    /**
     * Gets the volume of the microphone input.
     * @param interval The length of time you would like to calculate the volume over in milliseconds.
     * @return The volume of the microphone input or -1 if data-line is not available.
     */
    public int getAudioVolume(int interval) {
        return calculateAudioVolume(this.getNumOfBytes(interval / 1000d));
    }

    /**
     * Gets the volume of microphone input.
     * @param numOfBytes The number of bytes you want for volume interpretation
     * @return The volume over the specified number of bytes or -1 if data-line is unavailable.
     */
    private int calculateAudioVolume(int numOfBytes) {
        byte[] data = getBytes(numOfBytes);
        if (data == null)
            return -1;
        return calculateRMSLevel(data);
    }

    /**
     * Calculates the volume of AudioData which may be buffered data from a data-line.
     * @param audioData The byte[] you want to determine the volume of
     * @return the calculated volume of audioData, or 0 if the array is null or empty
     */
    public static int calculateRMSLevel(byte[] audioData) {
        if (audioData == null || audioData.length == 0) {
            return 0; // Guard: avoids division by zero below.
        }
        long lSum = 0;
        for (int i = 0; i < audioData.length; i++)
            lSum = lSum + audioData[i];
        double dAvg = lSum / audioData.length;
        double sumMeanSquare = 0d;
        for (int j = 0; j < audioData.length; j++)
            sumMeanSquare = sumMeanSquare + Math.pow(audioData[j] - dAvg, 2d);
        double averageMeanSquare = sumMeanSquare / audioData.length;
        // +0.5 rounds to nearest int when truncating.
        return (int) (Math.pow(averageMeanSquare, 0.5d) + 0.5);
    }

    /**
     * Returns the number of bytes over the interval; useful when figuring out how long to record.
     * @param seconds The length in seconds
     * @return the number of bytes the microphone will save.
     */
    public int getNumOfBytes(int seconds) {
        return getNumOfBytes((double) seconds);
    }

    /**
     * Returns the number of bytes over the interval; useful when figuring out how long to record.
     * @param seconds The length in seconds
     * @return the number of bytes the microphone will output over the specified time.
     */
    public int getNumOfBytes(double seconds) {
        // bytes = seconds * sampleRate * frameSize, rounded to nearest int.
        return (int) (seconds * getAudioFormat().getSampleRate() * getAudioFormat().getFrameSize() + .5);
    }

    /**
     * Returns a byte[] containing the specified number of bytes read from the data line.
     * @param numOfBytes The length of the returned array.
     * @return The specified array or null if it cannot be read.
     */
    private byte[] getBytes(int numOfBytes) {
        if (getTargetDataLine() != null) {
            byte[] data = new byte[numOfBytes];
            this.getTargetDataLine().read(data, 0, numOfBytes);
            return data;
        }
        return null; // If data cannot be read, returns a null array.
    }

    /**
     * Calculates the fundamental frequency. In other words, it calculates pitch,
     * except pitch is far more subjective and subtle. Also note, that readings may occasionally,
     * be in error due to the complex nature of sound. This feature is in Beta
     * @return The frequency of the sound in Hertz.
     */
    public int getFrequency() {
        try {
            return getFrequency(4096);
        } catch (Exception e) {
            // Unreachable in practice: 4096 is a valid power-of-two sample count.
            return -666;
        }
    }

    /**
     * Calculates the frequency based off of the number of bytes.
     * CAVEAT: THE NUMBER OF BYTES MUST BE A MULTIPLE OF 2!!!
     * @param numOfBytes The number of bytes which must be a multiple of 2!!!
     * @return The calculated frequency in Hertz, or -1 if the data line is unavailable.
     */
    public int getFrequency(int numOfBytes) throws Exception {
        if (getTargetDataLine() == null) {
            return -1;
        }
        byte[] data = new byte[numOfBytes + 1]; // One byte is lost during conversion
        this.getTargetDataLine().read(data, 0, numOfBytes);
        return getFrequency(data);
    }

    /**
     * Calculates the frequency based off of the byte array.
     * @param bytes The audioData you want to analyze
     * @return The calculated frequency in Hertz.
     */
    public int getFrequency(byte[] bytes) {
        double[] audioData = this.bytesToDoubleArray(bytes);
        audioData = applyHanningWindow(audioData);
        Complex[] complex = new Complex[audioData.length];
        for (int i = 0; i < complex.length; i++) {
            complex[i] = new Complex(audioData[i], 0);
        }
        Complex[] fftTransformed = FFT.fft(complex);
        return this.calculateFundamentalFrequency(fftTransformed, 4);
    }

    /**
     * Applies a Hanning Window to the data set.
     * Hanning Windows are used to increase the accuracy of the FFT.
     * One should always apply a window to a dataset before applying an FFT.
     * @param data The data you want to apply the window to
     * @return The windowed data set
     */
    private double[] applyHanningWindow(double[] data) {
        return applyHanningWindow(data, 0, data.length);
    }

    /**
     * Applies a Hanning Window to the data set.
     * Hanning Windows are used to increase the accuracy of the FFT.
     * One should always apply a window to a dataset before applying an FFT.
     * @param signal_in The data you want to apply the window to
     * @param pos The starting index you want to apply a window from
     * @param size The size of the window
     * @return The windowed data set
     */
    private double[] applyHanningWindow(double[] signal_in, int pos, int size) {
        for (int i = pos; i < pos + size; i++) {
            int j = i - pos; // j = index into Hann window function
            signal_in[i] = (double) (signal_in[i] * 0.5 * (1.0 - Math.cos(2.0 * Math.PI * j / size)));
        }
        return signal_in;
    }

    /**
     * This method calculates the fundamental frequency using Harmonic Product Spectrum.
     * It down samples the FFTData N times and multiplies the arrays
     * together to determine the fundamental frequency. This is slightly more computationally
     * expensive, but much more accurate. In simpler terms, the function will remove the harmonic frequencies
     * which occur at every N value by finding the lowest common divisor among them.
     * @param fftData The array returned by the FFT
     * @param N the number of times you wish to downsample.
     *          WARNING: The more times you downsample, the lower the maximum detectable frequency is.
     * @return The fundamental frequency in Hertz, or a negative value on error
     */
    private int calculateFundamentalFrequency(Complex[] fftData, int N) {
        if (N <= 0 || fftData == null) {
            return -1; // error case
        }
        final int LENGTH = fftData.length; // Used to calculate bin size
        fftData = removeNegativeFrequencies(fftData);
        // Build N progressively-downsampled copies of the spectrum.
        Complex[][] data = new Complex[N][fftData.length / N];
        for (int i = 0; i < N; i++) {
            for (int j = 0; j < data[0].length; j++) {
                data[i][j] = fftData[j * (i + 1)];
            }
        }
        // Multiply the downsampled spectra together element-wise.
        Complex[] result = new Complex[fftData.length / N];
        for (int i = 0; i < result.length; i++) {
            Complex tmp = new Complex(1, 0);
            for (int j = 0; j < N; j++) {
                tmp = tmp.times(data[j][i]);
            }
            result[i] = tmp;
        }
        int index = this.findMaxMagnitude(result);
        return index * getFFTBinSize(LENGTH);
    }

    /**
     * Removes useless data from the transform since sound doesn't use complex numbers.
     * The FFT of a real signal is symmetric, so only the first half carries information.
     * @param c The data you want to remove the negative-frequency half from
     * @return The cleaned data
     */
    private Complex[] removeNegativeFrequencies(Complex[] c) {
        Complex[] out = new Complex[c.length / 2];
        for (int i = 0; i < out.length; i++) {
            out[i] = c[i];
        }
        return out;
    }

    /**
     * Calculates the FFT bin size based off the length of the array.
     * Each FFT bin size represents the range of frequencies treated as one.
     * For example, if the bin size is 5 then the algorithm is precise to within 5hz.
     * Precondition: length cannot be 0.
     * @param fftDataLength The length of the array used to feed the FFT algorithm
     * @return FFT bin size
     */
    private int getFFTBinSize(int fftDataLength) {
        return (int) (getAudioFormat().getSampleRate() / fftDataLength + .5);
    }

    /**
     * Calculates the index of the maximum magnitude in a complex array.
     * @param input The Complex[] you want to get max magnitude from.
     * @return The index of the max magnitude, or -1 for null/empty input
     */
    private int findMaxMagnitude(Complex[] input) {
        // Bug fix: Double.MIN_VALUE is the smallest POSITIVE double, so an
        // all-zero spectrum previously never updated the index and -1 leaked
        // out as a bogus negative frequency. NEGATIVE_INFINITY compares below
        // every real magnitude.
        double max = Double.NEGATIVE_INFINITY;
        int index = -1;
        for (int i = 0; i < input.length; i++) {
            double tmp = input[i].getMagnitude();
            if (tmp > max) {
                max = tmp;
                index = i;
            }
        }
        return index;
    }

    /**
     * Converts bytes from a TargetDataLine into a double[] allowing the information to be read.
     * NOTE: One byte is lost in the conversion so don't expect the arrays to be the same length!
     * @param bufferData The buffer read in from the target data line
     * @return The double[] that the buffer has been converted into.
     */
    private double[] bytesToDoubleArray(byte[] bufferData) {
        final int bytesRecorded = bufferData.length;
        final int bytesPerSample = getAudioFormat().getSampleSizeInBits() / 8;
        final double amplification = 100.0; // choose a number as you like
        double[] micBufferData = new double[bytesRecorded - bytesPerSample + 1];
        for (int index = 0, floatIndex = 0; index < bytesRecorded - bytesPerSample + 1; index += bytesPerSample, floatIndex++) {
            double sample = 0;
            for (int b = 0; b < bytesPerSample; b++) {
                int v = bufferData[index + b];
                if (b < bytesPerSample - 1 || bytesPerSample == 1) {
                    v &= 0xFF; // Only the most significant byte keeps its sign.
                }
                sample += v << (b * 8);
            }
            double sample32 = amplification * (sample / 32768.0);
            micBufferData[floatIndex] = sample32;
        }
        return micBufferData;
    }
}

View file

@ -0,0 +1,120 @@
package com.darkprograms.speech.recognizer;
import javaFlacEncoder.FLACEncoder;
import javaFlacEncoder.FLACFileOutputStream;
import javaFlacEncoder.StreamConfiguration;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import java.io.File;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
/*************************************************************************************************************
* Class that contains methods to encode Wave files to FLAC files
* THIS IS THANKS TO THE javaFlacEncoder Project created here: http://sourceforge.net/projects/javaflacencoder/
************************************************************************************************************/
public class FlacEncoder {

    /**
     * Constructor
     */
    public FlacEncoder() {
    }

    /**
     * Converts a wave file to a FLAC file (in order to POST the data to Google and retrieve a response)<br>
     * Sample Rate is 8000 by default
     *
     * @param inputFile  Input wave file
     * @param outputFile Output FLAC file
     */
    public void convertWaveToFlac(File inputFile, File outputFile) {
        // Google's recognizer expects 8 kHz, 16-bit, mono audio.
        StreamConfiguration streamConfiguration = new StreamConfiguration();
        streamConfiguration.setSampleRate(8000);
        streamConfiguration.setBitsPerSample(16);
        streamConfiguration.setChannelCount(1);

        try {
            AudioInputStream audioInputStream = AudioSystem.getAudioInputStream(inputFile);
            AudioFormat format = audioInputStream.getFormat();
            int frameSize = format.getFrameSize();

            FLACEncoder flacEncoder = new FLACEncoder();
            FLACFileOutputStream flacOutputStream = new FLACFileOutputStream(outputFile);
            flacEncoder.setStreamConfiguration(streamConfiguration);
            flacEncoder.setOutputStream(flacOutputStream);
            flacEncoder.openFLACStream();

            int frameLength = (int) audioInputStream.getFrameLength();
            if (frameLength <= AudioSystem.NOT_SPECIFIED) {
                frameLength = 16384; // Arbitrary initial capacity; grown below as needed.
            }
            int[] sampleData = new int[frameLength];
            byte[] samplesIn = new byte[frameSize];
            int i = 0;

            while (audioInputStream.read(samplesIn, 0, frameSize) != -1) {
                if (i >= sampleData.length) {
                    // Bug fix: when the stream did not report its frame length (or
                    // reported it wrongly), long files previously overflowed the
                    // array with ArrayIndexOutOfBoundsException. Double the buffer.
                    int[] grown = new int[sampleData.length * 2];
                    System.arraycopy(sampleData, 0, grown, 0, sampleData.length);
                    sampleData = grown;
                }
                if (frameSize != 1) {
                    // 16-bit samples arrive little-endian; decode to a signed short.
                    ByteBuffer bb = ByteBuffer.wrap(samplesIn);
                    bb.order(ByteOrder.LITTLE_ENDIAN);
                    short shortVal = bb.getShort();
                    sampleData[i] = shortVal;
                } else {
                    sampleData[i] = samplesIn[0];
                }
                i++;
            }
            sampleData = truncateNullData(sampleData, i);

            flacEncoder.addSamples(sampleData, i);
            flacEncoder.encodeSamples(i, false);
            flacEncoder.encodeSamples(flacEncoder.samplesAvailableToEncode(), true);

            audioInputStream.close();
            flacOutputStream.close();
        } catch (Exception ex) {
            ex.printStackTrace();
        }
    }

    /**
     * Converts a wave file to a FLAC file (in order to POST the data to Google and retrieve a response)<br>
     * Sample Rate is 8000 by default
     *
     * @param inputFile  Input wave file
     * @param outputFile Output FLAC file
     */
    public void convertWaveToFlac(String inputFile, String outputFile) {
        convertWaveToFlac(new File(inputFile), new File(outputFile));
    }

    /**
     * Used for when the frame length is unknown to shorten the array to prevent huge blank end space.
     * @param sampleData The int[] array you want to shorten
     * @param index The index you want to shorten it to
     * @return The shortened array
     */
    private int[] truncateNullData(int[] sampleData, int index) {
        if (index == sampleData.length) return sampleData;
        int[] out = new int[index];
        System.arraycopy(sampleData, 0, out, 0, index);
        return out;
    }
}

View file

@ -0,0 +1,524 @@
package com.darkprograms.speech.recognizer;
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLConnection;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.List;
import java.util.Scanner;
import javaFlacEncoder.FLACFileWriter;
import javax.net.ssl.HttpsURLConnection;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.LineUnavailableException;
import javax.sound.sampled.TargetDataLine;
import com.darkprograms.speech.util.ChunkedOutputStream;
import com.darkprograms.speech.util.StringUtil;
/**
* A class for using Google's Duplex Speech API. Allows for continuous recognition. Requires an API-Key.
* A duplex API opens two connections. One to an upstream and one to a downstream. The system allows
* for continuous chunking on both up and downstream. This, in turn, allows for Google to return data
* as data is sent to it. For this reason, this class uses listeners.
* @author Skylion (Aaron Gokaslan), Robert Rowntree.
*/
public class GSpeechDuplex{
//TODO Cleanup Printlns
/**
* Minimum value for ID
*/
private static final long MIN = 10000000;
/**
* Maximum value for ID
*/
private static final long MAX = 900000009999999L;
/**
* The base URL for the API
*/
private static final String GOOGLE_DUPLEX_SPEECH_BASE = "https://www.google.com/speech-api/full-duplex/v1/";
/**
* Stores listeners
*/
private List<GSpeechResponseListener> responseListeners = new ArrayList<GSpeechResponseListener>();
/**
* User defined API-KEY
*/
private final String API_KEY;
/**
* User-defined language
*/
private String language = "auto";
/**
* The maximum size the API will tolerate
*/
private final static int MAX_SIZE = 1048576;
/**
* Per specification, the final chunk of in a ChunkedOutputStream
*/
private final static byte[] FINAL_CHUNK = new byte[] { '0', '\r', '\n', '\r', '\n' };
/**
* Constructor
* @param API_KEY The API-Key for Google's Speech API. An API key can be obtained by requesting
* one by following the process shown at this
* <a href="http://www.chromium.org/developers/how-tos/api-keys">url</a>.
*/
public GSpeechDuplex(String API_KEY){
	this.API_KEY = API_KEY; // Key is immutable for the lifetime of this instance.
}
/**
* Temporary will be deprecated before release
*/
public String getLanguage(){
	// Returns the current recognition language code ("auto" by default).
	return language;
}
/**
* Temporary will be deprecated before release
*/
public void setLanguage(String language){
	// Sets the recognition language code sent in the upstream URL (e.g. "en-US", or "auto").
	this.language = language;
}
/**
* Send a FLAC file with the specified sampleRate to the Duplex API
* @param flacFile The file you wish to upload.
* NOTE: Segment the file if duration is greater than 15 seconds.
* @param sampleRate The sample rate of the file.
* @throws IOException If something has gone wrong with reading the file
*/
public void recognize(File flacFile, int sampleRate) throws IOException{
	// Read the whole FLAC file into memory, then delegate to the byte[] overload.
	byte[] audioData = mapFileIn(flacFile);
	recognize(audioData, sampleRate);
}
/**
* Send a byte[] to the URL with a specified sampleRate.
* NOTE: The byte[] should contain no more than 15 seconds of audio.
* Chunking is not fully implemented as of yet. Will not string data together for context yet.
* @param data The byte[] you want to send.
* @param sampleRate The sample rate of aforementioned byte array.
*/
public void recognize(byte[] data, int sampleRate){
	//Google rejects payloads of MAX_SIZE bytes or more, so oversized audio is
	//split and each chunk is recognized separately. Note: this temporary
	//chunking does not allow Google to gather context across chunks.
	if(data.length >= MAX_SIZE){
		System.out.println("Chunking the audio into smaller parts...");
		byte[][] dataArray = chunkAudio(data);
		for(byte[] array: dataArray){
			recognize(array, sampleRate);
		}
		//Bug fix: without this return, the oversized payload was ALSO sent
		//whole through the code below, duplicating every recognition request.
		return;
	}
	//Generates a unique ID pairing the upstream and downstream connections.
	final long PAIR = MIN + (long)(Math.random() * ((MAX - MIN) + 1L));
	//Generates the Downstream URL
	final String API_DOWN_URL = GOOGLE_DUPLEX_SPEECH_BASE + "down?maxresults=1&pair=" + PAIR;
	//Generates the Upstream URL
	final String API_UP_URL = GOOGLE_DUPLEX_SPEECH_BASE +
			"up?lang=" + language + "&lm=dictation&client=chromium&pair=" + PAIR +
			"&key=" + API_KEY ;
	//Opens downChannel first so the response listener is ready before upload starts.
	this.downChannel(API_DOWN_URL);
	//Opens upChannel
	this.upChannel(API_UP_URL, chunkAudio(data), sampleRate);
}
/**
* This method allows you to stream a continuous stream of data to the API.
* <p>Note: This feature is experimental.</p>
* @param tl
* @param af
* @throws IOException
* @throws LineUnavailableException
*/
public void recognize(TargetDataLine tl, AudioFormat af) throws IOException, LineUnavailableException{
	//Unique session ID pairing the upstream and downstream connections.
	final long pair = MIN + (long)(Math.random() * ((MAX - MIN) + 1L));
	//Downstream URL: where Google streams recognition results back to us.
	final String downUrl = GOOGLE_DUPLEX_SPEECH_BASE + "down?maxresults=1&pair=" + pair;
	//Upstream URL: where the audio is posted. "&continuous" tells Google to
	//constantly monitor the stream.
	final String upUrl = GOOGLE_DUPLEX_SPEECH_BASE +
			"up?lang=" + language + "&lm=dictation&client=chromium&pair=" + pair +
			"&key=" + API_KEY + "&continuous";
	//TODO Add implementation that sends feedback in real time. Protocol buffers will be necessary.
	//Open the down channel first so results can be received as soon as audio flows.
	this.downChannel(downUrl);
	this.upChannel(upUrl, tl, af);
}
/**
* This code opens a new Thread that connects to the downstream URL. Due to threading,
* the best way to handle this is through the use of listeners.
* @param The URL you want to connect to.
*/
private void downChannel(String urlStr) {
	final String url = urlStr;
	new Thread ("Downstream Thread") {
		public void run() {
			// Handler for the DOWN channel HTTPS response stream. The connection
			// must stay open long enough for the up channel to finish writing its
			// bytes at a live-stream pace (BR = sampleRate * bits per sample);
			// the TLS session wakes this reader when more bytes arrive, so we
			// simply block on the Scanner and let HttpsURLConnection do its work.
			Scanner inStream = openHttpsConnection(url);
			if(inStream == null){
				//Bug fix: the connection failed; previously execution fell
				//through and hasNextLine() threw a NullPointerException.
				System.out.println("Could not open the down channel: " + url);
				return;
			}
			while(inStream.hasNextLine()){
				String response = inStream.nextLine();
				System.out.println("Response: "+response);
				if(response.length()>17){//Prevents blank responses from firing
					GoogleResponse gr = new GoogleResponse();
					parseResponse(response, gr);
					fireResponseEvent(gr);
				}
			}
			inStream.close();
			System.out.println("Finished write on down stream...");
		}
	}.start();
}
/**
* Used to initiate the URL chunking for the upChannel.
* @param urlStr The URL string you want to upload 2
* @param data The data you want to send to the URL
* @param sampleRate The specified sample rate of the data.
*/
private void upChannel(String urlStr, byte[][] data, int sampleRate) {
	//Copies captured into effectively-final locals for the anonymous thread.
	final String uploadUrl = urlStr;
	final byte[][] audioChunks = data;
	final int rate = sampleRate;
	new Thread ("Upstream File Thread") {
		public void run() {
			//Google returns no data on this connection; results arrive on the down channel.
			openHttpsPostConnection(uploadUrl, audioChunks, rate);
		}
	}.start();
}
/**
* Streams data from the TargetDataLine to the API.
* @param urlStr The URL to stream to
* @param tl The target data line to stream from.
* @param af The AudioFormat to stream with.
* @throws LineUnavailableException If cannot open or stream the TargetDataLine.
*/
private void upChannel(String urlStr, TargetDataLine tl, AudioFormat af) throws LineUnavailableException{
final String murl = urlStr;
final TargetDataLine mtl = tl;
final AudioFormat maf = af;
if(!mtl.isOpen()){
mtl.open(maf);
mtl.start();
}
new Thread ("Upstream Thread") {
public void run() {
openHttpsPostConnection(murl, mtl, maf);
}
}.start();
}
/**
* Opens a HTTPS connection to the specified URL string
* @param urlStr The URL you want to visit
* @return The Scanner to access aforementioned data.
*/
private Scanner openHttpsConnection(String urlStr) {
int resCode = -1;
try {
URL url = new URL(urlStr);
URLConnection urlConn = url.openConnection();
if (!(urlConn instanceof HttpsURLConnection)) {
throw new IOException ("URL is not an Https URL");
}
HttpsURLConnection httpConn = (HttpsURLConnection)urlConn;
httpConn.setAllowUserInteraction(false);
// TIMEOUT is required
httpConn.setInstanceFollowRedirects(true);
httpConn.setRequestMethod("GET");
httpConn.connect();
resCode = httpConn.getResponseCode();
if (resCode == HttpsURLConnection.HTTP_OK) {
return new Scanner(httpConn.getInputStream());
}
else{
System.out.println("Error: " + resCode);
}
} catch (MalformedURLException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
return null;
}
    /**
     * Opens a HTTPSPostConnection that posts data from a TargetDataLine input.
     * Blocks until the line stops delivering audio and the server answers;
     * callers run this on a dedicated thread (see upChannel).
     * @param murl The URL you want to post to.
     * @param mtl The TargetDataLine you want to post data from. <b>Note should be open</b>
     * @param maf The AudioFormat of the data you want to post
     */
    private void openHttpsPostConnection(final String murl,
            final TargetDataLine mtl, final AudioFormat maf) {
        URL url;
        try {
            url = new URL(murl);
            URLConnection urlConn = url.openConnection();
            if (!(urlConn instanceof HttpsURLConnection)) {
                throw new IOException ("URL is not an Https URL");
            }
            HttpsURLConnection httpConn = (HttpsURLConnection)urlConn;
            httpConn.setAllowUserInteraction(false);
            httpConn.setInstanceFollowRedirects(true);
            httpConn.setRequestMethod("POST");
            httpConn.setDoOutput(true);
            // Chunked streaming with chunk size 0 lets the JDK pick the chunk
            // length and keeps the request open-ended (no Content-Length).
            httpConn.setChunkedStreamingMode(0);
            httpConn.setRequestProperty("Transfer-Encoding", "chunked");
            httpConn.setRequestProperty("Content-Type", "audio/x-flac; rate=" + (int)maf.getSampleRate());
            // also worked with ("Content-Type", "audio/amr; rate=8000");
            httpConn.connect();
            // this opens a connection, then sends POST & headers.
            OutputStream out = httpConn.getOutputStream();
            //Note : if the audio is more than 15 seconds
            // dont write it to UrlConnInputStream all in one block as this sample does.
            // Rather, segment the byteArray and on intermittently, sleeping thread
            // supply bytes to the urlConn Stream at a rate that approaches
            // the bitrate ( =30K per sec. in this instance ).
            System.out.println("Starting to write data to output...");
            // Wrap the live microphone line as an AudioInputStream and let the
            // project's FLAC writer encode it straight into the chunked body.
            // AudioSystem.write blocks until the line is closed/drained.
            AudioInputStream ais = new AudioInputStream(mtl);
            ChunkedOutputStream os = new ChunkedOutputStream(out);
            AudioSystem.write(ais, FLACFileWriter.FLAC, os);
            // FINAL_CHUNK is written to the raw stream to terminate the
            // chunked transfer (zero-length chunk marker).
            out.write(FINAL_CHUNK);
            System.out.println("IO WRITE DONE");
            out.close();
            // do you need the trailer?
            // NOW you can look at the status.
            // NOTE(review): any 2xx is treated as success; the response body
            // is intentionally ignored here (results arrive on the down channel).
            int resCode = httpConn.getResponseCode();
            if (resCode / 100 != 2) {
                System.out.println("ERROR");
            }
        }catch(Exception ex){
            ex.printStackTrace();
        }
    }
/**
* Opens a chunked HTTPS post connection and returns a Scanner with incoming data from Google Server
* Used for to get UPStream
* Chunked HTTPS ensures unlimited file size.
* @param urlStr The String for the URL
* @param data The data you want to send the server
* @param sampleRate The sample rate of the flac file.
* @return A Scanner to access the server response. (Probably will never be used)
*/
private Scanner openHttpsPostConnection(String urlStr, byte[][] data, int sampleRate){
byte[][] mextrad = data;
int resCode = -1;
OutputStream out = null;
// int http_status;
try {
URL url = new URL(urlStr);
URLConnection urlConn = url.openConnection();
if (!(urlConn instanceof HttpsURLConnection)) {
throw new IOException ("URL is not an Https URL");
}
HttpsURLConnection httpConn = (HttpsURLConnection)urlConn;
httpConn.setAllowUserInteraction(false);
httpConn.setInstanceFollowRedirects(true);
httpConn.setRequestMethod("POST");
httpConn.setDoOutput(true);
httpConn.setChunkedStreamingMode(0);
httpConn.setRequestProperty("Transfer-Encoding", "chunked");
httpConn.setRequestProperty("Content-Type", "audio/x-flac; rate=" + sampleRate);
// also worked with ("Content-Type", "audio/amr; rate=8000");
httpConn.connect();
try {
// this opens a connection, then sends POST & headers.
out = httpConn.getOutputStream();
//Note : if the audio is more than 15 seconds
// dont write it to UrlConnInputStream all in one block as this sample does.
// Rather, segment the byteArray and on intermittently, sleeping thread
// supply bytes to the urlConn Stream at a rate that approaches
// the bitrate ( =30K per sec. in this instance ).
System.out.println("Starting to write");
for(byte[] dataArray: mextrad){
out.write(dataArray); // one big block supplied instantly to the underlying chunker wont work for duration > 15 s.
try {
Thread.sleep(1000);//Delays the Audio so Google thinks its a mic.
} catch (InterruptedException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
out.write(FINAL_CHUNK);
System.out.println("IO WRITE DONE");
// do you need the trailer?
// NOW you can look at the status.
resCode = httpConn.getResponseCode();
if (resCode / 100 != 2) {
System.out.println("ERROR");
}
} catch (IOException e) {
}
if (resCode == HttpsURLConnection.HTTP_OK) {
return new Scanner(httpConn.getInputStream());
}
else{
System.out.println("HELP: " + resCode);
}
} catch (MalformedURLException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
return null;
}
    /**
     * Converts the file into a byte[]. Also Android compatible. :)
     * @param infile The File you want to get the byte[] from.
     * @return The file's entire contents as a byte[]
     * @throws IOException if something goes wrong in reading the file.
     */
    private byte[] mapFileIn(File infile) throws IOException{
        return Files.readAllBytes(infile.toPath());
    }
    /**
     * Parses the String into a GoogleResponse object.
     * Expects one line of the V2 duplex API's JSON; no-op for null, non-result,
     * or empty-result payloads.
     * @param rawResponse The String you want to parse
     * @param gr the GoogleResponse object to save the data into.
     */
    private void parseResponse(String rawResponse, GoogleResponse gr){
        // Ignore anything that is not a non-empty "result" payload.
        if(rawResponse == null || !rawResponse.contains("\"result\"")
                || rawResponse.equals("{\"result\":[]}")){ return; }
        // A confidence score is only present on final results; interim
        // results are assumed fully confident (1.0).
        if(rawResponse.contains("\"confidence\":")){
            String confidence = StringUtil.substringBetween(rawResponse, "\"confidence\":", "}");
            gr.setConfidence(confidence);
        }
        else{
            gr.setConfidence(String.valueOf(1d));
        }
        // Strip the outer JSON array brackets; a second pass handles the
        // nested "alternative" array when present.
        String array = StringUtil.trimString(rawResponse, "[", "]");
        if(array.contains("[")){
            array = StringUtil.trimString(array, "[", "]");
        }
        if(array.contains("\"confidence\":")){//Removes confidence phrase if it exists.
            array = array.substring(0, array.lastIndexOf(','));
        }
        // First element is the best transcript; the rest are alternatives.
        String[] parts = array.split(",");
        gr.setResponse(parseTranscript(parts[0]));
        for(int i = 1; i<parts.length; i++){
            gr.getOtherPossibleResponses().add(parseTranscript(parts[i]));
        }
    }
/**
* Parses each individual "transcript" phrase
* @param The string fragment to parse
* @return The parsed String
*/
private String parseTranscript(String s){
String tmp = s.substring(s.indexOf(":")+1);
if(s.endsWith("}")){
tmp = tmp.substring(0, tmp.length()-1);
}
tmp = StringUtil.stripQuotes(tmp);
if(tmp.charAt(0)==' '){//Removes space at beginning if it exists
tmp = tmp.substring(1);
}
return tmp;
}
    /**
     * Adds GSpeechResponse Listeners that fire when Google sends a response.
     * @param rl The listener you want to add
     */
    public synchronized void addResponseListener(GSpeechResponseListener rl){
        responseListeners.add(rl);
    }
    /**
     * Removes GSpeechResponseListeners that fire when Google sends a response.
     * @param rl The listener you want to remove
     */
    public synchronized void removeResponseListener(GSpeechResponseListener rl){
        responseListeners.remove(rl);
    }
/**
* Fires responseListeners
* @param gr The Google Response (in this case the response event).
*/
private synchronized void fireResponseEvent(GoogleResponse gr){
for(GSpeechResponseListener gl: responseListeners){
gl.onResponse(gr);
}
}
/**
* Chunks audio into smaller chunks to stream to the duplex API
* @param data The data you want to break into smaller pieces
* @return the byte[][] containing on array of chunks.
*/
private byte[][] chunkAudio(byte[] data) {
if(data.length >= MAX_SIZE){//If larger than 1MB
int frame = MAX_SIZE/2;
int numOfChunks = (int)(data.length/((double)frame)) + 1;
byte[][] data2D = new byte[numOfChunks][];
for(int i = 0, j = 0; i<data.length && j<data2D.length; i+=frame, j++){
int length = (data.length - i < frame)? data.length - i: frame;
System.out.println("LENGTH: " + length);
data2D[j] = new byte[length];
System.arraycopy(data, i, data2D[j], 0, length);
}
return data2D;
}
else{
byte[][] tmpData = new byte[1][data.length];
System.arraycopy(data, 0, tmpData[0], 0, data.length);
return tmpData;
}
}
}

View file

@ -0,0 +1,12 @@
package com.darkprograms.speech.recognizer;
/**
 * Response listeners for URL connections.
 * Implementations receive one {@link GoogleResponse} per recognition result
 * that Google pushes down the open channel; registration happens via
 * addResponseListener on the recognizer classes.
 * @author Skylion
 *
 */
public interface GSpeechResponseListener {
	public void onResponse(GoogleResponse gr);
}

View file

@ -0,0 +1,89 @@
package com.darkprograms.speech.recognizer;
import java.util.ArrayList;
import java.util.List;
/******************************************************************************
* Class that holds the response and confidence of a Google recognizer request
*
* @author Luke Kuza, Duncan Jauncey, Aaron Gokaslan
******************************************************************************/
public class GoogleResponse {

    /**
     * Variable that holds the best (top-ranked) response
     */
    private String response;

    /**
     * Variable that holds the confidence score
     */
    private String confidence;

    /**
     * List that holds other possible responses for this request.
     */
    private List<String> otherPossibleResponses = new ArrayList<String>(20);

    /**
     * Constructor
     */
    public GoogleResponse() {
    }

    /**
     * Gets the response text of what was said in the submitted Audio to Google
     *
     * @return String representation of what was said
     */
    public String getResponse() {
        return response;
    }

    /**
     * Set the response
     *
     * @param response The response
     */
    protected void setResponse(String response) {
        this.response = response;
    }

    /**
     * Gets the confidence score for the specific request
     *
     * @return The confidence score, ex .922343324323
     */
    public String getConfidence() {
        return confidence;
    }

    /**
     * Set the confidence score for this request
     *
     * @param confidence The confidence score
     */
    protected void setConfidence(String confidence) {
        this.confidence = confidence;
    }

    /**
     * Get other possible responses for this request.
     * @return other possible responses (the live internal list)
     */
    public List<String> getOtherPossibleResponses() {
        return otherPossibleResponses;
    }

    /**
     * Gets all returned responses for this request: the best response first,
     * followed by the alternatives.
     * Returns a fresh list. The previous implementation inserted the best
     * response into the internal alternatives list itself, so every call
     * mutated this object and repeated calls accumulated duplicates.
     * @return All returned responses
     */
    public List<String> getAllPossibleResponses() {
        List<String> all = new ArrayList<String>(otherPossibleResponses.size() + 1);
        all.add(response);
        all.addAll(otherPossibleResponses);
        return all;
    }
}

View file

@ -0,0 +1,466 @@
package com.darkprograms.speech.recognizer;
import java.io.*;
import java.net.URL;
import java.net.URLConnection;
import java.nio.charset.Charset;
import com.darkprograms.speech.util.StringUtil;
/***************************************************************
 * Class that submits FLAC audio and retrieves recognized text
 * via Google's (unofficial) V1 speech API.
 *
 * @author Luke Kuza, Duncan Jauncey, Aaron Gokaslan
 **************************************************************/
@Deprecated
public class Recognizer {

    /**
     * All supported languages for Google Speech to Text.
     */
    @Deprecated
    public enum Languages{
        AUTO_DETECT("auto"),//tells Google to auto-detect the language
        ARABIC_JORDAN("ar-JO"),
        ARABIC_LEBANON("ar-LB"),
        ARABIC_QATAR("ar-QA"),
        ARABIC_UAE("ar-AE"),
        ARABIC_MOROCCO("ar-MA"),
        ARABIC_IRAQ("ar-IQ"),
        ARABIC_ALGERIA("ar-DZ"),
        ARABIC_BAHRAIN("ar-BH"),
        ARABIC_LYBIA("ar-LY"),
        ARABIC_OMAN("ar-OM"),
        ARABIC_SAUDI_ARABIA("ar-SA"),
        ARABIC_TUNISIA("ar-TN"),
        ARABIC_YEMEN("ar-YE"),
        BASQUE("eu"),
        CATALAN("ca"),
        CZECH("cs"),
        DUTCH("nl-NL"),
        ENGLISH_AUSTRALIA("en-AU"),
        ENGLISH_CANADA("en-CA"),
        ENGLISH_INDIA("en-IN"),
        ENGLISH_NEW_ZEALAND("en-NZ"),
        ENGLISH_SOUTH_AFRICA("en-ZA"),
        ENGLISH_UK("en-GB"),
        ENGLISH_US("en-US"),
        FINNISH("fi"),
        FRENCH("fr-FR"),
        GALICIAN("gl"),
        GERMAN("de-DE"),
        HEBREW("he"),
        HUNGARIAN("hu"),
        ICELANDIC("is"),
        ITALIAN("it-IT"),
        INDONESIAN("id"),
        JAPANESE("ja"),
        KOREAN("ko"),
        LATIN("la"),
        CHINESE_SIMPLIFIED("zh-CN"),
        CHINESE_TRANDITIONAL("zh-TW"),
        CHINESE_HONGKONG("zh-HK"),
        CHINESE_CANTONESE("zh-yue"),
        MALAYSIAN("ms-MY"),
        NORWEGIAN("no-NO"),
        POLISH("pl"),
        PIG_LATIN("xx-piglatin"),
        PORTUGUESE("pt-PT"),
        PORTUGUESE_BRASIL("pt-BR"),
        ROMANIAN("ro-RO"),
        RUSSIAN("ru"),
        SERBIAN("sr-SP"),
        SLOVAK("sk"),
        SPANISH_ARGENTINA("es-AR"),
        SPANISH_BOLIVIA("es-BO"),
        SPANISH_CHILE("es-CL"),
        SPANISH_COLOMBIA("es-CO"),
        SPANISH_COSTA_RICA("es-CR"),
        SPANISH_DOMINICAN_REPUBLIC("es-DO"),
        SPANISH_ECUADOR("es-EC"),
        SPANISH_EL_SALVADOR("es-SV"),
        SPANISH_GUATEMALA("es-GT"),
        SPANISH_HONDURAS("es-HN"),
        SPANISH_MEXICO("es-MX"),
        SPANISH_NICARAGUA("es-NI"),
        SPANISH_PANAMA("es-PA"),
        SPANISH_PARAGUAY("es-PY"),
        SPANISH_PERU("es-PE"),
        SPANISH_PUERTO_RICO("es-PR"),
        SPANISH_SPAIN("es-ES"),
        SPANISH_US("es-US"),
        SPANISH_URUGUAY("es-UY"),
        SPANISH_VENEZUELA("es-VE"),
        SWEDISH("sv-SE"),
        TURKISH("tr"),
        ZULU("zu");

        //TODO Clean Up JavaDoc for Overloaded Methods using @link
        /**
         * Stores the LanguageCode (the ISO-style code Google expects).
         */
        private final String languageCode;

        /**
         * Constructor
         * @param languageCode the code passed to Google as the lang parameter
         */
        private Languages(final String languageCode){
            this.languageCode = languageCode;
        }

        public String toString(){
            return languageCode;
        }
    }

    /**
     * URL to POST audio data and retrieve results
     */
    private static final String GOOGLE_RECOGNIZER_URL = "https://www.google.com/speech-api/v1/recognize?xjerr=1&client=chromium";

    /** Whether Google's profanity filter is enabled (on by default). */
    private boolean profanityFilter = true;

    /** Language code of the speech; null lets Google auto-detect. */
    private String language = null;

    /**
     * Constructor
     */
    public Recognizer() {
        this.setLanguage(Languages.AUTO_DETECT);
    }

    /**
     * Constructor
     * @param language the raw language code (e.g. "en-US")
     */
    @Deprecated
    public Recognizer(String language) {
        this.language = language;
    }

    /**
     * Constructor
     * @param language The Languages class for the language you want to designate
     */
    public Recognizer(Languages language){
        this.language = language.languageCode;
    }

    /**
     * Constructor
     * @param profanityFilter whether to enable Google's profanity filter
     */
    public Recognizer(boolean profanityFilter){
        this.profanityFilter = profanityFilter;
    }

    /**
     * Constructor
     * @param language the raw language code (e.g. "en-US")
     * @param profanityFilter whether to enable Google's profanity filter
     */
    @Deprecated
    public Recognizer(String language, boolean profanityFilter){
        this.language = language;
        this.profanityFilter = profanityFilter;
    }

    /**
     * Constructor
     * @param language The Languages value for the language you want to designate
     * @param profanityFilter whether to enable Google's profanity filter
     */
    public Recognizer(Languages language, boolean profanityFilter){
        this.language = language.languageCode;
        this.profanityFilter = profanityFilter;
    }

    /**
     * Language: Contains all supported languages for Google Speech to Text.
     * Setting this to null will make Google use it's own language detection.
     * This value is null by default.
     * @param language the language to recognize
     */
    public void setLanguage(Languages language) {
        this.language = language.languageCode;
    }

    /**Language code. This language code must match the language of the speech to be recognized. ex. en-US ru-RU
     * This value is null by default.
     * @param language The language code.
     */
    @Deprecated
    public void setLanguage(String language) {
        this.language = language;
    }

    /**
     * Returns the state of profanityFilter
     * which enables/disables Google's profanity filter (on by default).
     * @return profanityFilter
     */
    public boolean getProfanityFilter(){
        return profanityFilter;
    }

    /**
     * Language code. This language code must match the language of the speech to be recognized. ex. en-US ru-RU
     * This value is null by default.
     * @return language the Google language
     */
    public String getLanguage(){
        return language;
    }

    /**
     * Get recognized data from a Wave file. This method will encode the wave file to a FLAC file
     *
     * @param waveFile Wave file to recognize
     * @param maxResults Maximum number of results to return in response
     * @return Returns a GoogleResponse, with the response and confidence score
     * @throws IOException Throws exception if something goes wrong
     */
    public GoogleResponse getRecognizedDataForWave(File waveFile, int maxResults) throws IOException{
        FlacEncoder flacEncoder = new FlacEncoder();
        File flacFile = new File(waveFile + ".flac");
        flacEncoder.convertWaveToFlac(waveFile, flacFile);
        GoogleResponse googleResponse = getRecognizedDataForFlac(flacFile, maxResults, 8000);
        //Delete the temporary FLAC conversion
        flacFile.delete();
        return googleResponse;
    }

    /**
     * Get recognized data from a Wave file. This method will encode the wave file to a FLAC
     *
     * @param waveFile Wave file to recognize
     * @param maxResults the maximum number of results to return in the response
     * NOTE: Sample rate of file must be 8000 unless a custom sample rate is specified.
     * @return Returns a GoogleResponse, with the response and confidence score
     * @throws IOException Throws exception if something goes wrong
     */
    public GoogleResponse getRecognizedDataForWave(String waveFile, int maxResults) throws IOException {
        return getRecognizedDataForWave(new File(waveFile), maxResults);
    }

    /**
     * Get recognized data from a FLAC file.
     *
     * @param flacFile FLAC file to recognize
     * @param maxResults the maximum number of results to return in the response
     * NOTE: Sample rate of file must be 8000 unless a custom sample rate is specified.
     * @return Returns a GoogleResponse, with the response and confidence score
     * @throws IOException Throws exception if something goes wrong
     */
    public GoogleResponse getRecognizedDataForFlac(File flacFile, int maxResults) throws IOException {
        return getRecognizedDataForFlac(flacFile, maxResults, 8000);
    }

    /**
     * Get recognized data from a FLAC file.
     *
     * @param flacFile FLAC file to recognize
     * @param maxResults the maximum number of results to return in the response
     * @param sampleRate The sample rate of the file. Default is 8000.
     * @return Returns a GoogleResponse, with the response and confidence score
     * @throws IOException Throws exception if something goes wrong
     */
    public GoogleResponse getRecognizedDataForFlac(File flacFile, int maxResults, int sampleRate) throws IOException{
        String response = rawRequest(flacFile, maxResults, sampleRate);
        GoogleResponse googleResponse = new GoogleResponse();
        parseResponse(response, googleResponse);
        return googleResponse;
    }

    /**
     * Get recognized data from a FLAC file.
     *
     * @param flacFile FLAC file to recognize
     * @param maxResults the maximum number of results to return in the response
     * @param sampleRate The sample rate of the file. Default is 8000.
     * @return Returns a GoogleResponse, with the response and confidence score
     * @throws IOException Throws exception if something goes wrong
     */
    public GoogleResponse getRecognizedDataForFlac(String flacFile, int maxResults, int sampleRate) throws IOException{
        return getRecognizedDataForFlac(new File(flacFile), maxResults, sampleRate);
    }

    /**
     * Get recognized data from a FLAC file.
     *
     * @param flacFile FLAC file to recognize
     * @param maxResults the maximum number of results to return in the response
     * @return Returns a GoogleResponse, with the response and confidence score
     * @throws IOException Throws exception if something goes wrong
     */
    public GoogleResponse getRecognizedDataForFlac(String flacFile, int maxResults) throws IOException {
        return getRecognizedDataForFlac(new File(flacFile), maxResults);
    }

    /**
     * Get recognized data from a Wave file. This method will encode the wave file to a FLAC.
     * Returns only the single best result.
     *
     * @param waveFile Wave file to recognize
     * @return Returns a GoogleResponse, with the response and confidence score
     * @throws IOException Throws exception if something goes wrong
     */
    public GoogleResponse getRecognizedDataForWave(File waveFile) throws IOException {
        return getRecognizedDataForWave(waveFile, 1);
    }

    /**
     * Get recognized data from a Wave file. This method will encode the wave file to a FLAC.
     * Returns only the single best result.
     *
     * @param waveFile Wave file to recognize
     * @return Returns a GoogleResponse, with the response and confidence score
     * @throws IOException Throws exception if something goes wrong
     */
    public GoogleResponse getRecognizedDataForWave(String waveFile) throws IOException {
        return getRecognizedDataForWave(waveFile, 1);
    }

    /**
     * Get recognized data from a FLAC file.
     * Returns only the single best result.
     *
     * @param flacFile FLAC file to recognize
     * @return Returns a GoogleResponse, with the response and confidence score
     * @throws IOException Throws exception if something goes wrong
     */
    public GoogleResponse getRecognizedDataForFlac(File flacFile) throws IOException {
        return getRecognizedDataForFlac(flacFile, 1);
    }

    /**
     * Get recognized data from a FLAC file.
     * Returns only the single best result.
     *
     * @param flacFile FLAC file to recognize
     * @return Returns a GoogleResponse, with the response and confidence score
     * @throws IOException Throws exception if something goes wrong
     */
    public GoogleResponse getRecognizedDataForFlac(String flacFile) throws IOException {
        return getRecognizedDataForFlac(flacFile, 1);
    }

    /**
     * Parses the raw response from Google into the given GoogleResponse.
     * No-op for null responses or responses without an "utterance" field.
     *
     * @param rawResponse The raw, unparsed response from Google
     * @param googleResponse the object the parsed fields are stored into
     */
    private void parseResponse(String rawResponse, GoogleResponse googleResponse) {
        if (rawResponse == null || !rawResponse.contains("utterance"))
            return;
        String array = StringUtil.substringBetween(rawResponse, "[", "]");
        String[] parts = array.split("}");
        boolean first = true;
        for( String s : parts ) {
            if( first ) {
                // First entry carries both the utterance and its confidence.
                first = false;
                String utterancePart = s.split(",")[0];
                String confidencePart = s.split(",")[1];
                String utterance = utterancePart.split(":")[1];
                String confidence = confidencePart.split(":")[1];
                utterance = StringUtil.stripQuotes(utterance);
                confidence = StringUtil.stripQuotes(confidence);
                if( utterance.equals("null") ) {
                    utterance = null;
                }
                if( confidence.equals("null") ) {
                    confidence = null;
                }
                googleResponse.setResponse(utterance);
                googleResponse.setConfidence(confidence);
            } else {
                // Remaining entries are alternative utterances only.
                String utterance = s.split(":")[1];
                utterance = StringUtil.stripQuotes(utterance);
                if( utterance.equals("null") ) {
                    utterance = null;
                }
                googleResponse.getOtherPossibleResponses().add(utterance);
            }
        }
    }

    /**
     * Performs the request to Google with a file <br>
     * Request is buffered
     *
     * @param inputFile Input files to recognize
     * @param maxResults the maximum number of results to request
     * @param sampleRate the sample rate of the FLAC data
     * @return Returns the raw, unparsed response from Google
     * @throws IOException Throws exception if something went wrong
     */
    private String rawRequest(File inputFile, int maxResults, int sampleRate) throws IOException{
        URL url;
        URLConnection urlConn;
        OutputStream outputStream;
        BufferedReader br;

        StringBuilder sb = new StringBuilder(GOOGLE_RECOGNIZER_URL);
        if( language != null ) {
            sb.append("&lang=");
            sb.append(language);
        }
        else{
            sb.append("&lang=auto");
        }
        if( !profanityFilter ) {
            sb.append("&pfilter=0");
        }
        sb.append("&maxresults=");
        sb.append(maxResults);

        // URL of Remote Script.
        url = new URL(sb.toString());
        // Open New URL connection channel.
        urlConn = url.openConnection();
        // we want to do output.
        urlConn.setDoOutput(true);
        // No caching
        urlConn.setUseCaches(false);
        // Specify the header content type.
        urlConn.setRequestProperty("Content-Type", "audio/x-flac; rate=" + sampleRate);
        // Send POST output.
        outputStream = urlConn.getOutputStream();
        FileInputStream fileInputStream = new FileInputStream(inputFile);
        try {
            byte[] buffer = new byte[256];
            int len;
            // Write only the bytes actually read. The old loop ignored the
            // read count and always wrote the full 256-byte buffer, appending
            // stale garbage after the final partial chunk of the file.
            while ((len = fileInputStream.read(buffer)) != -1) {
                outputStream.write(buffer, 0, len);
            }
        } finally {
            fileInputStream.close();
        }
        outputStream.close();

        // Get response data (the API answers with a single JSON line).
        br = new BufferedReader(new InputStreamReader(urlConn.getInputStream(), Charset.forName("UTF-8")));
        String response = br.readLine();
        br.close();
        return response;
    }
}

View file

@ -0,0 +1,282 @@
package com.darkprograms.speech.recognizer;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLConnection;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.util.ArrayList;
import java.util.List;
import javax.net.ssl.HttpsURLConnection;
import javax.xml.ws.http.HTTPException;
import com.darkprograms.speech.util.StringUtil;
/**
* This class uses Google's V2 Hook. The class is returns a chunked respones so listeners must be used.
* The class also requires an API-Key (see Constructor) for details. This class is experimental and
* subject to change as we restructure the API.
* @author Aaron Gokaslan (Skylion)
*/
public class RecognizerChunked {
/**
* Google's API V2 URL
*/
private static final String GOOGLE_SPEECH_URL_V2 = "https://www.google.com/speech-api/v2/recognize";
/**
* API-Key used for requests
*/
private final String API_KEY;
/**
* The language code Google uses to determine the language
* Default value is "auto"
*/
private String language;
/**
* Stores the Response Listeners
*/
private List<GSpeechResponseListener> responseListeners = new ArrayList<GSpeechResponseListener>();
/**
* Constructor
* @param API_KEY The API-Key for Google's Speech API. An API key can be obtained by requesting
* one by following the process shown at this
* <a href="http://www.chromium.org/developers/how-tos/api-keys">url</a>.
*/
public RecognizerChunked(String API_KEY){
this.API_KEY = API_KEY;
this.language = "auto";
}
/**
* Constructor
* @param API_KEY The API-Key for Google's Speech API. An API key can be obtained by requesting
* one by following the process shown at this
* <a href="http://www.chromium.org/developers/how-tos/api-keys">url</a>.
* @param language The language you want to use (Iso code)
* Note: This function will most likely be deprecated.
*/
public RecognizerChunked(String API_KEY, String language){
this(API_KEY);
this.language = language;
}
/**
* The current language the Recognizer is set to use. Returns the ISO-Code otherwise,
* it may return "auto."
* @return The ISO-Code or auto if the language the is not specified.
*/
public String getLanguage(){
return language;
}
/**
* Sets the language that the file should return.
* @param language The language as an ISO-Code
*/
public void setLanguage(String language){
this.language = language;
}
/**
* Analyzes the file for speech
* @param infile The file you want to analyze for speech.
* @param sampleRate The sample rate of the audioFile.
* @throws IOException if something goes wrong reading the file.
*/
public void getRecognizedDataForFlac(File infile, int sampleRate) throws IOException{
byte[] data = mapFileIn(infile);
getRecognizedDataForFlac(data, sampleRate);
}
/**
* Analyzes the file for speech
* @param infile The file you want to analyze for speech.
* @param sampleRate The sample rate of the audioFile.
* @throws IOException if something goes wrong reading the file.
*/
public void getRecognizedDataForFlac(String inFile, int sampleRate) throws IOException{
getRecognizedDataForFlac(new File(inFile), sampleRate);
}
/**
* Recognizes the byte data.
* @param data
* @param sampleRate
*/
public void getRecognizedDataForFlac(byte[] data, int sampleRate){
StringBuilder sb = new StringBuilder(GOOGLE_SPEECH_URL_V2);
sb.append("?output=json");
sb.append("&client=chromium");
sb.append("&lang=" + language);
sb.append("&key=" + API_KEY);
String url = sb.toString();
openHttpsPostConnection(url, data, sampleRate);
}
/**
* Opens a chunked response HTTPS line to the specified URL
* @param urlStr The URL string to connect for chunking
* @param data The data you want to send to Google. Speech files under 15 seconds long recommended.
* @param sampleRate The sample rate for your audio file.
*/
private void openHttpsPostConnection(final String urlStr, final byte[] data, final int sampleRate) {
new Thread () {
public void run() {
HttpsURLConnection httpConn = null;
ByteBuffer buff = ByteBuffer.wrap(data);
byte[] destdata = new byte[2048];
int resCode = -1;
OutputStream out = null;
try {
URL url = new URL(urlStr);
URLConnection urlConn = url.openConnection();
if (!(urlConn instanceof HttpsURLConnection)) {
throw new IOException ("URL must be HTTPS");
}
httpConn = (HttpsURLConnection)urlConn;
httpConn.setAllowUserInteraction(false);
httpConn.setInstanceFollowRedirects(true);
httpConn.setRequestMethod("POST");
httpConn.setDoOutput(true);
httpConn.setChunkedStreamingMode(0); //TransferType: chunked
httpConn.setRequestProperty("Content-Type", "audio/x-flac; rate=" + sampleRate);
// this opens a connection, then sends POST & headers.
out = httpConn.getOutputStream();
//beyond 15 sec duration just simply writing the file
// does not seem to work. So buffer it and delay to simulate
// bufferd microphone delivering stream of speech
// re: net.http.ChunkedOutputStream.java
while(buff.remaining() >= destdata.length){
buff.get(destdata);
out.write(destdata);
};
byte[] lastr = new byte[buff.remaining()];
buff.get(lastr, 0, lastr.length);
out.write(lastr);
out.close();
resCode = httpConn.getResponseCode();
if(resCode >= HttpURLConnection.HTTP_UNAUTHORIZED){//Stops here if Google doesn't like us/
throw new HTTPException(HttpURLConnection.HTTP_UNAUTHORIZED);//Throws
}
String line;//Each line that is read back from Google.
BufferedReader br = new BufferedReader(new InputStreamReader(httpConn.getInputStream()));
while ((line = br.readLine( )) != null) {
if(line.length()>19 && resCode > 100 && resCode < HttpURLConnection.HTTP_UNAUTHORIZED){
GoogleResponse gr = new GoogleResponse();
parseResponse(line, gr);
fireResponseEvent(gr);
}
}
} catch (MalformedURLException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
finally {httpConn.disconnect();}
}
}.start();
}
/**
* Converts the file into a byte[].
* @param infile The File you want to specify
* @return a byte array
* @throws IOException if something goes wrong reading the file.
*/
private byte[] mapFileIn(File infile) throws IOException{
FileInputStream fis = new FileInputStream(infile);
try{
FileChannel fc = fis.getChannel(); // Get the file's size and then map it into memory
int sz = (int)fc.size();
MappedByteBuffer bb = fc.map(FileChannel.MapMode.READ_ONLY, 0, sz);
byte[] data2 = new byte[bb.remaining()];
bb.get(data2);
return data2;
}
finally{//Ensures resources are closed regardless of whether the action suceeded
fis.close();
}
}
/**
* Parses the response into a Google Response
* @param rawResponse The raw String you want to parse
* @param gr The GoogleResponse you want to parse into ti.
*/
private void parseResponse(String rawResponse, GoogleResponse gr){
if(rawResponse == null || !rawResponse.contains("\"result\"")){ return; }
if(rawResponse.contains("\"confidence\":")){
String confidence = StringUtil.substringBetween(rawResponse, "\"confidence\":", "}");
gr.setConfidence(confidence);
}
else{
gr.setConfidence(String.valueOf(1d));
}
String array = StringUtil.trimString(rawResponse, "[", "]");
if(array.contains("[")){
array = StringUtil.trimString(array, "[", "]");
}
String[] parts = array.split(",");
gr.setResponse(parseTranscript(parts[0]));
for(int i = 1; i<parts.length; i++){
gr.getOtherPossibleResponses().add(parseTranscript(parts[i]));
}
}
/**
* Cleans up the transcript portion of the String
* @param s The string you want to process.
* @return The reformated string.
*/
private String parseTranscript(String s){
String tmp = s.substring(s.indexOf(":")+1);
if(s.endsWith("}")){
tmp = tmp.substring(0, tmp.length()-1);
}
tmp = StringUtil.stripQuotes(tmp);
return tmp;
}
/**
* Adds responseListener that triggers when a response from Google is recieved
* @param rl The response listener you want to add
*/
public synchronized void addResponseListener(GSpeechResponseListener rl){
responseListeners.add(rl);
}
/**
 * Removes the specified response listener so it no longer receives response events.
 * @param rl The response listener to remove
 */
public synchronized void removeResponseListener(GSpeechResponseListener rl){
    responseListeners.remove(rl);
}
/**
 * Notifies every registered listener of the given response.
 * @param gr The GoogleResponse delivered as the event object.
 */
private synchronized void fireResponseEvent(GoogleResponse gr){
    for(GSpeechResponseListener listener : responseListeners){
        listener.onResponse(gr);
    }
}
}

View file

@ -0,0 +1,261 @@
package com.darkprograms.speech.synthesiser;
import java.io.IOException;
import java.io.InputStream;
import java.io.SequenceInputStream;
import java.net.URL;
import java.net.URLConnection;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import com.darkprograms.speech.translator.GoogleTranslate;
/*******************************************************************************
* Synthesiser class that connects to Google's unoffical API to retrieve data
*
* @author Luke Kuza, Aaron Gokaslan (Skylion)
*******************************************************************************/
public class Synthesiser {

    /**
     * URL to query for Google synthesiser; the target language code is appended directly to it.
     */
    private final static String GOOGLE_SYNTHESISER_URL = "http://translate.google.com/translate_tts?tl=";

    /**
     * language of the Text you want to translate
     */
    private String languageCode;

    /**
     * LANG_XX_XXXX Variables are language codes.
     */
    public static final String LANG_AU_ENGLISH = "en-AU";
    public static final String LANG_US_ENGLISH = "en-US";
    public static final String LANG_UK_ENGLISH = "en-GB";
    public static final String LANG_ES_SPANISH = "es";
    public static final String LANG_FR_FRENCH = "fr";
    public static final String LANG_DE_GERMAN = "de";
    public static final String LANG_PT_PORTUGUESE = "pt-pt";
    public static final String LANG_PT_BRAZILIAN = "pt-br";
    //Please add on more regional languages as you find them. Also try to include the accent code if you can.

    /**
     * Constructor; defaults to Google's automatic language detection ("auto").
     */
    public Synthesiser() {
        languageCode = "auto";
    }

    /**
     * Constructor that takes language code parameter. Specify "auto" for language autoDetection.
     * @param languageCode the language code to synthesise in
     */
    public Synthesiser(String languageCode){
        this.languageCode = languageCode;
    }

    /**
     * Returns the current language code for the Synthesiser.
     * Example: English(Generic) = en, English (US) = en-US, English (UK) = en-GB, and Spanish = es.
     * @return the current language code parameter
     */
    public String getLanguage(){
        return languageCode;
    }

    /**
     * Note: set language to auto to enable automatic language detection.
     * Setting to null will also implement Google's automatic language detection.
     * @param languageCode The language code you would like to modify languageCode to.
     */
    public void setLanguage(String languageCode){
        this.languageCode = languageCode;
    }

    /**
     * Gets an input stream to MP3 data for the returned information from a request.
     *
     * @param synthText Text you want to be synthesized into MP3 data
     * @return Returns an input stream of the MP3 data that is returned from Google
     * @throws IOException Throws exception if it can not complete the request
     */
    public InputStream getMP3Data(String synthText) throws IOException{
        String languageCode = this.languageCode;//Ensures retention of language settings if set to auto
        if(languageCode == null || languageCode.equals("") || languageCode.equalsIgnoreCase("auto")){
            try{
                languageCode = detectLanguage(synthText);//Detects language
                if(languageCode == null){
                    languageCode = "en-us";//Reverts to Default Language if it can't detect it.
                }
            }
            catch(Exception ex){
                ex.printStackTrace();
                languageCode = "en-us";//Reverts to Default Language if it can't detect it.
            }
        }
        if(synthText.length()>100){
            List<String> fragments = parseString(synthText);//parses String if too long
            String tmp = getLanguage();
            setLanguage(languageCode);//Keeps it from autodetecting each fragment.
            InputStream out = getMP3Data(fragments);
            setLanguage(tmp);//Reverts it to its previous language, such as auto.
            return out;
        }
        String encoded = URLEncoder.encode(synthText, "UTF-8"); //Encode
        URL url = new URL(GOOGLE_SYNTHESISER_URL + languageCode + "&q=" + encoded); //create url
        // Open New URL connection channel.
        URLConnection urlConn = url.openConnection(); //Open connection
        urlConn.addRequestProperty("User-Agent", "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:2.0) Gecko/20100101 Firefox/4.0"); //Adding header for user agent is required
        return urlConn.getInputStream();
    }

    /**
     * Gets an InputStream to MP3Data for the returned information from a request.
     * @param synthText List of Strings you want to be synthesized into MP3 data
     * @return Returns an input stream of all the MP3 data that is returned from Google
     * @throws IOException Throws exception if it cannot complete the request
     */
    public InputStream getMP3Data(List<String> synthText) throws IOException{
        // FIX: bound the pool by the amount of work (the old fixed 1000-thread pool spawned
        // far more threads than needed) and, crucially, shut it down when done -- previously
        // the non-daemon worker threads were leaked on every call.
        ExecutorService pool = Executors.newFixedThreadPool(Math.max(1, Math.min(synthText.size(), 1000)));
        try{
            //Stores the Futures (data that will be returned later), in submission order.
            Set<Future<InputStream>> set = new LinkedHashSet<Future<InputStream>>(synthText.size());
            for(String part: synthText){ //Iterates through the list
                Callable<InputStream> callable = new MP3DataFetcher(part);//Creates Callable
                Future<InputStream> future = pool.submit(callable);//Begins to run Callable
                set.add(future);//Adds the response that will be returned to a set.
            }
            List<InputStream> inputStreams = new ArrayList<InputStream>(set.size());
            for(Future<InputStream> future: set){
                try {
                    inputStreams.add(future.get());//Gets the returned data from the future.
                } catch (ExecutionException e) {//Thrown if the MP3DataFetcher encountered an error.
                    Throwable ex = e.getCause();
                    if(ex instanceof IOException){
                        throw (IOException)ex;//Downcasts and rethrows it.
                    }
                } catch (InterruptedException e){//Will probably never be called, but just in case...
                    Thread.currentThread().interrupt();//Restores the interrupt flag for the caller.
                }
            }
            return new SequenceInputStream(Collections.enumeration(inputStreams));//Sequences the stream.
        }
        finally{
            pool.shutdown();//Lets the worker threads die once all fetches complete.
        }
    }

    /**
     * Separates a string into smaller parts so that Google will not reject the request.
     * @param input The string you want to separate
     * @return A List<String> of the String fragments from your input.
     */
    private List<String> parseString(String input){
        return parseString(input, new ArrayList<String>());
    }

    /**
     * Separates a string into smaller parts so that Google will not reject the request.
     * @param input The string you want to break up into smaller parts
     * @param fragments List<String> that the fragments are appended to.
     * If you don't have a List<String> already constructed "new ArrayList<String>()" works well.
     * @return A list of the fragments of the original String
     */
    private List<String> parseString(String input, List<String> fragments){
        if(input.length()<=100){//Base Case
            fragments.add(input);
            return fragments;
        }
        else{
            int lastWord = findLastWord(input);//Checks if a space exists
            if(lastWord<=0){
                fragments.add(input.substring(0,100));//In case you sent gibberish to Google.
                return parseString(input.substring(100), fragments);
            }else{
                fragments.add(input.substring(0,lastWord));//Otherwise, adds the last word to the list for recursion.
                return parseString(input.substring(lastWord), fragments);
            }
        }
    }

    /**
     * Finds the last word in your String (before the index of 99) by searching for spaces and ending punctuation.
     * Will preferably parse on punctuation to alleviate mid-sentence pausing.
     * @param input The String you want to search through.
     * @return The index of where the last word of the string ends before the index of 99.
     */
    private int findLastWord(String input){
        if(input.length()<100)
            return input.length();
        int space = -1;
        for(int i = 99; i>0; i--){
            char tmp = input.charAt(i);
            if(isEndingPunctuation(tmp)){
                return i+1;
            }
            if(space==-1 && tmp == ' '){
                space = i;
            }
        }
        if(space>0){
            return space;
        }
        return -1;
    }

    /**
     * Checks if char is an ending character.
     * Ending punctuation for all languages according to Wikipedia (Except for Sanskrit non-unicode).
     * @param input The char you want to check
     * @return True if it is, false if not.
     */
    private boolean isEndingPunctuation(char input){
        return input == '.' || input == '!' || input == '?' || input == ';' || input == ':' || input == '|';
    }

    /**
     * Automatically determines the language of the original text.
     * @param text represents the text you want to check the language of
     * @return the languageCode in ISO-639
     * @throws IOException if it cannot complete the request
     */
    public String detectLanguage(String text) throws IOException{
        return GoogleTranslate.detectLanguage(text);
    }

    /**
     * This class is a callable.
     * A callable is like a runnable except that it can return data and throw exceptions.
     * Useful when using futures. Dramatically improves the speed of execution.
     * @author Aaron Gokaslan (Skylion)
     */
    private class MP3DataFetcher implements Callable<InputStream>{
        private String synthText;
        public MP3DataFetcher(String synthText){
            this.synthText = synthText;
        }
        public InputStream call() throws IOException{
            return getMP3Data(synthText);
        }
    }
}

View file

@ -0,0 +1,303 @@
package com.darkprograms.speech.synthesiser;
import java.io.IOException;
import java.io.InputStream;
import java.io.SequenceInputStream;
import java.net.URL;
import java.net.URLConnection;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import com.darkprograms.speech.translator.GoogleTranslate;
/**
* This class uses the V2 version of Google's Text to Speech API. While this class requires an API key,
* the endpoint allows for additional specification of parameters including speed and pitch.
* See the constructor for instructions regarding the API_Key.
* @author Skylion (Aaron Gokaslan)
*/
public class SynthesiserV2 {

    /** Base URL for the V2 endpoint; key, text, lang, speed and pitch parameters are appended. */
    private static final String GOOGLE_SYNTHESISER_URL = "https://www.google.com/speech-api/v2/synthesize?enc=mpeg" +
            "&client=chromium";

    /**
     * API_KEY used for requests
     */
    private final String API_KEY;

    /**
     * language of the Text you want to translate
     */
    private String languageCode;

    /**
     * The pitch of the generated audio
     */
    private double pitch = 1.0;

    /**
     * The speed of the generated audio
     */
    private double speed = 1.0;

    /**
     * Constructor
     * @param API_KEY The API-Key for Google's Speech API. An API key can be obtained by requesting
     * one by following the process shown at this
     * <a href="http://www.chromium.org/developers/how-tos/api-keys">url</a>.
     */
    public SynthesiserV2(String API_KEY){
        this.API_KEY = API_KEY;
    }

    /**
     * Returns the current language code for the Synthesiser.
     * Example: English(Generic) = en, English (US) = en-US, English (UK) = en-GB, and Spanish = es.
     * @return the current language code parameter
     */
    public String getLanguage(){
        return languageCode;
    }

    /**
     * Note: set language to auto to enable automatic language detection.
     * Setting to null will also implement Google's automatic language detection.
     * @param languageCode The language code you would like to modify languageCode to.
     */
    public void setLanguage(String languageCode){
        this.languageCode = languageCode;
    }

    /**
     * @return the pitch
     */
    public double getPitch() {
        return pitch;
    }

    /**
     * Sets the pitch of the audio.
     * Valid values range from 0 to 2 inclusive.
     * Values above 1 correspond to higher pitch, values below 1 correspond to lower pitch.
     * @param pitch the pitch to set
     */
    public void setPitch(double pitch) {
        this.pitch = pitch;
    }

    /**
     * @return the speed
     */
    public double getSpeed() {
        return speed;
    }

    /**
     * Sets the speed of audio.
     * Valid values range from 0 to 2 inclusive.
     * Values higher than one correspond to faster and vice versa.
     * @param speed the speed to set
     */
    public void setSpeed(double speed) {
        this.speed = speed;
    }

    /**
     * Gets an input stream to MP3 data for the returned information from a request.
     *
     * @param synthText Text you want to be synthesized into MP3 data
     * @return Returns an input stream of the MP3 data that is returned from Google
     * @throws IOException Throws exception if it can not complete the request
     */
    public InputStream getMP3Data(String synthText) throws IOException{
        String languageCode = this.languageCode;//Ensures retention of language settings if set to auto
        if(languageCode == null || languageCode.equals("") || languageCode.equalsIgnoreCase("auto")){
            try{
                languageCode = detectLanguage(synthText);//Detects language
                if(languageCode == null){
                    languageCode = "en-us";//Reverts to Default Language if it can't detect it.
                }
            }
            catch(Exception ex){
                ex.printStackTrace();
                languageCode = "en-us";//Reverts to Default Language if it can't detect it.
            }
        }
        if(synthText.length()>100){
            List<String> fragments = parseString(synthText);//parses String if too long
            String tmp = getLanguage();
            setLanguage(languageCode);//Keeps it from autodetecting each fragment.
            InputStream out = getMP3Data(fragments);
            setLanguage(tmp);//Reverts it to its previous language, such as auto.
            return out;
        }
        String encoded = URLEncoder.encode(synthText, "UTF-8"); //Encode
        StringBuilder sb = new StringBuilder(GOOGLE_SYNTHESISER_URL);
        sb.append("&key=").append(API_KEY);
        sb.append("&text=").append(encoded);
        sb.append("&lang=").append(languageCode);
        // Google expects speed/pitch normalised to [0,1]; out-of-range values are simply omitted.
        if(speed >= 0 && speed <= 2.0){
            sb.append("&speed=").append(speed / 2.0);
        }
        if(pitch >= 0 && pitch <= 2.0){
            sb.append("&pitch=").append(pitch / 2.0);
        }
        URL url = new URL(sb.toString()); //create url
        // Open New URL connection channel.
        URLConnection urlConn = url.openConnection(); //Open connection
        urlConn.addRequestProperty("User-Agent", "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:2.0) Gecko/20100101 Firefox/4.0"); //Adding header for user agent is required
        return urlConn.getInputStream();
    }

    /**
     * Gets an InputStream to MP3Data for the returned information from a request.
     * @param synthText List of Strings you want to be synthesized into MP3 data
     * @return Returns an input stream of all the MP3 data that is returned from Google
     * @throws IOException Throws exception if it cannot complete the request
     */
    public InputStream getMP3Data(List<String> synthText) throws IOException{
        // FIX: bound the pool by the amount of work (the old fixed 1000-thread pool spawned
        // far more threads than needed) and, crucially, shut it down when done -- previously
        // the non-daemon worker threads were leaked on every call.
        ExecutorService pool = Executors.newFixedThreadPool(Math.max(1, Math.min(synthText.size(), 1000)));
        try{
            //Stores the Futures (data that will be returned later), in submission order.
            Set<Future<InputStream>> set = new LinkedHashSet<Future<InputStream>>(synthText.size());
            for(String part: synthText){ //Iterates through the list
                Callable<InputStream> callable = new MP3DataFetcher(part);//Creates Callable
                Future<InputStream> future = pool.submit(callable);//Begins to run Callable
                set.add(future);//Adds the response that will be returned to a set.
            }
            List<InputStream> inputStreams = new ArrayList<InputStream>(set.size());
            for(Future<InputStream> future: set){
                try {
                    inputStreams.add(future.get());//Gets the returned data from the future.
                } catch (ExecutionException e) {//Thrown if the MP3DataFetcher encountered an error.
                    Throwable ex = e.getCause();
                    if(ex instanceof IOException){
                        throw (IOException)ex;//Downcasts and rethrows it.
                    }
                } catch (InterruptedException e){//Will probably never be called, but just in case...
                    Thread.currentThread().interrupt();//Restores the interrupt flag for the caller.
                }
            }
            return new SequenceInputStream(Collections.enumeration(inputStreams));//Sequences the stream.
        }
        finally{
            pool.shutdown();//Lets the worker threads die once all fetches complete.
        }
    }

    /**
     * Separates a string into smaller parts so that Google will not reject the request.
     * @param input The string you want to separate
     * @return A List<String> of the String fragments from your input.
     */
    private List<String> parseString(String input){
        return parseString(input, new ArrayList<String>());
    }

    /**
     * Separates a string into smaller parts so that Google will not reject the request.
     * @param input The string you want to break up into smaller parts
     * @param fragments List<String> that the fragments are appended to.
     * If you don't have a List<String> already constructed "new ArrayList<String>()" works well.
     * @return A list of the fragments of the original String
     */
    private List<String> parseString(String input, List<String> fragments){
        if(input.length()<=100){//Base Case
            fragments.add(input);
            return fragments;
        }
        else{
            int lastWord = findLastWord(input);//Checks if a space exists
            if(lastWord<=0){
                fragments.add(input.substring(0,100));//In case you sent gibberish to Google.
                return parseString(input.substring(100), fragments);
            }else{
                fragments.add(input.substring(0,lastWord));//Otherwise, adds the last word to the list for recursion.
                return parseString(input.substring(lastWord), fragments);
            }
        }
    }

    /**
     * Finds the last word in your String (before the index of 99) by searching for spaces and ending punctuation.
     * Will preferably parse on punctuation to alleviate mid-sentence pausing.
     * @param input The String you want to search through.
     * @return The index of where the last word of the string ends before the index of 99.
     */
    private int findLastWord(String input){
        if(input.length()<100)
            return input.length();
        int space = -1;
        for(int i = 99; i>0; i--){
            char tmp = input.charAt(i);
            if(isEndingPunctuation(tmp)){
                return i+1;
            }
            if(space==-1 && tmp == ' '){
                space = i;
            }
        }
        if(space>0){
            return space;
        }
        return -1;
    }

    /**
     * Checks if char is an ending character.
     * Ending punctuation for all languages according to Wikipedia (Except for Sanskrit non-unicode).
     * @param input The char you want to check
     * @return True if it is, false if not.
     */
    private boolean isEndingPunctuation(char input){
        return input == '.' || input == '!' || input == '?' || input == ';' || input == ':' || input == '|';
    }

    /**
     * Automatically determines the language of the original text.
     * @param text represents the text you want to check the language of
     * @return the languageCode in ISO-639
     * @throws IOException if it cannot complete the request
     */
    public String detectLanguage(String text) throws IOException{
        return GoogleTranslate.detectLanguage(text);
    }

    /**
     * This class is a callable.
     * A callable is like a runnable except that it can return data and throw exceptions.
     * Useful when using futures. Dramatically improves the speed of execution.
     * @author Aaron Gokaslan (Skylion)
     */
    private class MP3DataFetcher implements Callable<InputStream>{
        private String synthText;
        public MP3DataFetcher(String synthText){
            this.synthText = synthText;
        }
        public InputStream call() throws IOException{
            return getMP3Data(synthText);
        }
    }
}

View file

@ -0,0 +1,168 @@
package com.darkprograms.speech.translator;
import java.io.IOException;
import java.io.Reader;
import java.net.URL;
import java.net.URLConnection;
import java.net.URLEncoder;
import java.nio.charset.Charset;
import java.util.Locale;
/***************************************************************************************************************
* An API for a Google Translation service in Java.
* Please Note: This API is unofficial and is not supported by Google. Subject to breakage at any time.
* The translator allows for language detection and translation.
* Recommended for translation of user interfaces or speech commands.
* All translation services provided via Google Translate
* @author Aaron Gokaslan (Skylion)
***************************************************************************************************************/
public final class GoogleTranslate { //Class marked as final since all methods are static

    /**
     * URL to query for Translation
     */
    private final static String GOOGLE_TRANSLATE_URL = "http://translate.google.com/translate_a/t?client=t";

    /**
     * Private constructor to prevent instantiation
     */
    private GoogleTranslate(){}

    /**
     * Converts the ISO-639 code into a friendly language name in the user's default language.
     * For example, if the language is English and the default locale is French, it will return "anglais".
     * Useful for UI Strings.
     * @param languageCode The ISO639-1 code
     * @return The language in the user's default language
     * @see #detectLanguage
     */
    public static String getDisplayLanguage(String languageCode){
        return (new Locale(languageCode)).getDisplayLanguage();
    }

    /**
     * Automatically determines the language of the original text.
     * @param text represents the text you want to check the language of
     * @return The ISO-639 code for the language
     * @throws IOException if it cannot complete the request
     */
    public static String detectLanguage(String text) throws IOException{
        String encoded = URLEncoder.encode(text, "UTF-8"); //Encodes the string
        URL url = new URL(GOOGLE_TRANSLATE_URL + "&text=" + encoded); //Generates URL
        String rawData = urlToText(url);//Gets text from Google
        return findLanguage(rawData);
    }

    /**
     * Automatically translates text to the system's default language according to its locale.
     * Useful for creating international applications as you can translate UI strings.
     * @param text The text you want to translate
     * @return The translated text
     * @throws IOException if it cannot complete the request
     */
    public static String translate(String text) throws IOException{
        return translate(Locale.getDefault().getLanguage(), text);
    }

    /**
     * Automatically detects language and translates to the targetLanguage.
     * @param targetLanguage The language you want to translate into in ISO-639 format
     * @param text The text you actually want to translate
     * @return The translated text.
     * @throws IOException if it cannot complete the request
     */
    public static String translate(String targetLanguage, String text) throws IOException{
        return translate("auto",targetLanguage, text);
    }

    /**
     * Translates text from sourceLanguage to targetLanguage.
     * Specifying the sourceLanguage greatly improves accuracy over short Strings.
     * @param sourceLanguage The language you want to translate from in ISO-639 format
     * @param targetLanguage The language you want to translate into in ISO-639 format
     * @param text The text you actually want to translate
     * @return the translated text.
     * @throws IOException if it cannot complete the request
     */
    public static String translate(String sourceLanguage, String targetLanguage, String text) throws IOException{
        String encoded = URLEncoder.encode(text, "UTF-8"); //Encode
        //Generates URL
        URL url = new URL(GOOGLE_TRANSLATE_URL + "&sl=" + sourceLanguage + "&tl=" + targetLanguage + "&text=" + encoded);
        String rawData = urlToText(url);//Gets text from Google
        if(rawData==null){
            return null;
        }
        String[] raw = rawData.split("\"");//Parses the JSON
        if(raw.length<2){
            return null;
        }
        return raw[1];//Returns the translation
    }

    /**
     * Converts a URL to Text.
     * @param url that you want to generate a String from
     * @return The generated String
     * @throws IOException if it cannot complete the request
     */
    private static String urlToText(URL url) throws IOException{
        URLConnection urlConn = url.openConnection(); //Open connection
        //Adding header for user agent is required. Otherwise, Google rejects the request
        urlConn.addRequestProperty("User-Agent", "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:2.0) Gecko/20100101 Firefox/4.0");
        Reader r = new java.io.InputStreamReader(urlConn.getInputStream(), Charset.forName("UTF-8"));
        try{
            StringBuilder buf = new StringBuilder();
            char[] chunk = new char[4096];//Read in blocks instead of one char at a time.
            int read;
            while((read = r.read(chunk)) >= 0){
                buf.append(chunk, 0, read);
            }
            return buf.toString();
        }
        finally{
            r.close();//FIX: the reader (and the underlying connection stream) was never closed before.
        }
    }

    /**
     * Searches rawData for the detected language.
     * @param rawData the raw String directly from Google you want to search through
     * @return The language parsed from the rawData, or null if Google cannot determine it.
     */
    private static String findLanguage(String rawData){
        for(int i = 0; i+5<rawData.length(); i++){
            boolean dashDetected = rawData.charAt(i+4)=='-';
            if(rawData.charAt(i)==',' && rawData.charAt(i+1)== '"'
                    && ((rawData.charAt(i+4)=='"' && rawData.charAt(i+5)==',')
                    || dashDetected)){
                if(dashDetected){
                    int lastQuote = rawData.substring(i+2).indexOf('"');
                    if(lastQuote>0)
                        return rawData.substring(i+2,i+2+lastQuote);
                }
                else{
                    String possible = rawData.substring(i+2,i+4);
                    if(containsLettersOnly(possible)){//Required due to Google's inconsistent formatting.
                        return possible;
                    }
                }
            }
        }
        return null;
    }

    /**
     * Checks if all characters in text are letters.
     * @param text The text you want to determine the validity of.
     * @return True if all characters are letters, otherwise false.
     */
    private static boolean containsLettersOnly(String text){
        for(int i = 0; i<text.length(); i++){
            if(!Character.isLetter(text.charAt(i))){
                return false;
            }
        }
        return true;
    }
}

View file

@ -0,0 +1,190 @@
package com.darkprograms.speech.util;
//TODO Replace this class with something that isn't 20 years old.
//ChunkedOutputStream - an OutputStream that implements HTTP/1.1 chunking
//
//Copyright (C) 1996 by Jef Poskanzer <jef@acme.com>. All rights reserved.
//
//Redistribution and use in source and binary forms, with or without
//modification, are permitted provided that the following conditions
//are met:
//1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
//THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
//ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
//IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
//ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
//FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
//DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
//OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
//HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
//OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
//SUCH DAMAGE.
//
//Visit the ACME Labs Java page for up-to-date versions of this and other
//fine Java utilities: http://www.acme.com/java/
import java.io.*;
import java.util.*;
/// An OutputStream that implements HTTP/1.1 chunking.
//<P>
//This class lets a Servlet send its response data as an HTTP/1.1 chunked
//stream. Chunked streams are a way to send arbitrary-length data without
//having to know beforehand how much you're going to send. They are
//introduced by a "Transfer-Encoding: chunked" header, so you have to
//set that header when you make one of these streams.
//<P>
//Sample usage:
//<BLOCKQUOTE><PRE><CODE>
//res.setHeader( "Transfer-Encoding", "chunked" );
//OutputStream out = res.getOutputStream();
//ChunkedOutputStream chunkOut = new ChunkedOutputStream( out );
//(write data to chunkOut instead of out)
//(optionally set footers)
//chunkOut.done();
//</CODE></PRE></BLOCKQUOTE>
//<P>
//Every time the stream gets flushed, a chunk is sent. When done()
//is called, an empty chunk is sent, marking the end of the chunked
//stream as per the chunking spec.
//<P>
//<A HREF="/resources/classes/Acme/Serve/servlet/http/ChunkedOutputStream.java">Fetch the software.</A><BR>
//<A HREF="/resources/classes/Acme.tar.Z">Fetch the entire Acme package.</A>
public class ChunkedOutputStream extends BufferedOutputStream
{

    /// Make a ChunkedOutputStream with a default buffer size.
    // @param out the underlying output stream
    public ChunkedOutputStream( OutputStream out )
    {
        super( out );
    }

    /// Make a ChunkedOutputStream with a specified buffer size.
    // @param out the underlying output stream
    // @param size the buffer size
    public ChunkedOutputStream( OutputStream out, int size )
    {
        super( out, size );
    }

    /// Flush the stream. This will write any buffered output
    // bytes as a chunk.
    // @exception IOException if an I/O error occurred
    public synchronized void flush() throws IOException
    {
        if ( count != 0 )
        {
            writeBuf( buf, 0, count );
            count = 0;
        }
    }

    // Footer name/value pairs, kept in parallel, in insertion order.
    private Vector<String> footerNames = new Vector<String>();
    private Vector<String> footerValues = new Vector<String>();

    /// Set a footer. Footers are much like HTTP headers, except that
    // they come at the end of the data instead of at the beginning.
    public void setFooter( String name, String value )
    {
        footerNames.addElement( name );
        footerValues.addElement( value );
    }

    /// Indicate the end of the chunked data by sending a zero-length chunk,
    // possibly including footers.
    // @exception IOException if an I/O error occurred
    public void done() throws IOException
    {
        flush();
        PrintStream pout = new PrintStream( out );
        pout.println( "0" );
        if ( footerNames.size() > 0 )
        {
            // Send footers.
            for ( int i = 0; i < footerNames.size(); ++i )
            {
                String name = footerNames.elementAt( i );
                String value = footerValues.elementAt( i );
                pout.println( name + ": " + value );
            }
        }
        footerNames = null; // Marks the stream as terminated; close() checks this.
        footerValues = null;
        pout.println( "" );
        pout.flush();
    }

    /// Make sure that calling close() terminates the chunked stream.
    public void close() throws IOException
    {
        if ( footerNames != null )
            done();
        super.close();
    }

    /// Write a sub-array of bytes.
    // <P>
    // The only reason we have to override the BufferedOutputStream version
    // of this is that it writes the array directly to the output stream
    // if it doesn't fit in the buffer. So we make it use our own chunk-write
    // routine instead. Otherwise this is identical to the parent-class
    // version.
    // @param b the data to be written
    // @param off the start offset in the data
    // @param len the number of bytes that are written
    // @exception IOException if an I/O error occurred
    public synchronized void write( byte b[], int off, int len ) throws IOException
    {
        int avail = buf.length - count;
        if ( len <= avail )
        {
            System.arraycopy( b, off, buf, count, len );
            count += len;
            return;
        }
        flush();
        writeBuf( b, off, len );
    }

    private static final byte[] crlf = { 13, 10 };
    private byte[] lenBytes = new byte[20]; // big enough for any number in hex

    /// The only routine that actually writes to the output stream.
    // This is where chunking semantics are implemented.
    // @exception IOException if an I/O error occurred
    private void writeBuf( byte b[], int off, int len ) throws IOException
    {
        // Write the chunk length as a hex number.
        // FIX: the old code used the deprecated String.getBytes(int,int,byte[],int) and then
        // wrote the ENTIRE 20-byte buffer, so every chunk header was followed by trailing NUL
        // bytes -- invalid HTTP/1.1 chunked encoding. Write exactly the hex digits instead.
        String lenStr = Integer.toString( len, 16 );
        for ( int i = 0; i < lenStr.length(); ++i )
            lenBytes[i] = (byte) lenStr.charAt( i ); // hex digits are pure ASCII
        out.write( lenBytes, 0, lenStr.length() );
        // Write a CRLF.
        out.write( crlf );
        // Write the data.
        if ( len != 0 )
            out.write( b, off, len );
        // Write a CRLF.
        out.write( crlf );
        // And flush the real stream.
        out.flush();
    }
}

View file

@ -0,0 +1,120 @@
package com.darkprograms.speech.util;
/*************************************************************************
* Compilation: javac Complex.java
* Execution: java Complex
*
* Data type for complex numbers.
*
* The data type is "immutable" so once you create and initialize
* a Complex object, you cannot change it. The "final" keyword
* when declaring re and im enforces this rule, making it a
* compile-time error to change the .re or .im fields after
* they've been initialized.
*
* Class based off of Princeton University's Complex.java class
* @author Aaron Gokaslan, Princeton University
*************************************************************************/
public class Complex {
private final double re; // the real part
private final double im; // the imaginary part
// create a new object with the given real and imaginary parts
/** Creates a new immutable complex number with the given real and imaginary parts. */
public Complex(double real, double imag) {
    re = real;
    im = imag;
}
// Render this complex number in the usual "a + bi" / "a - bi" notation,
// collapsing to a plain real or pure-imaginary form when a part is zero.
public String toString() {
    if (im == 0) {
        return String.valueOf(re);
    } else if (re == 0) {
        return im + "i";
    } else if (im < 0) {
        return re + " - " + (-im) + "i";
    } else {
        return re + " + " + im + "i";
    }
}
// return abs/modulus/magnitude and angle/phase/argument
public double abs() { return Math.hypot(re, im); } // Math.sqrt(re*re + im*im)
public double phase() { return Math.atan2(im, re); } // between -pi and pi
// Component-wise addition: returns a new Complex equal to (this + b).
public Complex plus(Complex b) {
    return new Complex(this.re + b.re, this.im + b.im);
}
// return a new Complex object whose value is (this - b)
public Complex minus(Complex b) {
Complex a = this;
double real = a.re - b.re;
double imag = a.im - b.im;
return new Complex(real, imag);
}
// return a new Complex object whose value is (this * b)
public Complex times(Complex b) {
Complex a = this;
double real = a.re * b.re - a.im * b.im;
double imag = a.re * b.im + a.im * b.re;
return new Complex(real, imag);
}
// scalar multiplication
// return a new object whose value is (this * alpha)
public Complex times(double alpha) {
return new Complex(alpha * re, alpha * im);
}
// return a new Complex object whose value is the conjugate of this
public Complex conjugate() { return new Complex(re, -im); }
// return a new Complex object whose value is the reciprocal of this
public Complex reciprocal() {
double scale = re*re + im*im;
return new Complex(re / scale, -im / scale);
}
// return the real or imaginary part
public double re() { return re; }
public double im() { return im; }
// return a / b
public Complex divides(Complex b) {
Complex a = this;
return a.times(b.reciprocal());
}
// return a new Complex object whose value is the complex exponential of this
public Complex exp() {
return new Complex(Math.exp(re) * Math.cos(im), Math.exp(re) * Math.sin(im));
}
// return a new Complex object whose value is the complex sine of this
public Complex sin() {
return new Complex(Math.sin(re) * Math.cosh(im), Math.cos(re) * Math.sinh(im));
}
// return a new Complex object whose value is the complex cosine of this
public Complex cos() {
return new Complex(Math.cos(re) * Math.cosh(im), -Math.sin(re) * Math.sinh(im));
}
// return a new Complex object whose value is the complex tangent of this
public Complex tan() {
return sin().divides(cos());
}
// returns the magnitude of the imaginary number.
public double getMagnitude(){
return Math.sqrt(re*re+im*im);
}
public boolean equals(Complex other){
return (re==other.re) && (im==other.im);
}
}

View file

@ -0,0 +1,133 @@
package com.darkprograms.speech.util;
/*************************************************************************
* Compilation: javac FFT.java
* Execution: java FFT N
* Dependencies: Complex.java
*
* Compute the FFT and inverse FFT of a length N complex sequence.
* Bare bones implementation that runs in O(N log N) time. Our goal
* is to optimize the clarity of the code, rather than performance.
*
* Limitations
* -----------
* - assumes N is a power of 2
*
* - not the most memory efficient algorithm (because it uses
* an object type for representing complex numbers and because
* it re-allocates memory for the subarray, instead of doing
* in-place or reusing a single temporary array)
*
*************************************************************************/
/*************************************************************************
* @author Skylion implementation
* @author Princeton University for the actual algorithm.
************************************************************************/
public class FFT {

    /**
     * Computes the discrete Fourier transform of {@code x}.
     * The input length must be a power of 2.
     *
     * @param x complex input samples
     * @return a newly allocated array holding the transform
     */
    public static Complex[] fft(Complex[] x) {
        int n = x.length;

        // A single sample is its own transform.
        if (n == 1) {
            return new Complex[] { x[0] };
        }

        // Radix-2 Cooley-Tukey only handles even (power-of-2) lengths.
        if (n % 2 != 0) {
            throw new RuntimeException("N is not a power of 2");
        }

        int half = n / 2;
        Complex[] scratch = new Complex[half];

        // Transform of the even-indexed samples.
        for (int k = 0; k < half; k++) {
            scratch[k] = x[2 * k];
        }
        Complex[] evenFft = fft(scratch);

        // Transform of the odd-indexed samples. The scratch array can be
        // reused safely: the recursive call above returned a fresh array.
        for (int k = 0; k < half; k++) {
            scratch[k] = x[2 * k + 1];
        }
        Complex[] oddFft = fft(scratch);

        // Butterfly step: combine the two half-size transforms.
        Complex[] combined = new Complex[n];
        for (int k = 0; k < half; k++) {
            double angle = -2 * k * Math.PI / n;
            Complex twiddle = new Complex(Math.cos(angle), Math.sin(angle));
            Complex scaled = twiddle.times(oddFft[k]);
            combined[k] = evenFft[k].plus(scaled);
            combined[k + half] = evenFft[k].minus(scaled);
        }
        return combined;
    }

    /**
     * Computes the inverse DFT of {@code x} (length must be a power of 2)
     * using the conjugate identity: ifft(x) = conj(fft(conj(x))) / n.
     *
     * @param x complex frequency-domain samples
     * @return a newly allocated array holding the inverse transform
     */
    public static Complex[] ifft(Complex[] x) {
        int n = x.length;
        Complex[] y = new Complex[n];

        // Conjugate the input.
        for (int i = 0; i < n; i++) {
            y[i] = x[i].conjugate();
        }

        // Forward transform of the conjugated sequence.
        y = fft(y);

        // Conjugate again and scale by 1/n.
        for (int i = 0; i < n; i++) {
            y[i] = y[i].conjugate().times(1.0 / n);
        }
        return y;
    }

    /**
     * Computes the circular convolution of {@code x} and {@code y}.
     * Both inputs must have the same (power-of-2) length.
     */
    public static Complex[] cconvolve(Complex[] x, Complex[] y) {
        // Inputs should probably be zero-padded to a common power-of-2
        // length by the caller; see convolve() below.
        if (x.length != y.length) {
            throw new RuntimeException("Dimensions don't agree");
        }

        int n = x.length;

        // Convolution in time equals point-wise product in frequency.
        Complex[] a = fft(x);
        Complex[] b = fft(y);
        Complex[] product = new Complex[n];
        for (int i = 0; i < n; i++) {
            product[i] = a[i].times(b[i]);
        }
        return ifft(product);
    }

    /**
     * Computes the linear convolution of {@code x} and {@code y} by
     * zero-padding both to twice their length and circularly convolving.
     */
    public static Complex[] convolve(Complex[] x, Complex[] y) {
        Complex zero = new Complex(0, 0);

        Complex[] a = new Complex[2 * x.length];
        for (int i = 0; i < a.length; i++) {
            a[i] = (i < x.length) ? x[i] : zero;
        }

        Complex[] b = new Complex[2 * y.length];
        for (int i = 0; i < b.length; i++) {
            b[i] = (i < y.length) ? y[i] : zero;
        }

        return cconvolve(a, b);
    }
}

View file

@ -0,0 +1,69 @@
package com.darkprograms.speech.util;
/**
* A string utility class for commonly used methods.
* These methods are particularly useful for parsing.
* @author Skylion
*/
/**
 * A string utility class for commonly used methods.
 * These methods are particularly useful for parsing.
 * @author Skylion
 */
public class StringUtil {

    private StringUtil() {} // Prevents instantiation

    /**
     * Removes quotation marks from beginning and end of string.
     *
     * @param s The string you want to remove the quotation marks from.
     * @return The modified String.
     */
    public static String stripQuotes(String s) {
        int start = 0;
        if (s.startsWith("\"")) {
            start = 1;
        }
        int end = s.length();
        // Guard end > start: for the single character input "\"" the same
        // quote is both prefix and suffix, and the original end = length-1
        // produced substring(1, 0) -> StringIndexOutOfBoundsException.
        if (s.endsWith("\"") && end > start) {
            end = s.length() - 1;
        }
        return s.substring(start, end);
    }

    /**
     * Returns the first instance of String found exclusively between part1 and part2.
     *
     * @param s The String you want to substring.
     * @param part1 The beginning of the String you want to search for.
     * @param part2 The end of the String you want to search for.
     * @return The String between part1 and part2, or null if s does not
     *         contain part1 followed by part2.
     */
    public static String substringBetween(String s, String part1, String part2) {
        String sub = null;
        int i = s.indexOf(part1);
        // Search for part2 only after the end of part1's first occurrence.
        int j = s.indexOf(part2, i + part1.length());
        if (i != -1 && j != -1) {
            int nStart = i + part1.length();
            sub = s.substring(nStart, j);
        }
        return sub;
    }

    /**
     * Gets the string exclusively between the first instance of part1 and
     * the last instance of part2.
     *
     * NOTE(review): the "+ 1" below also skips the single character that
     * immediately follows part1, so the result starts one character later
     * than the doc suggests. Preserved as-is since callers may rely on it —
     * confirm against call sites before changing.
     *
     * @param s The string you want to trim.
     * @param part1 The term to trim after first instance.
     * @param part2 The term to trim before the last instance of.
     * @return The trimmed String, or null if either part is absent.
     */
    public static String trimString(String s, String part1, String part2) {
        if (!s.contains(part1) || !s.contains(part2)) {
            return null;
        }
        int first = s.indexOf(part1) + part1.length() + 1;
        String tmp = s.substring(first);
        int last = tmp.lastIndexOf(part2);
        tmp = tmp.substring(0, last);
        return tmp;
    }
}

View file

@ -0,0 +1,55 @@
Sphinx-4 Speech Recognition System
-------------------------------------------------------------------
Sphinx-4 is a state-of-the-art, speaker-independent, continuous speech
recognition system written entirely in the Java programming language. It
was created via a joint collaboration between the Sphinx group at
Carnegie Mellon University, Sun Microsystems Laboratories, Mitsubishi
Electric Research Labs (MERL), and Hewlett Packard (HP), with
contributions from the University of California at Santa Cruz (UCSC) and
the Massachusetts Institute of Technology (MIT).
The design of Sphinx-4 is based on patterns that have emerged from the
design of past systems as well as new requirements based on areas that
researchers currently want to explore. To exercise this framework, and
to provide researchers with a "research-ready" system, Sphinx-4 also
includes several implementations of both simple and state-of-the-art
techniques. The framework and the implementations are all freely
available via open source under a very generous BSD-style license.
Because it is written entirely in the Java programming language, Sphinx-4
can run on a variety of platforms without requiring any special
compilation or changes. We've tested Sphinx-4 on the following platforms
with success.
To get started with sphinx4 visit our wiki
http://cmusphinx.sourceforge.net/wiki
Please give Sphinx-4 a try and post your questions, comments, and
feedback to one of the CMU Sphinx Forums:
http://sourceforge.net/p/cmusphinx/discussion/sphinx4
We can also be reached at cmusphinx-devel@lists.sourceforge.net.
Sincerely,
The Sphinx-4 Team:
(in alph. order)
Evandro Gouvea, CMU (developer and speech advisor)
Peter Gorniak, MIT (developer)
Philip Kwok, Sun Labs (developer)
Paul Lamere, Sun Labs (design/technical lead)
Beth Logan, HP (speech advisor)
Pedro Moreno, Google (speech advisor)
Bhiksha Raj, MERL (design lead)
Mosur Ravishankar, CMU (speech advisor)
Bent Schmidt-Nielsen, MERL (speech advisor)
Rita Singh, CMU/MIT (design/speech advisor)
JM Van Thong, HP (speech advisor)
Willie Walker, Sun Labs (overall lead)
Manfred Warmuth, USCS (speech advisor)
Joe Woelfel, MERL (developer and speech advisor)
Peter Wolf, MERL (developer and speech advisor)

View file

@ -0,0 +1,193 @@
Sphinx-4 Speech Recognition System
-------------------------------------------------------------------
Version: 1.0Beta6
Release Date: March 2011
-------------------------------------------------------------------
New Features and Improvements:
* SRGS/GrXML support, more to come soon with support for JSAPI2
* Model layout is unified with Pocketsphinx/Sphinxtrain
* Netbeans project files are included
* Language models can be loaded from URI
* Batch testing application allows testing inside Sphinxtrain
Bug Fixes:
* Flat linguist accuracy issue fixed
* Intelligent sorting in partitioner fixes stack overflow when tokens
have identical scores
* Various bug fixes
Thanks:
Timo Bauman, Nasir Hussain, Michele Alessandrini, Evandro Goueva,
Stephen Marquard, Larry A. Taylor, Yuri Orlov, Dirk Schnelle-Walka,
James Chivers, Firas Al Khalil
-------------------------------------------------------------------
Version: 1.0Beta5
Release Date: August 2010
-------------------------------------------------------------------
New Features and Improvements:
* Alignment demo and grammar to align long speech recordings to
transcription and get word times
* Lattice grammar for multipass decoding
* Explicit-backoff in LexTree linguist
* Significant LVCSR speedup with proper LexTree compression
* Simple filter to drop zero energy frames
* Graphviz for grammar dump visualization instead of AISee
* Voxforge decoding accuracy test
* Lattice scoring speedup
* JSAPI-free JSGF parser
Bug Fixes:
* Insertion probabilities are counted in lattice scores
* Don't waste resources and memory on dummy acoustic model
transformations
* Small DMP files are loaded properly
* JSGF parser fixes
* Documentation improvements
* Debian package stuff
Thanks:
Antoine Raux, Marek Lesiak, Yaniv Kunda, Brian Romanowski, Tony
Robinson, Bhiksha Raj, Timo Baumann, Michele Alessandrini, Francisco
Aguilera, Peter Wolf, David Huggins-Daines, Dirk Schnelle-Walka.
-------------------------------------------------------------------
Version: 1.0Beta4
Release Date: February 2010
-------------------------------------------------------------------
New Features and Improvements:
* Large arbitrary-order language models
* Simplified and reworked model loading code
* Raw configuration and demos
* HTK model loader
* A lot of code optimizations
* JSAPI-independent JSGF parser
* Noise filtering components
* Lattice rescoring
* Server-based language model
Bug fixes:
* Lots of bug fixes: PLP extraction, race-conditions
in scoring, etc.
Thanks:
Peter Wolf, Yaniv Kunda, Antoine Raux, Dirk Schnelle-Walka,
Yannick Estève, Anthony Rousseau and LIUM team, Christophe Cerisara.
-------------------------------------------------------------------
Version: 1.0Beta3
Release Date: August 2009
-------------------------------------------------------------------
New Features and Improvements:
* BatchAGC frontend component
* Completed transition to defaults in annotations
* ConcatFeatureExtrator to cooperate with cepwin models
* End of stream signals are passed to the decoder to fix cancellation
* Timer API improvement
* Threading policy is changed to TAS
Bug fixes:
* Fixes reading UTF-8 from language model dump.
* Huge memory optimization of the lattice compression
* More stable fronend work with DataStart and DataEnd and optional
SpeechStart/SpeechEnd
Thanks:
Yaniv Kunda, Michele Alessandrini, Holger Brandl, Timo Baumann,
Evandro Gouvea
-------------------------------------------------------------------
Version: 1.0Beta2
Release Date: February 2009
-------------------------------------------------------------------
New Features and Improvements:
* new much cleaner and more robust configuration system
* migrated to java5
* xml-free instantiation of new systems
* improved feature extraction (better voice activity detection, many bugfixes)
* Cleaned up some of the core APIs
* include-tag for configuration files
* better JavaSound support
* fully qualified grammar names in JSGF (Roger Toenz)
* support for dictionary addenda in the FastDictionary (Gregg Liming)
* added batch tools for measuring performance on NIST corpus with CTL files
* many performance and stability improvements
-------------------------------------------------------------------
Version: 1.0Beta
Release Date: September 2004
-------------------------------------------------------------------
New Features:
* Confidence scoring
* Posterior probability computation
* Sausage creation from a lattice
* Dynamic grammars
* Narrow bandwidth acoustic model
* Out-of-grammar utterance rejection
* More demonstration programs
* WSJ5K Language model
Improvements:
* Better control over microphone selection
* JSGF limitations removed
* Improved performance for large, perplex JSGF grammars
* Added Filler support for JSGF Grammars
* Ability to configure microphone input
* Added ECMAScript Action Tags support and demos.
Bug fixes:
* Lots of bug fixes
Documentation:
* Added the Sphinx-4 FAQ
* Added scripts and instructions for building a WSJ5k language model
from LDC data.
Thanks:
* Peter Gorniak, Willie Walker, Philip Kwok, Paul Lamere
-------------------------------------------------------------------
Version: 0.1alpha
Release Date: June 2004
-------------------------------------------------------------------
Initial release

View file

@ -0,0 +1,88 @@
Speaker Adaptation with MLLR Transformation
Unsupervised speaker adaptation for Sphinx4
For building an improved acoustic model there are two methods. One of them
needs to collect data from a speaker and train the acoustic model set. Thus
using the speaker's characteristics, the recognition will be more accurate.
The disadvantage of this method is that it needs a large amount of data to be
collected to have a sufficient model accuracy.
The other method, when the amount of data available is small from a new
speaker, is to collect them and by using an adaptation technique to adapt the
model set to better fit the speaker's characteristics.
The adaptation technique used is MLLR (maximum likelihood linear regression)
transform that is applied depending on the available data by generating one or
more transformations that reduce the mismatch between
an initial model set and the adaptation data. There is only one transformation
when the amount of available data is too small and is called global adaptation
transform. The global transform is applied to every Gaussian component in the
model set. Otherwise, when the amount of adaptation data is large, the number
of transformations is increasing and each transformation is applied to a
certain cluster of Gaussian components.
To be able to decode with an adapted model there are two important classes that
should be imported:
import edu.cmu.sphinx.decoder.adaptation.Stats;
import edu.cmu.sphinx.decoder.adaptation.Transform;
Stats Class estimates a MLLR transform for each cluster of data and the
transform will be applied to the corresponding cluster. You can choose the
number of clusters by giving the number as argument to
createStats(nrOfClusters) in Stats method. The method will return an object
that contains the loaded acoustic model and the number of clusters. It is
important to collect counts from each Result object because, based on them, we
will perform the estimation of the MLLR transformation.
Before starting collect counts it is important to have all Gaussians clustered.
So, createStats(nrOfClusters) will generate an ClusteredDensityFileData object
to prepare the Gaussians. ClusteredDensityFileData class performs the clustering
using the "k-means" clustering algorithm. The k-means clustering algorithm aims
to partition the Gaussians into k clusters in which each Gaussian belongs
to the cluster with the nearest mean. It is interesting to know that the problem
of clustering is computationally difficult, so the heuristic used is the
Euclidean criterion.
The next step is to collect counts from each Result object and store them
separately for each cluster. Here, the matrices regLs and regRs used in
computing the transformation are filled. Transform class performs the actual
transformation for each cluster. Given the counts previously gathered and the
number of clusters, the class will compute the two matrices A (the
transformation matrix) B (the bias vector) that are tied across the Gaussians
from the corresponding cluster. A Transform object will contain all the
transformations computed for an utterance. To use the adapted acoustic model it
is necessary to update the Sphinx3Loader which is responsible for
loading the files from the model. When updating occurs, the acoustic model is
already loaded, so setTransform(transform) method will replace the old means
with the new ones.
Now, that we have the theoretical part, lets see the practical part. Here is
how you create and use a MLLR transformation:
Stats stats = recognizer.createStats(1);
recognizer.startRecognition(stream);
while ((result = recognizer.getResult()) != null) {
stats.collect(result);
}
recognizer.stopRecognition();
// Transform represents the speech profile
Transform transform = stats.createTransform();
recognizer.setTransform(transform);
After setting the transformation to the StreamSpeechRecognizer object,
the recognizer is ready to decode using the new means. The process
of recognition is the same as you decode with the general acoustic model.
When you create and set a transformation is like you create a
new acoustic model with speaker's characteristics, thus the accuracy
will be better.
For further decodings you can store the transformation of a speaker in a file
by performing store(“FilePath”, 0) in Transform object.
If you have your own transformation known as mllr_matrix previously generated
with Sphinx4 or with another program, you can load the file by performing
load(“FilePath”) in Transform object and then to set it to an Recognizer object.

View file

@ -0,0 +1,40 @@
Copyright 1999-2015 Carnegie Mellon University.
Portions Copyright 2002-2008 Sun Microsystems, Inc.
Portions Copyright 2002-2008 Mitsubishi Electric Research Laboratories.
Portions Copyright 2013-2015 Alpha Cephei, Inc.
All Rights Reserved. Use is subject to license terms.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Original authors' names are not deleted.
4. The authors' names are not used to endorse or promote products
derived from this software without specific prior written
permission.
This work was supported in part by funding from the Defense Advanced
Research Projects Agency and the National Science Foundation of the
United States of America, the CMU Sphinx Speech Consortium, and
Sun Microsystems, Inc.
CARNEGIE MELLON UNIVERSITY, SUN MICROSYSTEMS, INC., MITSUBISHI
ELECTRONIC RESEARCH LABORATORIES AND THE CONTRIBUTORS TO THIS WORK
DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
CARNEGIE MELLON UNIVERSITY, SUN MICROSYSTEMS, INC., MITSUBISHI
ELECTRONIC RESEARCH LABORATORIES NOR THE CONTRIBUTORS BE LIABLE FOR
ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

View file

@ -0,0 +1,88 @@
<project
xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.sonatype.oss</groupId>
<artifactId>oss-parent</artifactId>
<version>7</version>
</parent>
<groupId>edu.cmu.sphinx</groupId>
<artifactId>sphinx4-parent</artifactId>
<version>1.0-SNAPSHOT</version>
<packaging>pom</packaging>
<name>Sphinx4</name>
<url>http://cmusphinx.sourceforge.net</url>
<modules>
<module>sphinx4-core</module>
<module>sphinx4-data</module>
<module>sphinx4-samples</module>
</modules>
<dependencies>
<dependency>
<groupId>org.testng</groupId>
<artifactId>testng</artifactId>
<version>6.8.8</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.hamcrest</groupId>
<artifactId>hamcrest-library</artifactId>
<version>1.3</version>
<scope>test</scope>
</dependency>
</dependencies>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<project.scm.root>svn.code.sf.net/p/cmusphinx/code/trunk/sphinx4</project.scm.root>
</properties>
<scm>
<connection>scm:svn:http://${project.scm.root}</connection>
<developerConnection>scm:svn:svn+ssh://${project.scm.root}</developerConnection>
<url>http://${project.scm.root}</url>
</scm>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<version>2.2.1</version>
<executions>
<execution>
<id>attach-sources</id>
<phase>package</phase>
<goals>
<goal>jar</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>2.9.1</version>
<executions>
<execution>
<id>attach-javadocs</id>
<phase>package</phase>
<goals>
<goal>jar</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

View file

@ -0,0 +1,34 @@
<project
xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>edu.cmu.sphinx</groupId>
<artifactId>sphinx4-parent</artifactId>
<version>1.0-SNAPSHOT</version>
</parent>
<artifactId>sphinx4-core</artifactId>
<packaging>jar</packaging>
<name>Sphinx4 core</name>
<dependencies>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-math3</artifactId>
<version>3.2</version>
</dependency>
<dependency>
<groupId>edu.cmu.sphinx</groupId>
<artifactId>sphinx4-data</artifactId>
<version>1.0-SNAPSHOT</version>
<scope>test</scope>
</dependency>
</dependencies>
</project>

View file

@ -0,0 +1,355 @@
/*
* Copyright 2014 Alpha Cephei Inc.
* All Rights Reserved. Use is subject to license terms.
*
* See the file "license.terms" for information on usage and
* redistribution of this file, and for a DISCLAIMER OF ALL
* WARRANTIES.
*
*/
package edu.cmu.sphinx.alignment;
import static java.lang.Math.abs;
import static java.lang.Math.max;
import static java.lang.Math.min;
import static java.util.Arrays.fill;
import static java.util.Collections.emptyList;
import java.util.*;
import edu.cmu.sphinx.util.Range;
import edu.cmu.sphinx.util.Utilities;
/**
*
* @author Alexander Solovets
*/
/**
 * Aligns a query sequence of words against a longer reference ("database")
 * sequence. Sequences are compared by tuples of {@code tupleSize}
 * consecutive words; the alignment itself is found with a shortest-path
 * search over a sparse edit lattice.
 *
 * @author Alexander Solovets
 */
public class LongTextAligner {

    /**
     * Shortest-path alignment between the tuples of one query and the
     * database tuples that occur inside a given database range.
     */
    private final class Alignment {

        /** A cell of the alignment lattice: (query row, database column). */
        public final class Node {

            private final int databaseIndex;
            private final int queryIndex;

            private Node(int row, int column) {
                this.databaseIndex = column;
                this.queryIndex = row;
            }

            /** Index of the aligned tuple in the full database. */
            public int getDatabaseIndex() {
                return shifts.get(databaseIndex - 1);
            }

            /** Index of the aligned tuple in the full query. */
            public int getQueryIndex() {
                return indices.get(queryIndex - 1);
            }

            /** Query tuple at this node, or null on the lattice boundary. */
            public String getQueryWord() {
                if (queryIndex > 0)
                    return query.get(getQueryIndex());
                return null;
            }

            /** Database tuple at this node, or null on the lattice boundary. */
            public String getDatabaseWord() {
                if (databaseIndex > 0)
                    return reftup.get(getDatabaseIndex());
                return null;
            }

            /** Step cost: 0 for a tuple match, 1 otherwise; boundary cells
             *  carry the cumulative insertion/deletion cost. */
            public int getValue() {
                if (isBoundary())
                    return max(queryIndex, databaseIndex);
                return hasMatch() ? 0 : 1;
            }

            public boolean hasMatch() {
                return getQueryWord().equals(getDatabaseWord());
            }

            /** True on the first row or first column of the lattice. */
            public boolean isBoundary() {
                return queryIndex == 0 || databaseIndex == 0;
            }

            /** True for the bottom-right corner, i.e. the search target. */
            public boolean isTarget() {
                return queryIndex == indices.size() &&
                       databaseIndex == shifts.size();
            }

            /** Successors: diagonal (match/substitute), right (insert),
             *  down (delete). */
            public List<Node> adjacent() {
                List<Node> result = new ArrayList<Node>(3);
                if (queryIndex < indices.size() &&
                        databaseIndex < shifts.size()) {
                    result.add(new Node(queryIndex + 1, databaseIndex + 1));
                }
                if (databaseIndex < shifts.size()) {
                    result.add(new Node(queryIndex, databaseIndex + 1));
                }
                if (queryIndex < indices.size()) {
                    result.add(new Node(queryIndex + 1, databaseIndex));
                }
                return result;
            }

            @Override
            public boolean equals(Object object) {
                if (!(object instanceof Node))
                    return false;
                Node other = (Node) object;
                return queryIndex == other.queryIndex &&
                       databaseIndex == other.databaseIndex;
            }

            @Override
            public int hashCode() {
                return 31 * (31 * queryIndex + databaseIndex);
            }

            @Override
            public String toString() {
                return String.format("[%d %d]", queryIndex, databaseIndex);
            }
        }

        private final List<Integer> shifts;
        private final List<String> query;
        private final List<Integer> indices;
        private final List<Node> alignment;

        /**
         * Runs a Dijkstra-style search over the sparse lattice and stores
         * the matched nodes (in query order) in {@link #alignment}. If no
         * path reaches the target, {@code alignment} is left empty.
         *
         * @param query query tuples to align
         * @param range range of the database to consider
         */
        public Alignment(List<String> query, Range range) {
            this.query = query;
            indices = new ArrayList<Integer>();
            Set<Integer> shiftSet = new TreeSet<Integer>();
            // Keep only query tuples that occur in the database at all,
            // and collect the in-range database positions ("shifts").
            for (int i = 0; i < query.size(); i++) {
                if (tupleIndex.containsKey(query.get(i))) {
                    indices.add(i);
                    for (Integer shift : tupleIndex.get(query.get(i))) {
                        if (range.contains(shift))
                            shiftSet.add(shift);
                    }
                }
            }

            shifts = new ArrayList<Integer>(shiftSet);

            // Priority queue ordered by accumulated path cost.
            final Map<Node, Integer> cost = new HashMap<Node, Integer>();
            PriorityQueue<Node> openSet = new PriorityQueue<Node>(1, new Comparator<Node>() {
                @Override
                public int compare(Node o1, Node o2) {
                    return cost.get(o1).compareTo(cost.get(o2));
                }
            });
            Collection<Node> closedSet = new HashSet<Node>();
            Map<Node, Node> parents = new HashMap<Node, Node>();

            Node startNode = new Node(0, 0);
            cost.put(startNode, 0);
            openSet.add(startNode);

            while (!openSet.isEmpty()) {
                Node q = openSet.poll();
                if (closedSet.contains(q))
                    continue;

                if (q.isTarget()) {
                    // Backtrace through the parent links, keeping only the
                    // interior nodes where the tuples actually matched.
                    List<Node> backtrace = new ArrayList<Node>();
                    while (parents.containsKey(q)) {
                        if (!q.isBoundary() && q.hasMatch())
                            backtrace.add(q);
                        q = parents.get(q);
                    }
                    alignment = new ArrayList<Node>(backtrace);
                    Collections.reverse(alignment);
                    return;
                }

                closedSet.add(q);
                for (Node nb : q.adjacent()) {
                    if (closedSet.contains(nb))
                        continue;

                    // FIXME: move to appropriate location
                    // Heuristic bonus: progress made toward the diagonal
                    // that ends at the target cell.
                    int l = abs(indices.size() - shifts.size() - q.queryIndex +
                                q.databaseIndex) -
                            abs(indices.size() - shifts.size() -
                                nb.queryIndex +
                                nb.databaseIndex);

                    Integer oldScore = cost.get(nb);
                    Integer qScore = cost.get(q);
                    if (oldScore == null)
                        oldScore = Integer.MAX_VALUE;
                    if (qScore == null)
                        qScore = Integer.MAX_VALUE;

                    int newScore = qScore + nb.getValue() - l;
                    if (newScore < oldScore) {
                        cost.put(nb, newScore);
                        openSet.add(nb);
                        parents.put(nb, q);
                    }
                }
            }

            alignment = emptyList();
        }

        public List<Node> getIndices() {
            return alignment;
        }
    }

    private final int tupleSize;
    private final List<String> reftup;
    private final HashMap<String, ArrayList<Integer>> tupleIndex;
    private List<String> refWords;

    /**
     * Constructs new text aligner that serves requests for alignment of
     * sequence of words with the provided database sequence. Sequences are
     * aligned by tuples comprising one or more subsequent words.
     *
     * @param words list of words forming the database
     * @param tupleSize size of a tuple, must be greater or equal to 1
     */
    public LongTextAligner(List<String> words, int tupleSize) {
        assert words != null;
        assert tupleSize > 0;

        this.tupleSize = tupleSize;
        this.refWords = words;

        // Build an inverted index: tuple text -> positions in the database.
        int offset = 0;
        reftup = getTuples(words);
        tupleIndex = new HashMap<String, ArrayList<Integer>>();
        for (String tuple : reftup) {
            ArrayList<Integer> indexes = tupleIndex.get(tuple);
            if (indexes == null) {
                indexes = new ArrayList<Integer>();
                tupleIndex.put(tuple, indexes);
            }
            indexes.add(offset++);
        }
    }

    /**
     * Aligns query sequence with the previously built database.
     *
     * @param query list of words to look for
     * @return indices of alignment (-1 for unaligned query words)
     */
    public int[] align(List<String> query) {
        return align(query, new Range(0, refWords.size()));
    }

    /**
     * Aligns query sequence with the previously built database.
     *
     * @param words list of words to look for
     * @param range range of database to look for alignment
     * @return indices of alignment (-1 for unaligned query words)
     */
    public int[] align(List<String> words, Range range) {
        // Inputs shorter than one tuple fall back to plain edit-distance
        // alignment over the raw words.
        if (range.upperEndpoint() - range.lowerEndpoint() < tupleSize || words.size() < tupleSize) {
            return alignTextSimple(refWords.subList(range.lowerEndpoint(), range.upperEndpoint()), words, range.lowerEndpoint());
        }

        int[] result = new int[words.size()];
        fill(result, -1);

        // Expand each matched tuple back into per-word alignment indices.
        int lastIndex = 0;
        for (Alignment.Node node : new Alignment(getTuples(words), range)
                .getIndices()) {
            lastIndex = max(lastIndex, node.getQueryIndex());
            for (; lastIndex < node.getQueryIndex() + tupleSize; ++lastIndex)
                result[lastIndex] = node.getDatabaseIndex() + lastIndex -
                        node.getQueryIndex();
        }
        return result;
    }

    /**
     * Makes list of tuples of the given size out of list of words.
     *
     * @param words words
     * @return list of tuples of size {@link #tupleSize}
     */
    private List<String> getTuples(List<String> words) {
        List<String> result = new ArrayList<String>();
        LinkedList<String> tuple = new LinkedList<String>();
        Iterator<String> it = words.iterator();
        // Prime the sliding window with the first tupleSize - 1 words.
        for (int i = 0; i < tupleSize - 1; i++) {
            tuple.add(it.next());
        }
        while (it.hasNext()) {
            tuple.addLast(it.next());
            result.add(Utilities.join(tuple));
            tuple.removeFirst();
        }
        return result;
    }

    /**
     * Levenshtein-style alignment of a query against a database slice.
     * Fills the edit-distance table, then backtraces, recording for each
     * query word the database index it matched (or -1).
     *
     * @param database reference words
     * @param query words to align
     * @param offset index of the first database word in the full reference
     * @return per-query-word alignment indices
     */
    static int[] alignTextSimple(List<String> database, List<String> query,
            int offset) {
        int n = database.size() + 1;
        int m = query.size() + 1;
        int[][] f = new int[n][m];

        // Standard edit-distance initialization and fill.
        f[0][0] = 0;
        for (int i = 1; i < n; ++i) {
            f[i][0] = i;
        }

        for (int j = 1; j < m; ++j) {
            f[0][j] = j;
        }

        for (int i = 1; i < n; ++i) {
            for (int j = 1; j < m; ++j) {
                int match = f[i - 1][j - 1];
                String refWord = database.get(i - 1);
                String queryWord = query.get(j - 1);
                if (!refWord.equals(queryWord)) {
                    ++match;
                }
                int insert = f[i][j - 1] + 1;
                int delete = f[i - 1][j] + 1;
                f[i][j] = min(match, min(insert, delete));
            }
        }

        --n;
        --m;
        int[] alignment = new int[m];
        Arrays.fill(alignment, -1);
        while (m > 0) {
            if (n == 0) {
                --m;
            } else {
                String refWord = database.get(n - 1);
                String queryWord = query.get(m - 1);
                // Take the diagonal (match) step when it is no worse than
                // deleting or inserting. BUGFIX: the first condition used to
                // compare f[n-1][m-1] against itself (always true); it must
                // be compared against the deletion cell f[n-1][m].
                if (f[n - 1][m - 1] <= f[n - 1][m]
                        && f[n - 1][m - 1] <= f[n][m - 1]
                        && refWord.equals(queryWord)) {
                    alignment[--m] = --n + offset;
                } else {
                    if (f[n - 1][m] < f[n][m - 1]) {
                        --n;
                    } else {
                        --m;
                    }
                }
            }
        }

        return alignment;
    }
}

View file

@ -0,0 +1,36 @@
/*
* Copyright 2014 Alpha Cephei Inc.
* All Rights Reserved. Use is subject to license terms.
*
* See the file "license.terms" for information on usage and
* redistribution of this file, and for a DISCLAIMER OF ALL
* WARRANTIES.
*/
package edu.cmu.sphinx.alignment;
import java.util.Arrays;
import java.util.List;
public class SimpleTokenizer implements TextTokenizer {
public List<String> expand(String text) {
text = text.replace('', '\'');
text = text.replace('', ' ');
text = text.replace('”', ' ');
text = text.replace('“', ' ');
text = text.replace('"', ' ');
text = text.replace('»', ' ');
text = text.replace('«', ' ');
text = text.replace('', '-');
text = text.replace('—', ' ');
text = text.replace('…', ' ');
text = text.replace(" - ", " ");
text = text.replaceAll("[/_*%]", " ");
text = text.toLowerCase();
String[] tokens = text.split("[.,?:!;()]");
return Arrays.asList(tokens);
}
}

View file

@ -0,0 +1,25 @@
/*
* Copyright 2014 Alpha Cephei Inc.
* All Rights Reserved. Use is subject to license terms.
*
* See the file "license.terms" for information on usage and
* redistribution of this file, and for a DISCLAIMER OF ALL
* WARRANTIES.
*
*/
package edu.cmu.sphinx.alignment;
import java.util.List;
/**
 * Strategy interface for splitting raw text into a sequence of chunks
 * prior to alignment.
 */
public interface TextTokenizer {
    /**
     * Cleans the text and returns the list of lines.
     *
     * @param text Input text
     * @return a list of lines in the text.
     */
    List<String> expand(String text);
}

View file

@ -0,0 +1,158 @@
/**
* Portions Copyright 2001 Sun Microsystems, Inc.
* Portions Copyright 1999-2001 Language Technologies Institute,
* Carnegie Mellon University.
* All Rights Reserved. Use is subject to license terms.
*
* See the file "license.terms" for information on usage and
* redistribution of this file, and for a DISCLAIMER OF ALL
* WARRANTIES.
*/
package edu.cmu.sphinx.alignment;
/**
* Contains a parsed token from a Tokenizer.
*/
public class Token {
    // The word itself; null until setWord is called.
    private String token = null;
    // Whitespace that preceded the word in the original input, or null.
    private String whitespace = null;
    // Punctuation attached before the word (e.g. an opening quote), or null.
    private String prepunctuation = null;
    // Punctuation attached after the word (e.g. a period), or null.
    private String postpunctuation = null;
    private int position = 0; // position in the original input text
    private int lineNumber = 0;

    /**
     * Returns the whitespace characters of this Token.
     *
     * @return the whitespace characters of this Token; null if this Token does
     *         not use whitespace characters
     */
    public String getWhitespace() {
        return whitespace;
    }

    /**
     * Returns the prepunctuation characters of this Token.
     *
     * @return the prepunctuation characters of this Token; null if this Token
     *         does not use prepunctuation characters
     */
    public String getPrepunctuation() {
        return prepunctuation;
    }

    /**
     * Returns the postpunctuation characters of this Token.
     *
     * @return the postpunctuation characters of this Token; null if this Token
     *         does not use postpunctuation characters
     */
    public String getPostpunctuation() {
        return postpunctuation;
    }

    /**
     * Returns the position of this token in the original input text.
     *
     * @return the position of this token in the original input text
     */
    public int getPosition() {
        return position;
    }

    /**
     * Returns the line of this token in the original text.
     *
     * @return the line of this token in the original text
     */
    public int getLineNumber() {
        return lineNumber;
    }

    /**
     * Sets the whitespace characters of this Token.
     *
     * @param whitespace the whitespace character for this token
     */
    public void setWhitespace(String whitespace) {
        this.whitespace = whitespace;
    }

    /**
     * Sets the prepunctuation characters of this Token.
     *
     * @param prepunctuation the prepunctuation characters
     */
    public void setPrepunctuation(String prepunctuation) {
        this.prepunctuation = prepunctuation;
    }

    /**
     * Sets the postpunctuation characters of this Token.
     *
     * @param postpunctuation the postpunctuation characters
     */
    public void setPostpunctuation(String postpunctuation) {
        this.postpunctuation = postpunctuation;
    }

    /**
     * Sets the position of the token in the original input text.
     *
     * @param position the position of the input text
     */
    public void setPosition(int position) {
        this.position = position;
    }

    /**
     * Set the line of this token in the original text.
     *
     * @param lineNumber the line of this token in the original text
     */
    public void setLineNumber(int lineNumber) {
        this.lineNumber = lineNumber;
    }

    /**
     * Returns the string associated with this token.
     *
     * @return the token if it exists; otherwise null
     */
    public String getWord() {
        return token;
    }

    /**
     * Sets the string of this Token.
     *
     * @param word the word for this token
     */
    public void setWord(String word) {
        token = word;
    }

    /**
     * Converts this token to a string: whitespace, prepunctuation, word and
     * postpunctuation concatenated in that order, skipping null parts.
     *
     * @return the string representation of this object
     */
    @Override
    public String toString() {
        // StringBuilder replaces the needlessly synchronized StringBuffer.
        StringBuilder fullToken = new StringBuilder();
        if (whitespace != null) {
            fullToken.append(whitespace);
        }
        if (prepunctuation != null) {
            fullToken.append(prepunctuation);
        }
        if (token != null) {
            fullToken.append(token);
        }
        if (postpunctuation != null) {
            fullToken.append(postpunctuation);
        }
        return fullToken.toString();
    }
}

View file

@ -0,0 +1,405 @@
/**
* Portions Copyright 2001 Sun Microsystems, Inc.
* Portions Copyright 1999-2001 Language Technologies Institute,
* Carnegie Mellon University.
* All Rights Reserved. Use is subject to license terms.
*
* See the file "license.terms" for information on usage and
* redistribution of this file, and for a DISCLAIMER OF ALL
* WARRANTIES.
*/
package edu.cmu.sphinx.alignment.tokenizer;
import java.io.IOException;
import java.io.Reader;
import java.util.Iterator;
import edu.cmu.sphinx.alignment.Token;
/**
* Implements the tokenizer interface. Breaks an input sequence of characters
* into a set of tokens.
*/
public class CharTokenizer implements Iterator<Token> {
    /** A constant indicating that the end of the stream has been read. */
    public static final int EOF = -1;

    /** A string containing the default whitespace characters. */
    public static final String DEFAULT_WHITESPACE_SYMBOLS = " \t\n\r";

    /** A string containing the default single characters. */
    public static final String DEFAULT_SINGLE_CHAR_SYMBOLS = "(){}[]";

    /** A string containing the default pre-punctuation characters. */
    public static final String DEFAULT_PREPUNCTUATION_SYMBOLS = "\"'`({[";

    /** A string containing the default post-punctuation characters. */
    public static final String DEFAULT_POSTPUNCTUATION_SYMBOLS =
            "\"'`.,:;!?(){}[]";

    /** The line number of the current character. */
    private int lineNumber;

    /** The input text (from the Utterance) to tokenize. */
    private String inputText;

    /** The file to read input text from, if using file mode. */
    private Reader reader;

    /** The current character, whether its from the file or the input text. */
    private int currentChar;

    /**
     * The current char position for the input text (not the file) this is
     * called "file_pos" in flite
     */
    private int currentPosition;

    /** The delimiting symbols of this tokenizer. */
    private String whitespaceSymbols = DEFAULT_WHITESPACE_SYMBOLS;
    private String singleCharSymbols = DEFAULT_SINGLE_CHAR_SYMBOLS;
    private String prepunctuationSymbols = DEFAULT_PREPUNCTUATION_SYMBOLS;
    private String postpunctuationSymbols = DEFAULT_POSTPUNCTUATION_SYMBOLS;

    /** Description of the last read error, or null if none occurred. */
    private String errorDescription;

    /** The token most recently produced by {@link #next()}. */
    private Token token;

    /** The token produced before {@link #token}; used for sentence breaks. */
    private Token lastToken;

    /**
     * Constructs a Tokenizer with no input; call {@link #setInputText} or
     * {@link #setInputReader} before iterating.
     */
    public CharTokenizer() {}

    /**
     * Creates a tokenizer that will return tokens from the given string.
     *
     * @param string the string to tokenize
     */
    public CharTokenizer(String string) {
        setInputText(string);
    }

    /**
     * Creates a tokenizer that will return tokens from the given file.
     *
     * @param file where to read the input from
     */
    public CharTokenizer(Reader file) {
        setInputReader(file);
    }

    /**
     * Sets the whitespace symbols of this Tokenizer to the given symbols.
     *
     * @param symbols the whitespace symbols
     */
    public void setWhitespaceSymbols(String symbols) {
        whitespaceSymbols = symbols;
    }

    /**
     * Sets the single character symbols of this Tokenizer to the given
     * symbols.
     *
     * @param symbols the single character symbols
     */
    public void setSingleCharSymbols(String symbols) {
        singleCharSymbols = symbols;
    }

    /**
     * Sets the prepunctuation symbols of this Tokenizer to the given symbols.
     *
     * @param symbols the prepunctuation symbols
     */
    public void setPrepunctuationSymbols(String symbols) {
        prepunctuationSymbols = symbols;
    }

    /**
     * Sets the postpunctuation symbols of this Tokenizer to the given symbols.
     *
     * @param symbols the postpunctuation symbols
     */
    public void setPostpunctuationSymbols(String symbols) {
        postpunctuationSymbols = symbols;
    }

    /**
     * Sets the text to tokenize.
     *
     * @param inputString the string to tokenize
     */
    public void setInputText(String inputString) {
        inputText = inputString;
        currentPosition = 0;
        if (inputText != null) {
            getNextChar();
        }
    }

    /**
     * Sets the input reader
     *
     * @param reader the input source
     */
    public void setInputReader(Reader reader) {
        this.reader = reader;
        getNextChar();
    }

    /**
     * Returns the next token.
     *
     * @return the next token if it exists, <code>null</code> if no more tokens
     */
    public Token next() {
        lastToken = token;
        token = new Token();
        // Skip (and record) leading whitespace.
        token.setWhitespace(getTokenOfCharClass(whitespaceSymbols));
        // quoted strings currently ignored
        // get prepunctuation
        token.setPrepunctuation(getTokenOfCharClass(prepunctuationSymbols));
        // get the symbol itself; a single-char symbol is a token on its own
        if (singleCharSymbols.indexOf(currentChar) != -1) {
            token.setWord(String.valueOf((char) currentChar));
            getNextChar();
        } else {
            token.setWord(getTokenNotOfCharClass(whitespaceSymbols));
        }
        token.setPosition(currentPosition);
        token.setLineNumber(lineNumber);
        // The word currently includes trailing postpunctuation; split it off.
        removeTokenPostpunctuation();
        return token;
    }

    /**
     * Returns <code>true</code> if there are more tokens, <code>false</code>
     * otherwise.
     *
     * @return <code>true</code> if there are more tokens <code>false</code>
     *         otherwise
     */
    public boolean hasNext() {
        // Simplified: the temporary copy of currentChar served no purpose.
        return currentChar != EOF;
    }

    public void remove() {
        throw new UnsupportedOperationException();
    }

    /**
     * Advances the currentPosition pointer by 1 (if not exceeding length of
     * inputText, and returns the character pointed by currentPosition.
     *
     * @return the next character EOF if no more characters exist
     */
    private int getNextChar() {
        if (reader != null) {
            try {
                int readVal = reader.read();
                if (readVal == -1) {
                    currentChar = EOF;
                } else {
                    currentChar = (char) readVal;
                }
            } catch (IOException ioe) {
                // Best effort: record the failure and treat it as EOF;
                // callers can inspect it via hasErrors()/getErrorDescription().
                currentChar = EOF;
                errorDescription = ioe.getMessage();
            }
        } else if (inputText != null) {
            if (currentPosition < inputText.length()) {
                currentChar = (int) inputText.charAt(currentPosition);
            } else {
                currentChar = EOF;
            }
        }
        if (currentChar != EOF) {
            currentPosition++;
        }
        if (currentChar == '\n') {
            lineNumber++;
        }
        return currentChar;
    }

    /**
     * Starting from the current position of the input text, returns the
     * subsequent characters of type charClass, and not of type
     * singleCharSymbols.
     *
     * @param charClass the type of characters to look for
     *
     * @return a string of characters starting from the current position of the
     *         input text, until it encounters a character not in the string
     *         charClass
     */
    private String getTokenOfCharClass(String charClass) {
        return getTokenByCharClass(charClass, true);
    }

    /**
     * Starting from the current position of the input text/file, returns the
     * subsequent characters, not of type singleCharSymbols, and ended at
     * characters of type endingCharClass. E.g., if the current string is
     * "xxxxyyy", endingCharClass is "yz", and singleCharClass "abc". Then this
     * method will return to "xxxx".
     *
     * @param endingCharClass the type of characters to look for
     *
     * @return a string of characters from the current position until it
     *         encounters characters in endingCharClass
     */
    private String getTokenNotOfCharClass(String endingCharClass) {
        return getTokenByCharClass(endingCharClass, false);
    }

    /**
     * Provides a `compressed' method from getTokenOfCharClass() and
     * getTokenNotOfCharClass(). If parameter containThisCharClass is
     * <code>true</code>, then a string from the current position to the last
     * character in charClass is returned. If containThisCharClass is
     * <code>false</code>, then a string before the first occurrence of a
     * character in containThisCharClass is returned.
     *
     * @param charClass the string of characters you want included or excluded
     *        in your return
     * @param containThisCharClass determines if you want characters in
     *        charClass in the returned string or not
     *
     * @return a string of characters from the current position until it
     *         encounters characters in endingCharClass
     */
    private String getTokenByCharClass(String charClass,
            boolean containThisCharClass) {
        final StringBuilder buffer = new StringBuilder();
        // Accumulate while membership in charClass matches the requested
        // polarity, stopping at single-char symbols or EOF.
        while ((charClass.indexOf(currentChar) != -1) == containThisCharClass
                && singleCharSymbols.indexOf(currentChar) == -1
                && currentChar != EOF) {
            buffer.append((char) currentChar);
            getNextChar();
        }
        return buffer.toString();
    }

    /**
     * Removes the postpunctuation characters from the current token. Copies
     * those postpunctuation characters to the class variable
     * 'postpunctuation'.
     */
    private void removeTokenPostpunctuation() {
        if (token == null) {
            return;
        }
        final String tokenWord = token.getWord();
        int tokenLength = tokenWord.length();
        // Scan backwards over trailing postpunctuation (position 0 is always
        // kept so a pure-punctuation token retains one character).
        int position = tokenLength - 1;
        while (position > 0
                && postpunctuationSymbols.indexOf((int) tokenWord
                        .charAt(position)) != -1) {
            position--;
        }
        if (tokenLength - 1 != position) {
            // Copy postpunctuation from token
            token.setPostpunctuation(tokenWord.substring(position + 1));
            // truncate token at postpunctuation
            token.setWord(tokenWord.substring(0, position + 1));
        } else {
            token.setPostpunctuation("");
        }
    }

    /**
     * Returns <code>true</code> if there were errors while reading tokens
     *
     * @return <code>true</code> if there were errors; <code>false</code>
     *         otherwise
     */
    public boolean hasErrors() {
        return errorDescription != null;
    }

    /**
     * if hasErrors returns <code>true</code>, this will return a description
     * of the error encountered, otherwise it will return <code>null</code>
     *
     * @return a description of the last error that occurred.
     */
    public String getErrorDescription() {
        return errorDescription;
    }

    /**
     * Determines if the current token should start a new sentence.
     *
     * @return <code>true</code> if a new sentence should be started
     */
    public boolean isSentenceSeparator() {
        // BUG FIX: check for null *before* dereferencing. The previous
        // version read token.getWhitespace() first, which made its own
        // null guards dead code (and could throw NPE).
        if (lastToken == null || token == null) {
            return false;
        }
        String tokenWhiteSpace = token.getWhitespace();
        String lastTokenPostpunctuation = lastToken.getPostpunctuation();
        if (tokenWhiteSpace == null || lastTokenPostpunctuation == null) {
            return false;
        }
        if (tokenWhiteSpace.indexOf('\n') != tokenWhiteSpace
                .lastIndexOf('\n')) {
            // Two or more newlines between tokens: treat as a break.
            return true;
        } else if (lastTokenPostpunctuation.indexOf(':') != -1
                || lastTokenPostpunctuation.indexOf('?') != -1
                || lastTokenPostpunctuation.indexOf('!') != -1) {
            return true;
        } else if (lastTokenPostpunctuation.indexOf('.') != -1
                && tokenWhiteSpace.length() > 1
                && Character.isUpperCase(token.getWord().charAt(0))) {
            return true;
        } else {
            String lastWord = lastToken.getWord();
            int lastWordLength = lastWord.length();
            if (lastTokenPostpunctuation.indexOf('.') != -1
                    &&
                    /* next word starts with a capital */
                    Character.isUpperCase(token.getWord().charAt(0))
                    &&
                    /* last word isn't an abbreviation */
                    !(Character.isUpperCase(lastWord
                            .charAt(lastWordLength - 1)) || (lastWordLength < 4 && Character
                            .isUpperCase(lastWord.charAt(0))))) {
                return true;
            }
        }
        return false;
    }
}

View file

@ -0,0 +1,608 @@
/**
* Portions Copyright 2001 Sun Microsystems, Inc.
* Portions Copyright 1999-2001 Language Technologies Institute,
* Carnegie Mellon University.
* All Rights Reserved. Use is subject to license terms.
*
* See the file "license.terms" for information on usage and
* redistribution of this file, and for a DISCLAIMER OF ALL
* WARRANTIES.
*/
package edu.cmu.sphinx.alignment.tokenizer;
import java.io.*;
import java.net.URL;
import java.util.StringTokenizer;
import java.util.logging.Logger;
import java.util.regex.Pattern;
/**
* Implementation of a Classification and Regression Tree (CART) that is used
* more like a binary decision tree, with each node containing a decision or a
* final value. The decision nodes in the CART trees operate on an Item and
* have the following format:
*
* <pre>
* NODE feat operand value qfalse
* </pre>
*
* <p>
* Where <code>feat</code> is an string that represents a feature to pass to
* the <code>findFeature</code> method of an item.
*
* <p>
* The <code>value</code> represents the value to be compared against the
* feature obtained from the item via the <code>feat</code> string. The
* <code>operand</code> is the operation to do the comparison. The available
* operands are as follows:
*
* <ul>
* <li>&lt; - the feature is less than value
* <li>=- the feature is equal to the value
* <li>&gt;- the feature is greater than the value
* <li>MATCHES - the feature matches the regular expression stored in value
* <li>IN - [[[TODO: still guessing because none of the CART's in Flite seem to
* use IN]]] the value is in the list defined by the feature.
* </ul>
*
* <p>
* [[[TODO: provide support for the IN operator.]]]
*
* <p>
* For &lt; and &gt;, this CART coerces the value and feature to float's. For =,
* this CART coerces the value and feature to string and checks for string
* equality. For MATCHES, this CART uses the value as a regular expression and
* compares the obtained feature to that.
*
* <p>
* A CART is represented by an array in this implementation. The
* <code>qfalse</code> value represents the index of the array to go to if the
* comparison does not match. In this implementation, qtrue index is always
* implied, and represents the next element in the array. The root node of the
* CART is the first element in the array.
*
* <p>
* The interpretations always start at the root node of the CART and continue
* until a final node is found. The final nodes have the following form:
*
* <pre>
* LEAF value
* </pre>
*
* <p>
* Where <code>value</code> represents the value of the node. Reaching a final
* node indicates the interpretation is over and the value of the node is the
* interpretation result.
*/
public class DecisionTree {
    /** Logger instance. */
    private static final Logger logger = Logger.getLogger(DecisionTree.class.getSimpleName());

    /**
     * Entry in file represents the total number of nodes in the file. This
     * should be at the top of the file. The format should be "TOTAL n" where n
     * is an integer value.
     */
    final static String TOTAL = "TOTAL";

    /**
     * Entry in file represents a node. The format should be
     * "NODE feat op val f" where 'feat' represents a feature, op represents an
     * operand, val is the value, and f is the index of the node to go to is
     * there isn't a match.
     */
    final static String NODE = "NODE";

    /**
     * Entry in file represents a final node. The format should be "LEAF val"
     * where val represents the value.
     */
    final static String LEAF = "LEAF";

    /**
     * OPERAND_MATCHES
     */
    final static String OPERAND_MATCHES = "MATCHES";

    /**
     * The CART. Entries can be DecisionNode or LeafNode. An ArrayList could be
     * used here -- I chose not to because I thought it might be quicker to
     * avoid dealing with the dynamic resizing.
     */
    Node[] cart = null;

    /**
     * The number of nodes in the CART.
     */
    transient int curNode = 0;

    /**
     * Creates a new CART by reading from the given URL.
     *
     * @param url the location of the CART data
     *
     * @throws IOException if errors occur while reading the data
     */
    public DecisionTree(URL url) throws IOException {
        BufferedReader reader;
        String line;
        reader = new BufferedReader(new InputStreamReader(url.openStream()));
        line = reader.readLine();
        while (line != null) {
            if (!line.startsWith("***")) {
                parseAndAdd(line);
            }
            line = reader.readLine();
        }
        reader.close();
    }

    /**
     * Creates a new CART by reading from the given reader.
     *
     * @param reader the source of the CART data
     * @param nodes the number of nodes to read for this cart
     *
     * @throws IOException if errors occur while reading the data
     */
    public DecisionTree(BufferedReader reader, int nodes) throws IOException {
        this(nodes);
        String line;
        for (int i = 0; i < nodes; i++) {
            line = reader.readLine();
            if (!line.startsWith("***")) {
                parseAndAdd(line);
            }
        }
    }

    /**
     * Creates a new CART that will be populated with nodes later.
     *
     * @param numNodes the number of nodes
     */
    private DecisionTree(int numNodes) {
        cart = new Node[numNodes];
    }

    /**
     * Dump the CART tree as a dot file.
     * <p>
     * The dot tool is part of the graphviz distribution at <a
     * href="http://www.graphviz.org/">http://www.graphviz.org/</a>. If
     * installed, call it as "dot -O -Tpdf *.dot" from the console to generate
     * pdfs.
     * </p>
     *
     * @param out The PrintWriter to write to.
     */
    public void dumpDot(PrintWriter out) {
        out.write("digraph \"" + "CART Tree" + "\" {\n");
        out.write("rankdir = LR\n");
        for (Node n : cart) {
            // Math.abs(hashCode) is used as a (best-effort) unique node id.
            out.println("\tnode" + Math.abs(n.hashCode()) + " [ label=\""
                    + n.toString() + "\", color=" + dumpDotNodeColor(n)
                    + ", shape=" + dumpDotNodeShape(n) + " ]\n");
            if (n instanceof DecisionNode) {
                DecisionNode dn = (DecisionNode) n;
                if (dn.qtrue < cart.length && cart[dn.qtrue] != null) {
                    out.write("\tnode" + Math.abs(n.hashCode()) + " -> node"
                            + Math.abs(cart[dn.qtrue].hashCode())
                            + " [ label=" + "TRUE" + " ]\n");
                }
                if (dn.qfalse < cart.length && cart[dn.qfalse] != null) {
                    out.write("\tnode" + Math.abs(n.hashCode()) + " -> node"
                            + Math.abs(cart[dn.qfalse].hashCode())
                            + " [ label=" + "FALSE" + " ]\n");
                }
            }
        }
        out.write("}\n");
        out.close();
    }

    protected String dumpDotNodeColor(Node n) {
        if (n instanceof LeafNode) {
            return "green";
        }
        return "red";
    }

    protected String dumpDotNodeShape(Node n) {
        return "box";
    }

    /**
     * Creates a node from the given input line and add it to the CART. It
     * expects the TOTAL line to come before any of the nodes.
     *
     * @param line a line of input to parse
     */
    protected void parseAndAdd(String line) {
        StringTokenizer tokenizer = new StringTokenizer(line, " ");
        String type = tokenizer.nextToken();
        if (type.equals(LEAF) || type.equals(NODE)) {
            // Fail with a clear message instead of an NPE if TOTAL was
            // missing or came after the first node.
            if (cart == null) {
                throw new Error("TOTAL line must precede the first node: "
                        + line);
            }
            cart[curNode] = getNode(type, tokenizer, curNode);
            cart[curNode].setCreationLine(line);
            curNode++;
        } else if (type.equals(TOTAL)) {
            cart = new Node[Integer.parseInt(tokenizer.nextToken())];
            curNode = 0;
        } else {
            throw new Error("Invalid CART type: " + type);
        }
    }

    /**
     * Gets the node based upon the type and tokenizer.
     *
     * @param type <code>NODE</code> or <code>LEAF</code>
     * @param tokenizer the StringTokenizer containing the data to get
     * @param currentNode the index of the current node we're looking at
     *
     * @return the node, or null if the type is neither NODE nor LEAF
     */
    protected Node getNode(String type, StringTokenizer tokenizer,
            int currentNode) {
        if (type.equals(NODE)) {
            String feature = tokenizer.nextToken();
            String operand = tokenizer.nextToken();
            Object value = parseValue(tokenizer.nextToken());
            int qfalse = Integer.parseInt(tokenizer.nextToken());
            if (operand.equals(OPERAND_MATCHES)) {
                return new MatchingNode(feature, value.toString(),
                        currentNode + 1, qfalse);
            } else {
                return new ComparisonNode(feature, value, operand,
                        currentNode + 1, qfalse);
            }
        } else if (type.equals(LEAF)) {
            return new LeafNode(parseValue(tokenizer.nextToken()));
        }
        return null;
    }

    /**
     * Coerces a string into a value.
     *
     * @param string of the form "type(value)"; for example, "Float(2.3)"
     *
     * @return the value
     */
    protected Object parseValue(String string) {
        int openParen = string.indexOf("(");
        String type = string.substring(0, openParen);
        String value = string.substring(openParen + 1, string.length() - 1);
        if (type.equals("String")) {
            return value;
        } else if (type.equals("Float")) {
            // valueOf replaces the deprecated boxing constructors.
            return Float.valueOf(value);
        } else if (type.equals("Integer")) {
            return Integer.valueOf(value);
        } else if (type.equals("List")) {
            StringTokenizer tok = new StringTokenizer(value, ",");
            int size = tok.countTokens();
            int[] values = new int[size];
            for (int i = 0; i < size; i++) {
                float fval = Float.parseFloat(tok.nextToken());
                values[i] = Math.round(fval);
            }
            return values;
        } else {
            throw new Error("Unknown type: " + type);
        }
    }

    /**
     * Passes the given item through this CART and returns the interpretation.
     *
     * @param item the item to analyze
     *
     * @return the interpretation
     */
    public Object interpret(Item item) {
        int nodeIndex = 0;
        DecisionNode decision;
        // Walk decision nodes until a leaf is reached.
        while (!(cart[nodeIndex] instanceof LeafNode)) {
            decision = (DecisionNode) cart[nodeIndex];
            nodeIndex = decision.getNextNode(item);
        }
        logger.fine("LEAF " + cart[nodeIndex].getValue());
        return ((LeafNode) cart[nodeIndex]).getValue();
    }

    /**
     * A node for the CART.
     */
    static abstract class Node {
        /**
         * The value of this node.
         */
        protected Object value;

        /**
         * Create a new Node with the given value.
         */
        public Node(Object value) {
            this.value = value;
        }

        /**
         * Get the value.
         */
        public Object getValue() {
            return value;
        }

        /**
         * Return a string representation of the type of the value.
         */
        public String getValueString() {
            if (value == null) {
                return "NULL()";
            } else if (value instanceof String) {
                return "String(" + value.toString() + ")";
            } else if (value instanceof Float) {
                return "Float(" + value.toString() + ")";
            } else if (value instanceof Integer) {
                return "Integer(" + value.toString() + ")";
            } else {
                return value.getClass().toString() + "(" + value.toString()
                        + ")";
            }
        }

        /**
         * sets the line of text used to create this node.
         *
         * @param line the creation line
         */
        public void setCreationLine(String line) {}
    }

    /**
     * A decision node that determines the next Node to go to in the CART.
     */
    abstract static class DecisionNode extends Node {
        /**
         * The feature used to find a value from an Item.
         */
        private PathExtractor path;

        /**
         * Index of Node to go to if the comparison doesn't match.
         */
        protected int qfalse;

        /**
         * Index of Node to go to if the comparison matches.
         */
        protected int qtrue;

        /**
         * The feature used to find a value from an Item.
         */
        public String getFeature() {
            return path.toString();
        }

        /**
         * Find the feature associated with this DecisionNode and the given
         * item
         *
         * @param item the item to start from
         * @return the object representing the feature
         */
        public Object findFeature(Item item) {
            return path.findFeature(item);
        }

        /**
         * Returns the next node based upon the decision determined at this
         * node
         *
         * @param item the current item.
         * @return the index of the next node
         */
        public final int getNextNode(Item item) {
            return getNextNode(findFeature(item));
        }

        /**
         * Create a new DecisionNode.
         *
         * @param feature the string used to get a value from an Item
         * @param value the value to compare to
         * @param qtrue the Node index to go to if the comparison matches
         * @param qfalse the Node machine index to go to upon no match
         */
        public DecisionNode(String feature, Object value, int qtrue, int qfalse) {
            super(value);
            this.path = new PathExtractor(feature, true);
            this.qtrue = qtrue;
            this.qfalse = qfalse;
        }

        /**
         * Get the next Node to go to in the CART. The return value is an index
         * in the CART.
         */
        abstract public int getNextNode(Object val);
    }

    /**
     * A decision Node that compares two values.
     */
    static class ComparisonNode extends DecisionNode {
        /**
         * LESS_THAN
         */
        final static String LESS_THAN = "<";

        /**
         * EQUALS
         */
        final static String EQUALS = "=";

        /**
         * GREATER_THAN
         */
        final static String GREATER_THAN = ">";

        /**
         * The comparison type. One of LESS_THAN, GREATER_THAN, or EQUAL_TO.
         */
        String comparisonType;

        /**
         * Create a new ComparisonNode with the given values.
         *
         * @param feature the string used to get a value from an Item
         * @param value the value to compare to
         * @param comparisonType one of LESS_THAN, EQUAL_TO, or GREATER_THAN
         * @param qtrue the Node index to go to if the comparison matches
         * @param qfalse the Node index to go to upon no match
         */
        public ComparisonNode(String feature, Object value,
                String comparisonType, int qtrue, int qfalse) {
            super(feature, value, qtrue, qfalse);
            if (!comparisonType.equals(LESS_THAN)
                    && !comparisonType.equals(EQUALS)
                    && !comparisonType.equals(GREATER_THAN)) {
                throw new Error("Invalid comparison type: " + comparisonType);
            } else {
                this.comparisonType = comparisonType;
            }
        }

        /**
         * Compare the given value and return the appropriate Node index.
         * IMPLEMENTATION NOTE: LESS_THAN and GREATER_THAN, the Node's value
         * and the value passed in are converted to floating point values. For
         * EQUAL, the Node's value and the value passed in are treated as
         * String compares. This is the way of Flite, so be it Flite.
         *
         * @param val the value to compare
         */
        @Override
        public int getNextNode(Object val) {
            boolean yes = false;
            int ret;
            if (comparisonType.equals(LESS_THAN)
                    || comparisonType.equals(GREATER_THAN)) {
                float cart_fval;
                float fval;
                if (value instanceof Float) {
                    cart_fval = ((Float) value).floatValue();
                } else {
                    cart_fval = Float.parseFloat(value.toString());
                }
                if (val instanceof Float) {
                    fval = ((Float) val).floatValue();
                } else {
                    fval = Float.parseFloat(val.toString());
                }
                if (comparisonType.equals(LESS_THAN)) {
                    yes = (fval < cart_fval);
                } else {
                    yes = (fval > cart_fval);
                }
            } else { // comparisonType = "="
                String sval = val.toString();
                String cart_sval = value.toString();
                yes = sval.equals(cart_sval);
            }
            if (yes) {
                ret = qtrue;
            } else {
                ret = qfalse;
            }
            logger.fine(trace(val, yes, ret));
            return ret;
        }

        private String trace(Object value, boolean match, int next) {
            return "NODE " + getFeature() + " [" + value + "] "
                    + comparisonType + " [" + getValue() + "] "
                    + (match ? "Yes" : "No") + " next " + next;
        }

        /**
         * Get a string representation of this Node.
         */
        @Override
        public String toString() {
            return "NODE " + getFeature() + " " + comparisonType + " "
                    + getValueString() + " " + Integer.toString(qtrue) + " "
                    + Integer.toString(qfalse);
        }
    }

    /**
     * A Node that checks for a regular expression match.
     */
    static class MatchingNode extends DecisionNode {
        Pattern pattern;

        /**
         * Create a new MatchingNode with the given values.
         *
         * @param feature the string used to get a value from an Item
         * @param regex the regular expression
         * @param qtrue the Node index to go to if the comparison matches
         * @param qfalse the Node index to go to upon no match
         */
        public MatchingNode(String feature, String regex, int qtrue, int qfalse) {
            super(feature, regex, qtrue, qfalse);
            this.pattern = Pattern.compile(regex);
        }

        /**
         * Compare the given value and return the appropriate CART index.
         *
         * @param val the value to compare -- this must be a String
         */
        @Override
        public int getNextNode(Object val) {
            return pattern.matcher((String) val).matches() ? qtrue : qfalse;
        }

        /**
         * Get a string representation of this Node.
         */
        @Override
        public String toString() {
            // StringBuilder replaces the needlessly synchronized StringBuffer.
            StringBuilder buf =
                    new StringBuilder(NODE + " " + getFeature() + " "
                            + OPERAND_MATCHES);
            buf.append(getValueString() + " ");
            buf.append(Integer.toString(qtrue) + " ");
            buf.append(Integer.toString(qfalse));
            return buf.toString();
        }
    }

    /**
     * The final Node of a CART. This just a marker class.
     */
    static class LeafNode extends Node {
        /**
         * Create a new LeafNode with the given value.
         *
         * @param value the value of this LeafNode
         */
        public LeafNode(Object value) {
            super(value);
        }

        /**
         * Get a string representation of this Node.
         */
        @Override
        public String toString() {
            return "LEAF " + getValueString();
        }
    }
}

View file

@ -0,0 +1,145 @@
/**
* Portions Copyright 2001 Sun Microsystems, Inc.
* Portions Copyright 1999-2001 Language Technologies Institute,
* Carnegie Mellon University.
* All Rights Reserved. Use is subject to license terms.
*
* See the file "license.terms" for information on usage and
* redistribution of this file, and for a DISCLAIMER OF ALL
* WARRANTIES.
*/
package edu.cmu.sphinx.alignment.tokenizer;
import java.text.DecimalFormat;
import java.util.LinkedHashMap;
import java.util.Map;
/**
* Implementation of the FeatureSet interface.
*/
public class FeatureSet {
    /** Backing store; LinkedHashMap preserves feature insertion order. */
    private final Map<String, Object> featureMap;

    // NOTE(review): never read or written inside this class -- presumably
    // referenced by package-level callers; confirm before removing.
    static DecimalFormat formatter;

    /**
     * Creates a new empty feature set.
     */
    public FeatureSet() {
        featureMap = new LinkedHashMap<String, Object>();
    }

    /**
     * Determines if the given feature is present.
     *
     * @param name the name of the feature of interest
     *
     * @return true if the named feature is present
     */
    public boolean isPresent(String name) {
        return featureMap.containsKey(name);
    }

    /**
     * Removes the named feature from this set of features.
     *
     * @param name the name of the feature of interest
     */
    public void remove(String name) {
        featureMap.remove(name);
    }

    /**
     * Convenience method that returns the named feature as a string.
     *
     * @param name the name of the feature
     *
     * @return the value associated with the name or null if the value is not
     *         found
     *
     * @throws ClassCastException if the associated value is not a String
     */
    public String getString(String name) {
        return (String) getObject(name);
    }

    /**
     * Convenience method that returns the named feature as a int.
     *
     * @param name the name of the feature
     *
     * @return the value associated with the name
     *
     * @throws ClassCastException if the associated value is not an Integer
     * @throws NullPointerException if the feature is absent (the boxed value
     *         is unboxed unconditionally)
     */
    public int getInt(String name) {
        return ((Integer) getObject(name)).intValue();
    }

    /**
     * Convenience method that returns the named feature as a float.
     *
     * @param name the name of the feature
     *
     * @return the value associated with the name
     *
     * @throws ClassCastException if the associated value is not a Float
     * @throws NullPointerException if the feature is absent (the boxed value
     *         is unboxed unconditionally)
     */
    public float getFloat(String name) {
        return ((Float) getObject(name)).floatValue();
    }

    /**
     * Returns the named feature as an object.
     *
     * @param name the name of the feature
     *
     * @return the value associated with the name or null if the value is not
     *         found
     */
    public Object getObject(String name) {
        return featureMap.get(name);
    }

    /**
     * Convenience method that sets the named feature as a int.
     *
     * @param name the name of the feature
     * @param value the value of the feature
     */
    public void setInt(String name, int value) {
        // valueOf uses the boxed-value cache; the deprecated
        // "new Integer(value)" constructor always allocated.
        setObject(name, Integer.valueOf(value));
    }

    /**
     * Convenience method that sets the named feature as a float.
     *
     * @param name the name of the feature
     * @param value the value of the feature
     */
    public void setFloat(String name, float value) {
        setObject(name, Float.valueOf(value));
    }

    /**
     * Convenience method that sets the named feature as a String.
     *
     * @param name the name of the feature
     * @param value the value of the feature
     */
    public void setString(String name, String value) {
        setObject(name, value);
    }

    /**
     * Sets the named feature.
     *
     * @param name the name of the feature
     * @param value the value of the feature
     */
    public void setObject(String name, Object value) {
        featureMap.put(name, value);
    }
}

View file

@ -0,0 +1,447 @@
/**
* Portions Copyright 2001-2003 Sun Microsystems, Inc.
* Portions Copyright 1999-2001 Language Technologies Institute,
* Carnegie Mellon University.
* All Rights Reserved. Use is subject to license terms.
*
* See the file "license.terms" for information on usage and
* redistribution of this file, and for a DISCLAIMER OF ALL
* WARRANTIES.
*/
package edu.cmu.sphinx.alignment.tokenizer;
import java.util.StringTokenizer;
/**
* Represents a node in a Relation. Items can have shared contents but each
* item has its own set of Daughters. The shared contents of an item
* (represented by ItemContents) includes the feature set for the item and the
* set of all relations that this item is contained in. An item can be
* contained in a number of relations and as daughters to other items. This
* class is used to keep track of all of these relationships. There may be many
* instances of item that reference the same shared ItemContents.
*/
public class Item {
    private Relation ownerRelation;  // the relation that created this item
    private ItemContents contents;   // features + relation map, shared between items
    private Item parent;             // set only on the head of a sibling list
    private Item daughter;           // first daughter; her siblings hang off next/prev
    private Item next;               // next sibling
    private Item prev;               // previous sibling

    /**
     * Creates an item. The item is coupled to a particular Relation. If shared
     * contents is null a new sharedContents is created.
     *
     * @param relation the relation that owns this item
     * @param sharedContents the contents that is shared with others. If null,
     * a new sharedContents is created.
     */
    public Item(Relation relation, ItemContents sharedContents) {
        ownerRelation = relation;
        if (sharedContents != null) {
            contents = sharedContents;
        } else {
            contents = new ItemContents();
        }
        parent = null;
        daughter = null;
        next = null;
        prev = null;
        // Register this item under its relation's name so that
        // getItemAs(relationName) can find it later via the shared contents.
        getSharedContents().addItemRelation(relation.getName(), this);
    }

    /**
     * Finds the item in the given relation that has the same shared contents.
     *
     * @param relationName the relation of interest
     *
     * @return the item as found in the given relation or null if not found
     */
    public Item getItemAs(String relationName) {
        return getSharedContents().getItemRelation(relationName);
    }

    /**
     * Retrieves the owning Relation.
     *
     * @return the relation that owns this item
     */
    public Relation getOwnerRelation() {
        return ownerRelation;
    }

    /**
     * Retrieves the shared contents for this item.
     *
     * @return the shared item contents
     */
    public ItemContents getSharedContents() {
        return contents;
    }

    /**
     * Determines if this item has daughters.
     *
     * @return true if this item has daughters
     */
    public boolean hasDaughters() {
        return daughter != null;
    }

    /**
     * Retrieves the first daughter of this item.
     *
     * @return the first daughter or null if none
     */
    public Item getDaughter() {
        return daughter;
    }

    /**
     * Retrieves the Nth daughter of this item.
     *
     * @param which the zero-based index of the daughter to return
     *        (0 returns the first daughter)
     *
     * @return the Nth daughter or null if none at the given index
     */
    public Item getNthDaughter(int which) {
        Item d = daughter;
        int count = 0;
        while (count++ != which && d != null) {
            d = d.next;
        }
        return d;
    }

    /**
     * Retrieves the last daughter of this item.
     *
     * @return the last daughter or null if none at the given index
     */
    public Item getLastDaughter() {
        Item d = daughter;
        if (d == null) {
            return null;
        }
        // Walk the sibling chain of the first daughter to its end.
        while (d.next != null) {
            d = d.next;
        }
        return d;
    }

    /**
     * Adds the given item as a daughter to this item.
     *
     * @param item for the new daughter
     * @return created item
     */
    public Item addDaughter(Item item) {
        Item newItem;
        ItemContents contents;
        Item p = getLastDaughter();
        if (p != null) {
            // Already have daughters: append after the last one.
            newItem = p.appendItem(item);
        } else {
            // First daughter: a new Item is created that shares the given
            // item's contents (or fresh contents if item is null).
            if (item == null) {
                contents = new ItemContents();
            } else {
                contents = item.getSharedContents();
            }
            newItem = new Item(getOwnerRelation(), contents);
            newItem.parent = this;
            daughter = newItem;
        }
        return newItem;
    }

    /**
     * Creates a new Item, adds it as a daughter to this item and returns the
     * new item.
     *
     * @return the newly created item that was added as a daughter
     */
    public Item createDaughter() {
        return addDaughter(null);
    }

    /**
     * Returns the parent of this item.
     *
     * @return the parent of this item
     */
    public Item getParent() {
        Item n;
        // Only the head of a sibling list carries the parent pointer
        // (see prependItem), so walk back to the head first.
        for (n = this; n.prev != null; n = n.prev) {
        }
        return n.parent;
    }

    /**
     * Sets the parent of this item.
     *
     * @param parent the parent of this item
     */
    /*
     * private void setParent(Item parent) { this.parent = parent; }
     */

    /**
     * Returns the utterance associated with this item.
     *
     * @return the utterance that contains this item
     */
    public Utterance getUtterance() {
        return getOwnerRelation().getUtterance();
    }

    /**
     * Returns the feature set of this item.
     *
     * @return the feature set of this item
     */
    public FeatureSet getFeatures() {
        return getSharedContents().getFeatures();
    }

    /**
     * Finds the feature by following the given path. Path is a string of ":"
     * or "." separated strings with the following interpretations:
     * <ul>
     * <li>n - next item
     * <li>p - previous item
     * <li>parent - the parent
     * <li>daughter - the daughter
     * <li>daughter1 - same as daughter
     * <li>daughtern - the last daughter
     * <li>R:relname - the item as found in the given relation 'relname'
     * </ul>
     * The last element of the path will be interpreted as a voice/language
     * specific feature function (if present) or an item feature name. If the
     * feature function exists it will be called with the item specified by the
     * path, otherwise, a feature will be retrieved with the given name. If
     * neither exist than a String "0" is returned.
     *
     * @param pathAndFeature the path to follow
     * @return created object
     */
    public Object findFeature(String pathAndFeature) {
        int lastDot;
        String feature;
        String path;
        Item item;
        Object results = null;
        lastDot = pathAndFeature.lastIndexOf(".");
        // string can be of the form "p.feature" or just "feature"
        if (lastDot == -1) {
            feature = pathAndFeature;
            path = null;
        } else {
            feature = pathAndFeature.substring(lastDot + 1);
            path = pathAndFeature.substring(0, lastDot);
        }
        item = findItem(path);
        if (item != null) {
            results = item.getFeatures().getObject(feature);
        }
        // Missing path or missing feature both yield the string "0".
        results = (results == null) ? "0" : results;
        // System.out.println("FI " + pathAndFeature + " are " + results);
        return results;
    }

    /**
     * Finds the item specified by the given path.
     *
     * Path is a string of ":" or "." separated strings with the following
     * interpretations:
     * <ul>
     * <li>n - next item
     * <li>p - previous item
     * <li>parent - the parent
     * <li>daughter - the daughter
     * <li>daughter1 - same as daughter
     * <li>daughtern - the last daughter
     * <li>R:relname - the item as found in the given relation 'relname'
     * </ul>
     * If the given path takes us outside of the bounds of the item graph, then
     * list access exceptions will be thrown.
     *
     * @param path the path to follow, or null to return this item
     *
     * @return the item at the given path
     */
    public Item findItem(String path) {
        Item pitem = this;
        StringTokenizer tok;
        if (path == null) {
            return this;
        }
        tok = new StringTokenizer(path, ":.");
        while (pitem != null && tok.hasMoreTokens()) {
            String token = tok.nextToken();
            if (token.equals("n")) {
                pitem = pitem.getNext();
            } else if (token.equals("p")) {
                pitem = pitem.getPrevious();
            } else if (token.equals("nn")) {
                pitem = pitem.getNext();
                if (pitem != null) {
                    pitem = pitem.getNext();
                }
            } else if (token.equals("pp")) {
                pitem = pitem.getPrevious();
                if (pitem != null) {
                    pitem = pitem.getPrevious();
                }
            } else if (token.equals("parent")) {
                pitem = pitem.getParent();
            } else if (token.equals("daughter") || token.equals("daughter1")) {
                pitem = pitem.getDaughter();
            } else if (token.equals("daughtern")) {
                pitem = pitem.getLastDaughter();
            } else if (token.equals("R")) {
                // "R" consumes the next token as the relation name.
                String relationName = tok.nextToken();
                pitem =
                        pitem.getSharedContents()
                                .getItemRelation(relationName);
            } else {
                // Unknown path element: report and continue (no exception).
                System.out.println("findItem: bad feature " + token + " in "
                        + path);
            }
        }
        return pitem;
    }

    /**
     * Gets the next item in this list.
     *
     * @return the next item or null
     */
    public Item getNext() {
        return next;
    }

    /**
     * Gets the previous item in this list.
     *
     * @return the previous item or null
     */
    public Item getPrevious() {
        return prev;
    }

    /**
     * Appends an item in this list after this item.
     *
     * @param originalItem new item has shared contents with this item (or *
     * null)
     *
     * @return the newly appended item
     */
    public Item appendItem(Item originalItem) {
        ItemContents contents;
        Item newItem;
        if (originalItem == null) {
            contents = null;
        } else {
            contents = originalItem.getSharedContents();
        }
        newItem = new Item(getOwnerRelation(), contents);
        // Link the new item to the old successor BEFORE attach() overwrites
        // this.next below.
        newItem.next = this.next;
        if (this.next != null) {
            this.next.prev = newItem;
        }
        attach(newItem);
        // Keep the owning relation's tail pointer current.
        if (this.ownerRelation.getTail() == this) {
            this.ownerRelation.setTail(newItem);
        }
        return newItem;
    }

    /**
     * Attaches/appends an item to this one.
     * Note: this overwrites this.next; appendItem saves and relinks the old
     * successor before calling.
     *
     * @param item the item to append
     */
    void attach(Item item) {
        this.next = item;
        item.prev = this;
    }

    /**
     * Prepends an item in this list before this item.
     *
     * @param originalItem new item has shared contents with this item (or *
     * null)
     *
     * @return the newly appended item
     */
    public Item prependItem(Item originalItem) {
        ItemContents contents;
        Item newItem;
        if (originalItem == null) {
            contents = null;
        } else {
            contents = originalItem.getSharedContents();
        }
        newItem = new Item(getOwnerRelation(), contents);
        newItem.prev = this.prev;
        if (this.prev != null) {
            this.prev.next = newItem;
        }
        newItem.next = this;
        this.prev = newItem;
        // If this item was the head of its parent's daughter list, the new
        // item becomes the first daughter and takes over the parent pointer
        // (only the head sibling carries it -- see getParent).
        if (this.parent != null) {
            this.parent.daughter = newItem;
            newItem.parent = this.parent;
            this.parent = null;
        }
        // Keep the owning relation's head pointer current.
        if (this.ownerRelation.getHead() == this) {
            this.ownerRelation.setHead(newItem);
        }
        return newItem;
    }

    // Inherited from Object; returns the "name" feature if present,
    // otherwise the empty string.
    public String toString() {
        // if we have a feature called 'name' use that
        // otherwise fall back on the default.
        String name = getFeatures().getString("name");
        if (name == null) {
            name = "";
        }
        return name;
    }

    /**
     * Determines if the shared contents of the two items are the same.
     *
     * @param otherItem the item to compare
     *
     * @return true if the shared contents are the same
     */
    public boolean equalsShared(Item otherItem) {
        if (otherItem == null) {
            return false;
        } else {
            return getSharedContents().equals(otherItem.getSharedContents());
        }
    }
}

View file

@ -0,0 +1,74 @@
/**
* Portions Copyright 2001 Sun Microsystems, Inc.
* Portions Copyright 1999-2001 Language Technologies Institute,
* Carnegie Mellon University.
* All Rights Reserved. Use is subject to license terms.
*
* See the file "license.terms" for information on usage and
* redistribution of this file, and for a DISCLAIMER OF ALL
* WARRANTIES.
*/
package edu.cmu.sphinx.alignment.tokenizer;
/**
* Contains the information that is shared between multiple items.
*/
public class ItemContents {
    // Both maps are assigned exactly once in the constructor and never
    // reassigned, so they are declared final.
    private final FeatureSet features;
    private final FeatureSet relations;

    /**
     * Class Constructor.
     */
    public ItemContents() {
        features = new FeatureSet();
        relations = new FeatureSet();
    }

    /**
     * Adds the given item to the set of relations. Whenever an item is added
     * to a relation, it should add the name and the Item reference to this set
     * of name/item mappings. This allows an item to find out the set of all
     * relations that it is contained in.
     *
     * @param relationName the name of the relation
     * @param item the item reference in the relation
     */
    public void addItemRelation(String relationName, Item item) {
        relations.setObject(relationName, item);
    }

    /**
     * Removes the relation/item mapping from this ItemContents.
     *
     * @param relationName the name of the relation/item to remove
     */
    public void removeItemRelation(String relationName) {
        relations.remove(relationName);
    }

    /**
     * Given the name of a relation, returns the item that shares the same
     * ItemContents.
     *
     * @param relationName the name of the relation of interest
     *
     * @return the item associated with this ItemContents in the named
     *         relation, or null if it does not exist
     */
    public Item getItemRelation(String relationName) {
        return (Item) relations.getObject(relationName);
    }

    /**
     * Returns the feature set for this item contents.
     *
     * @return the FeatureSet for this contents
     */
    public FeatureSet getFeatures() {
        return features;
    }
}

View file

@ -0,0 +1,449 @@
/**
* Portions Copyright 2001-2003 Sun Microsystems, Inc.
* Portions Copyright 1999-2001 Language Technologies Institute,
* Carnegie Mellon University.
* All Rights Reserved. Use is subject to license terms.
*
* See the file "license.terms" for information on usage and
* redistribution of this file, and for a DISCLAIMER OF ALL
* WARRANTIES.
*/
package edu.cmu.sphinx.alignment.tokenizer;
/**
* Expands Strings containing digits characters into a list of words
* representing those digits.
*
* It translates the following code from flite:
* <code>lang/usEnglish/us_expand.c</code>
*/
public class NumberExpander {
    // Cardinal number words, indexed by digit value.
    private static final String[] digit2num = {"zero", "one", "two", "three",
            "four", "five", "six", "seven", "eight", "nine"};

    private static final String[] digit2teen = {"ten", /* shouldn't get called */
            "eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen",
            "seventeen", "eighteen", "nineteen"};

    private static final String[] digit2enty = {"zero", /* shouldn't get called */
            "ten", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty",
            "ninety"};

    // Ordinal number words, indexed by digit value.
    private static final String[] ord2num = {"zeroth", "first", "second",
            "third", "fourth", "fifth", "sixth", "seventh", "eighth", "ninth"};

    private static final String[] ord2teen = {"tenth", /* shouldn't get called */
            "eleventh", "twelfth", "thirteenth", "fourteenth", "fifteenth",
            "sixteenth", "seventeenth", "eighteenth", "nineteenth"};

    private static final String[] ord2enty = {"zeroth", /* shouldn't get called */
            "tenth", "twentieth", "thirtieth", "fortieth", "fiftieth", "sixtieth",
            "seventieth", "eightieth", "ninetieth"};

    // Decade ("numness") words. Declared final: this was the only mutable
    // static table reference in the class.
    // NOTE(review): "fourties" looks like a misspelling of "forties" in the
    // spoken output -- left unchanged to preserve behavior; confirm intent.
    private static final String[] digit2Numness = {
            "", "tens", "twenties", "thirties", "fourties", "fifties",
            "sixties", "seventies", "eighties", "nineties"
    };

    /**
     * Unconstructable
     */
    private NumberExpander() {}

    /**
     * Expands a digit string into a list of English words of those digits. For
     * example, "1234" expands to "one two three four"
     *
     * @param numberString the digit string to expand.
     * @param wordRelation words are added to this Relation
     */
    public static void expandNumber(String numberString,
            WordRelation wordRelation) {
        int numDigits = numberString.length();

        if (numDigits == 0) {
            // Empty string: nothing to add.
        } else if (numDigits == 1) {
            expandDigits(numberString, wordRelation);
        } else if (numDigits == 2) {
            expand2DigitNumber(numberString, wordRelation);
        } else if (numDigits == 3) {
            expand3DigitNumber(numberString, wordRelation);
        } else if (numDigits < 7) {
            expandBelow7DigitNumber(numberString, wordRelation);
        } else if (numDigits < 10) {
            expandBelow10DigitNumber(numberString, wordRelation);
        } else if (numDigits < 13) {
            expandBelow13DigitNumber(numberString, wordRelation);
        } else {
            // Too large to name: read it out digit by digit.
            expandDigits(numberString, wordRelation);
        }
    }

    /**
     * Expands a two-digit string into a list of English words.
     *
     * @param numberString the string which is the number to expand
     * @param wordRelation words are added to this Relation
     */
    private static void expand2DigitNumber(String numberString,
            WordRelation wordRelation) {
        if (numberString.charAt(0) == '0') {
            // numberString is "0X"
            if (numberString.charAt(1) == '0') {
                // numberString is "00", do nothing
            } else {
                // numberString is "01", "02" ...
                String number = digit2num[numberString.charAt(1) - '0'];
                wordRelation.addWord(number);
            }
        } else if (numberString.charAt(1) == '0') {
            // numberString is "10", "20", ...
            String number = digit2enty[numberString.charAt(0) - '0'];
            wordRelation.addWord(number);
        } else if (numberString.charAt(0) == '1') {
            // numberString is "11", "12", ..., "19"
            String number = digit2teen[numberString.charAt(1) - '0'];
            wordRelation.addWord(number);
        } else {
            // numberString is "2X", "3X", ...
            String enty = digit2enty[numberString.charAt(0) - '0'];
            wordRelation.addWord(enty);
            expandDigits(numberString.substring(1, numberString.length()),
                    wordRelation);
        }
    }

    /**
     * Expands a three-digit string into a list of English words.
     *
     * @param numberString the string which is the number to expand
     * @param wordRelation words are added to this Relation
     */
    private static void expand3DigitNumber(String numberString,
            WordRelation wordRelation) {
        if (numberString.charAt(0) == '0') {
            // Leading zero: expand the remaining two digits only.
            expandNumberAt(numberString, 1, wordRelation);
        } else {
            String hundredDigit = digit2num[numberString.charAt(0) - '0'];
            wordRelation.addWord(hundredDigit);
            wordRelation.addWord("hundred");
            expandNumberAt(numberString, 1, wordRelation);
        }
    }

    /**
     * Expands a string that is a 4 to 6 digits number into a list of English
     * words. For example, "333000" into "three hundred and thirty-three
     * thousand".
     *
     * @param numberString the string which is the number to expand
     * @param wordRelation words are added to this Relation
     */
    private static void expandBelow7DigitNumber(String numberString,
            WordRelation wordRelation) {
        expandLargeNumber(numberString, "thousand", 3, wordRelation);
    }

    /**
     * Expands a string that is a 7 to 9 digits number into a list of English
     * words. For example, "19000000" into nineteen million.
     *
     * @param numberString the string which is the number to expand
     * @param wordRelation words are added to this Relation
     */
    private static void expandBelow10DigitNumber(String numberString,
            WordRelation wordRelation) {
        expandLargeNumber(numberString, "million", 6, wordRelation);
    }

    /**
     * Expands a string that is a 10 to 12 digits number into a list of English
     * words. For example, "27000000000" into twenty-seven billion.
     *
     * @param numberString the string which is the number to expand
     * @param wordRelation words are added to this Relation
     */
    private static void expandBelow13DigitNumber(String numberString,
            WordRelation wordRelation) {
        expandLargeNumber(numberString, "billion", 9, wordRelation);
    }

    /**
     * Expands a string that is a number longer than 3 digits into a list of
     * English words. For example, "1000" into one thousand.
     *
     * @param numberString the string which is the number to expand
     * @param order either "thousand", "million", or "billion"
     * @param numberZeroes the number of zeroes, depending on the order, so its
     *        either 3, 6, or 9
     * @param wordRelation words are added to this Relation
     */
    private static void expandLargeNumber(String numberString, String order,
            int numberZeroes, WordRelation wordRelation) {
        int numberDigits = numberString.length();

        // parse out the prefix, e.g., "113" in "113,000"
        int i = numberDigits - numberZeroes;
        String part = numberString.substring(0, i);

        // get how many thousands/millions/billions
        Item oldTail = wordRelation.getTail();
        expandNumber(part, wordRelation);
        // Only say the order word if the prefix actually produced words
        // (a prefix of "000" adds nothing).
        if (wordRelation.getTail() != oldTail) {
            wordRelation.addWord(order);
        }
        expandNumberAt(numberString, i, wordRelation);
    }

    /**
     * Returns the number string list of the given string starting at the given
     * index. E.g., expandNumberAt("1100", 1) gives "one hundred"
     *
     * @param numberString the string which is the number to expand
     * @param startIndex the starting position
     * @param wordRelation words are added to this Relation
     */
    private static void expandNumberAt(String numberString, int startIndex,
            WordRelation wordRelation) {
        expandNumber(
                numberString.substring(startIndex, numberString.length()),
                wordRelation);
    }

    /**
     * Expands given token to list of words pronouncing it as digits
     *
     * @param numberString the string which is the number to expand
     * @param wordRelation words are added to this Relation
     */
    public static void expandDigits(String numberString,
            WordRelation wordRelation) {
        int numberDigits = numberString.length();
        for (int i = 0; i < numberDigits; i++) {
            char digit = numberString.charAt(i);
            if (Character.isDigit(digit)) {
                wordRelation.addWord(digit2num[numberString.charAt(i) - '0']);
            } else {
                // Non-digit placeholder, as in the original flite code.
                wordRelation.addWord("umpty");
            }
        }
    }

    /**
     * Expands the digit string of an ordinal number.
     *
     * @param rawNumberString the string which is the number to expand
     * @param wordRelation words are added to this Relation
     */
    public static void expandOrdinal(String rawNumberString,
            WordRelation wordRelation) {
        // remove all ','s from the raw number string
        expandNumber(rawNumberString.replace(",", ""), wordRelation);

        // get the last in the list of number strings
        Item lastItem = wordRelation.getTail();

        if (lastItem != null) {
            FeatureSet featureSet = lastItem.getFeatures();
            String lastNumber = featureSet.getString("name");
            // Try cardinal->ordinal lookups in order: units, teens, tens.
            String ordinal = findMatchInArray(lastNumber, digit2num, ord2num);

            if (ordinal == null) {
                ordinal = findMatchInArray(lastNumber, digit2teen, ord2teen);
            }
            if (ordinal == null) {
                ordinal = findMatchInArray(lastNumber, digit2enty, ord2enty);
            }

            if (lastNumber.equals("hundred")) {
                ordinal = "hundredth";
            } else if (lastNumber.equals("thousand")) {
                ordinal = "thousandth";
            } else if (lastNumber.equals("billion")) {
                ordinal = "billionth";
            }
            // NOTE(review): "million" has no ordinal mapping here, so
            // "millionth" is never produced -- confirm whether intentional.

            // if there was an ordinal, set the last element of the list
            // to that ordinal; otherwise, don't do anything
            if (ordinal != null) {
                wordRelation.setLastWord(ordinal);
            }
        }
    }

    /**
     * Expands a digit string into its decade ("numness") form. A four-digit
     * string has its first two digits expanded as a number and its last two
     * as a decade word, e.g. "1960" becomes "nineteen" "sixties"; otherwise
     * the first digit selects the decade word directly.
     *
     * @param rawString the digit string to expand
     * @param wordRelation words are added to this Relation
     */
    public static void expandNumess(String rawString, WordRelation wordRelation) {
        if (rawString.length() == 4) {
            expand2DigitNumber(rawString.substring(0, 2), wordRelation);
            expandNumess(rawString.substring(2), wordRelation);
        } else {
            wordRelation.addWord(digit2Numness[rawString.charAt(0) - '0']);
        }
    }

    /**
     * Finds a match of the given string in the given array, and returns the
     * element at the same index in the returnInArray
     *
     * @param strToMatch the string to match
     * @param matchInArray the source array
     * @param returnInArray the return array
     *
     * @return an element in returnInArray, or <code>null</code> if a match is
     *         not found
     */
    private static String findMatchInArray(String strToMatch,
            String[] matchInArray, String[] returnInArray) {
        for (int i = 0; i < matchInArray.length; i++) {
            if (strToMatch.equals(matchInArray[i])) {
                if (i < returnInArray.length) {
                    return returnInArray[i];
                } else {
                    return null;
                }
            }
        }
        return null;
    }

    /**
     * Expands the given number string as pairs as in years or IDs
     *
     * @param numberString the string which is the number to expand
     * @param wordRelation words are added to this Relation
     */
    public static void expandID(String numberString, WordRelation wordRelation) {
        int numberDigits = numberString.length();

        if ((numberDigits == 4) && (numberString.charAt(2) == '0')
                && (numberString.charAt(3) == '0')) {
            if (numberString.charAt(1) == '0') { // e.g. 2000, 3000
                expandNumber(numberString, wordRelation);
            } else {
                // e.g. 1900 -> "nineteen hundred"
                expandNumber(numberString.substring(0, 2), wordRelation);
                wordRelation.addWord("hundred");
            }
        } else if ((numberDigits == 2) && (numberString.charAt(0) == '0')) {
            // e.g. "07" -> "oh seven"
            wordRelation.addWord("oh");
            expandDigits(numberString.substring(1, 2), wordRelation);
        } else if ((numberDigits == 4 && numberString.charAt(1) == '0')
                || numberDigits < 3) {
            expandNumber(numberString, wordRelation);
        } else if (numberDigits % 2 == 1) {
            // Odd length: peel the first digit, then recurse on pairs.
            String firstDigit = digit2num[numberString.charAt(0) - '0'];
            wordRelation.addWord(firstDigit);
            expandID(numberString.substring(1, numberDigits), wordRelation);
        } else {
            // Even length: expand two digits at a time.
            expandNumber(numberString.substring(0, 2), wordRelation);
            expandID(numberString.substring(2, numberDigits), wordRelation);
        }
    }

    /**
     * Expands the given number string as a real number.
     *
     * @param numberString the string which is the real number to expand
     * @param wordRelation words are added to this Relation
     */
    public static void expandReal(String numberString,
            WordRelation wordRelation) {
        int stringLength = numberString.length();
        int position;

        if (numberString.charAt(0) == '-') {
            // negative real numbers
            wordRelation.addWord("minus");
            expandReal(numberString.substring(1, stringLength), wordRelation);
        } else if (numberString.charAt(0) == '+') {
            // prefixed with a '+'
            wordRelation.addWord("plus");
            expandReal(numberString.substring(1, stringLength), wordRelation);
        } else if ((position = numberString.indexOf('e')) != -1
                || (position = numberString.indexOf('E')) != -1) {
            // numbers with 'E' or 'e'
            expandReal(numberString.substring(0, position), wordRelation);
            wordRelation.addWord("e");
            expandReal(numberString.substring(position + 1), wordRelation);
        } else if ((position = numberString.indexOf('.')) != -1) {
            // numbers with '.'
            String beforeDot = numberString.substring(0, position);
            if (beforeDot.length() > 0) {
                expandReal(beforeDot, wordRelation);
            }
            wordRelation.addWord("point");
            String afterDot = numberString.substring(position + 1);
            if (afterDot.length() > 0) {
                expandDigits(afterDot, wordRelation);
            }
        } else {
            // everything else
            expandNumber(numberString, wordRelation);
        }
    }

    /**
     * Expands the given string of letters as a list of single char symbols.
     *
     * @param letters the string of letters to expand
     * @param wordRelation words are added to this Relation
     */
    public static void expandLetters(String letters, WordRelation wordRelation) {
        letters = letters.toLowerCase();
        char c;

        for (int i = 0; i < letters.length(); i++) {
            // if this is a number
            c = letters.charAt(i);

            if (Character.isDigit(c)) {
                wordRelation.addWord(digit2num[c - '0']);
            } else if (letters.equals("a")) {
                // Lone "a" is spoken as the special token "_a".
                wordRelation.addWord("_a");
            } else {
                wordRelation.addWord(String.valueOf(c));
            }
        }
    }

    /**
     * Returns the integer value of the given string of Roman numerals.
     * Only the numerals I, V and X (values up to the tens) are recognized;
     * other characters are ignored.
     *
     * @param roman the string of Roman numbers
     *
     * @return the integer value
     */
    public static int expandRoman(String roman) {
        int value = 0;

        for (int p = 0; p < roman.length(); p++) {
            char c = roman.charAt(p);
            if (c == 'X') {
                value += 10;
            } else if (c == 'V') {
                value += 5;
            } else if (c == 'I') {
                if (p + 1 < roman.length()) {
                    char p1 = roman.charAt(p + 1);
                    if (p1 == 'V') {
                        // "IV" -> 4, consume both characters
                        value += 4;
                        p++;
                    } else if (p1 == 'X') {
                        // "IX" -> 9, consume both characters
                        value += 9;
                        p++;
                    } else {
                        value += 1;
                    }
                } else {
                    value += 1;
                }
            }
        }
        return value;
    }
}

View file

@ -0,0 +1,264 @@
/**
* Portions Copyright 2001 Sun Microsystems, Inc.
* Portions Copyright 1999-2001 Language Technologies Institute,
* Carnegie Mellon University.
* All Rights Reserved. Use is subject to license terms.
*
* See the file "license.terms" for information on usage and
* redistribution of this file, and for a DISCLAIMER OF ALL
* WARRANTIES.
*/
package edu.cmu.sphinx.alignment.tokenizer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.StringTokenizer;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Interface that Manages a feature or item path. Allows navigation to the
* corresponding feature or item. This class in controlled by the following
* system properties:
*
* <pre>
* com.sun.speech.freetts.interpretCartPaths - default false
* com.sun.speech.freetts.lazyCartCompile - default true
* </pre>
*
* com.sun.speech.freetts.interpretCartPaths
*
* Instances of this class will optionally pre-compile the paths. Pre-compiling
* paths reduces the processing time and objects needed to extract a feature or
* an item based upon a path.
*/
public class PathExtractor {
    /** Logger instance. */
    private static final Logger LOGGER = Logger
            .getLogger(PathExtractor.class.getName());

    /**
     * If this system property is set to true, paths will not be compiled.
     */
    public final static String INTERPRET_PATHS_PROPERTY =
            "com.sun.speech.freetts.interpretCartPaths";

    /**
     * If this system property is set to true, CART feature/item paths will
     * only be compiled as needed.
     */
    public final static String LAZY_COMPILE_PROPERTY =
            "com.sun.speech.freetts.lazyCartCompile";

    // Both flags are read once at class-load time, so changing the system
    // properties afterwards has no effect.
    private final static boolean INTERPRET_PATHS = System.getProperty(
            INTERPRET_PATHS_PROPERTY, "false").equals("true");
    private final static boolean LAZY_COMPILE = System.getProperty(
            LAZY_COMPILE_PROPERTY, "true").equals("true");

    private String pathAndFeature;  // the full "path.feature" string as given
    private String path;            // the navigation part, or null if none
    private String feature;         // the trailing feature name (if wanted)
    // Pre-parsed path: alternating OpEnum steps, with a String relation name
    // following each RELATION op. Built lazily in findItem unless
    // LAZY_COMPILE is false.
    private Object[] compiledPath;

    /**
     * Creates a path for the given feature.
     * @param pathAndFeature string to use
     * @param wantFeature do we need features
     */
    public PathExtractor(String pathAndFeature, boolean wantFeature) {
        this.pathAndFeature = pathAndFeature;
        if (INTERPRET_PATHS) {
            // Interpreted mode: keep the raw string and let Item.findItem /
            // Item.findFeature parse it on every call.
            path = pathAndFeature;
            return;
        }

        if (wantFeature) {
            int lastDot = pathAndFeature.lastIndexOf(".");
            // string can be of the form "p.feature" or just "feature"
            if (lastDot == -1) {
                feature = pathAndFeature;
                path = null;
            } else {
                feature = pathAndFeature.substring(lastDot + 1);
                path = pathAndFeature.substring(0, lastDot);
            }
        } else {
            this.path = pathAndFeature;
        }

        if (!LAZY_COMPILE) {
            compiledPath = compile(path);
        }
    }

    /**
     * Finds the item associated with this Path.
     *
     * @param item the item to start at
     * @return the item associated with the path or null
     */
    public Item findItem(Item item) {
        if (INTERPRET_PATHS) {
            return item.findItem(path);
        }

        if (compiledPath == null) {
            // NOTE(review): lazy build is unsynchronized -- presumably
            // single-threaded use; confirm if shared across threads.
            compiledPath = compile(path);
        }

        Item pitem = item;

        // i is advanced inside the loop; RELATION consumes an extra slot
        // (its relation-name String) from compiledPath.
        for (int i = 0; pitem != null && i < compiledPath.length;) {
            OpEnum op = (OpEnum) compiledPath[i++];
            if (op == OpEnum.NEXT) {
                pitem = pitem.getNext();
            } else if (op == OpEnum.PREV) {
                pitem = pitem.getPrevious();
            } else if (op == OpEnum.NEXT_NEXT) {
                pitem = pitem.getNext();
                if (pitem != null) {
                    pitem = pitem.getNext();
                }
            } else if (op == OpEnum.PREV_PREV) {
                pitem = pitem.getPrevious();
                if (pitem != null) {
                    pitem = pitem.getPrevious();
                }
            } else if (op == OpEnum.PARENT) {
                pitem = pitem.getParent();
            } else if (op == OpEnum.DAUGHTER) {
                pitem = pitem.getDaughter();
            } else if (op == OpEnum.LAST_DAUGHTER) {
                pitem = pitem.getLastDaughter();
            } else if (op == OpEnum.RELATION) {
                String relationName = (String) compiledPath[i++];
                pitem =
                        pitem.getSharedContents()
                                .getItemRelation(relationName);
            } else {
                // Unknown op: report and continue (no exception).
                System.out.println("findItem: bad feature " + op + " in "
                        + path);
            }
        }
        return pitem;
    }

    /**
     * Finds the feature associated with this Path.
     *
     * @param item the item to start at
     * @return the feature associated or "0" if the feature was not found.
     */
    public Object findFeature(Item item) {
        if (INTERPRET_PATHS) {
            return item.findFeature(path);
        }

        Item pitem = findItem(item);
        Object results = null;
        if (pitem != null) {
            if (LOGGER.isLoggable(Level.FINER)) {
                LOGGER.finer("findFeature: Item [" + pitem + "], feature '"
                        + feature + "'");
            }
            results = pitem.getFeatures().getObject(feature);
        }
        // Missing item or missing feature both yield the string "0".
        results = (results == null) ? "0" : results;
        if (LOGGER.isLoggable(Level.FINER)) {
            LOGGER.finer("findFeature: ...results = '" + results + "'");
        }
        return results;
    }

    /**
     * Compiles the given path into the compiled form
     *
     * @param path the path to compile (null yields an empty program)
     * @return the compiled form which is in the form of an array path
     *         traversal enums and associated strings
     */
    private Object[] compile(String path) {
        if (path == null) {
            return new Object[0];
        }

        List<Object> list = new ArrayList<Object>();
        StringTokenizer tok = new StringTokenizer(path, ":.");

        while (tok.hasMoreTokens()) {
            String token = tok.nextToken();
            OpEnum op = OpEnum.getInstance(token);
            if (op == null) {
                // Unlike findItem, an unknown token here is fatal.
                throw new Error("Bad path compiled " + path);
            }

            list.add(op);

            if (op == OpEnum.RELATION) {
                // RELATION is followed by its relation name in the program.
                list.add(tok.nextToken());
            }
        }
        return list.toArray();
    }

    // inherited for Object
    public String toString() {
        return pathAndFeature;
    }

    // TODO: add these to the interface should we support binary
    // files
    /*
     * public void writeBinary(); public void readBinary();
     */
}
/**
 * An enumerated type associated with path operations. Instances are
 * interned: every OpEnum registers itself in a lookup map keyed by its
 * path name, so identity comparison (==) is safe.
 */
class OpEnum {
    // Lookup table from path name to op; populated by the constructor.
    // Must be declared before the constants below so it exists when they
    // are initialized.
    private static final Map<String, OpEnum> map = new HashMap<String, OpEnum>();

    public final static OpEnum NEXT = new OpEnum("n");
    public final static OpEnum PREV = new OpEnum("p");
    public final static OpEnum NEXT_NEXT = new OpEnum("nn");
    public final static OpEnum PREV_PREV = new OpEnum("pp");
    public final static OpEnum PARENT = new OpEnum("parent");
    public final static OpEnum DAUGHTER = new OpEnum("daughter");
    public final static OpEnum LAST_DAUGHTER = new OpEnum("daughtern");
    public final static OpEnum RELATION = new OpEnum("R");

    // The path name for this op, e.g. "n" or "parent".
    private final String name;

    /**
     * Creates a new OpEnum and registers it in the lookup map. There is a
     * limited, fixed set of OpEnums.
     *
     * @param name the path name for this Enum
     */
    private OpEnum(String name) {
        this.name = name;
        map.put(name, this);
    }

    /**
     * Gets the OpEnum that is associated with the given name.
     *
     * @param name the name of the OpEnum of interest
     * @return the matching OpEnum, or null if there is none
     */
    public static OpEnum getInstance(String name) {
        // The map is generic, so no cast is needed.
        return map.get(name);
    }

    @Override
    public String toString() {
        return name;
    }
}

Some files were not shown because too many files have changed in this diff Show more