Commits

Ivan Vučica committed 96fa5cf

English and Japanese voices played using AVAudioPlayer.

Files changed (19)

API/SpeechEngine.h

+/*
+    File: SpeechEngine.h
+Abstract: Definition of the SPI between the Speech Synthesis API and a speech engine that
+			implements the actual synthesis technology.  Each voice is matched to its appropriate
+			speech engine via a type code stored in the voice.
+
+			This documentation requires an understanding of the Speech Synthesis Manager.
+ Version: 1.0
+
+Disclaimer: IMPORTANT:  This Apple software is supplied to you by Apple
+Inc. ("Apple") in consideration of your agreement to the following
+terms, and your use, installation, modification or redistribution of
+this Apple software constitutes acceptance of these terms.  If you do
+not agree with these terms, please do not use, install, modify or
+redistribute this Apple software.
+
+In consideration of your agreement to abide by the following terms, and
+subject to these terms, Apple grants you a personal, non-exclusive
+license, under Apple's copyrights in this original Apple software (the
+"Apple Software"), to use, reproduce, modify and redistribute the Apple
+Software, with or without modifications, in source and/or binary forms;
+provided that if you redistribute the Apple Software in its entirety and
+without modifications, you must retain this notice and the following
+text and disclaimers in all such redistributions of the Apple Software.
+Neither the name, trademarks, service marks or logos of Apple Inc. may
+be used to endorse or promote products derived from the Apple Software
+without specific prior written permission from Apple.  Except as
+expressly stated in this notice, no other rights or licenses, express or
+implied, are granted by Apple herein, including but not limited to any
+patent rights that may be infringed by your derivative works or by other
+works in which the Apple Software may be incorporated.
+
+The Apple Software is provided by Apple on an "AS IS" basis.  APPLE
+MAKES NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION
+THE IMPLIED WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS
+FOR A PARTICULAR PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND
+OPERATION ALONE OR IN COMBINATION WITH YOUR PRODUCTS.
+
+IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL
+OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION,
+MODIFICATION AND/OR DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED
+AND WHETHER UNDER THEORY OF CONTRACT, TORT (INCLUDING NEGLIGENCE),
+STRICT LIABILITY OR OTHERWISE, EVEN IF APPLE HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+Copyright (C) 2011 Apple Inc. All Rights Reserved.
+
+*/
+
+/*
+ * VOICES
+ *
+ * Voices are bundles installed in DOMAIN/Library/Speech/Voices/YOUR_VOICE_NAME.SpeechVoice, where DOMAIN is one of three
+ * domains: system, local, or user.
+ *
+ * If the voice is designed to run on Mac OS X 10.4 and earlier it must contain a VoiceDescription file at the location YOUR_VOICE_NAME.SpeechVoice/Contents/Resources/VoiceDescription.
+ * The VoiceDescription file contains the voice's attributes in binary form using the struct VoiceDescription, as defined in SpeechSynthesis.h.
+ * The voice's Info.plist file should also include additional voice attributes that VoiceOver uses (VoiceSupportedCharacters & VoiceIndividuallySpokenCharacters).
+ *
+ * If the voice will only support Mac OS X 10.5 and later, then a VoiceDescription file is not necessary and all voice attributes can be defined in the voice's Info.plist file.
+ * 
+ * NOTE: Voice bundle names cannot contain spaces.  However, the name of the voice that is specified in the
+ * VoiceDescription file and displayed to the user can contain spaces.
+ *
+ *
+ */
+
+#define kSpeechVoiceSynthesizerNumericID		CFSTR("VoiceSynthesizerNumericID")
+#define kSpeechVoiceNumericID					CFSTR("VoiceNumericID")
+
+
+/*
+ * SYNTHESIZERS
+ *
+ * Speech Synthesizers are bundles installed in /System/Library/Speech/Synthesizers/YOUR_SYNTHESIZER_NAME.SpeechSynthesizer
+ *
+ * Define _SUPPORT_SPEECH_SYNTHESIS_IN_MAC_OS_X_VERSION_10_0_THROUGH_10_4__ as true if your synthesizer is intended to run on Mac OS X 10.4 and earlier.
+ *
+ *
+ *
+ */
+
+
+#define kSpeechEngineTypeArrayKey CFSTR("SpeechEngineTypeArray")
+
+#if _SUPPORT_SPEECH_SYNTHESIS_IN_MAC_OS_X_VERSION_10_0_THROUGH_10_4__
+/* Engine Description (in YOUR_SYNTHESIZER_NAME.SpeechSynthesizer/Contents/Resources/SpeechEngineDescription) */
+typedef struct SpeechEngineDesc
+{
+	long		fFileFormat;	// Currently 2
+	OSType		fEngineType[3]; // Voice types handled, padded with \0\0\0\0 if necessary
+} SpeechEngineDesc;
+
+/* Engine (in YOUR_SYNTHESIZER_NAME.SpeechSynthesizer/Contents/MacOS/YOUR_SYNTHESIZER_NAME) */
+#endif
+
+/* Token to identify your private per-channel data */
+typedef long SpeechChannelIdentifier;
+
+
+/* API: These functions must be defined and exported with these names and extern "C" linkage. All of them
+   return an OSStatus result.
+*/
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Open channel - called from NewSpeechChannel, passes back in *ssr a unique SpeechChannelIdentifier value of your choosing. */
+long	SEOpenSpeechChannel	( SpeechChannelIdentifier* ssr );
+
+/* Set the voice to be used for the channel. Voice type guaranteed to be compatible with above spec */
+long 	SEUseVoice 			( SpeechChannelIdentifier ssr, VoiceSpec* voice, CFBundleRef inVoiceSpecBundle );
+
+/* Close channel */
+long	SECloseSpeechChannel( SpeechChannelIdentifier ssr ); 
+
+/* Analogous to corresponding speech synthesis API calls, except for details noted below */
+
+/* Must also be able to parse and handle the embedded commands defined in Inside Macintosh: Speech */
+long 	SESpeakCFString			( SpeechChannelIdentifier ssr, CFStringRef text, CFDictionaryRef options);
+long 	SECopySpeechProperty	( SpeechChannelIdentifier ssr, CFStringRef property, CFTypeRef * object );
+long 	SESetSpeechProperty		( SpeechChannelIdentifier ssr, CFStringRef property, CFTypeRef object);
+long 	SEUseSpeechDictionary 	( SpeechChannelIdentifier ssr, CFDictionaryRef speechDictionary );
+long 	SECopyPhonemesFromText 	( SpeechChannelIdentifier ssr, CFStringRef text, CFStringRef * phonemes);
+long 	SEStopSpeechAt			( SpeechChannelIdentifier ssr, unsigned long whereToPause); 
+long 	SEPauseSpeechAt			( SpeechChannelIdentifier ssr, unsigned long whereToPause );
+long 	SEContinueSpeech		( SpeechChannelIdentifier ssr );
+	
+#if _SUPPORT_SPEECH_SYNTHESIS_IN_MAC_OS_X_VERSION_10_0_THROUGH_10_4__
+
+/* Must also be able to parse and handle the embedded commands defined in Inside Macintosh: Speech */
+long 	SESpeakBuffer		( SpeechChannelIdentifier ssr, Ptr textBuf, long byteLen, long controlFlags ); 
+long 	SETextToPhonemes 	( SpeechChannelIdentifier ssr, char* textBuf, long textBytes, void** phonemeBuf, long* phonBytes);
+long 	SEUseDictionary 	( SpeechChannelIdentifier ssr, void* dictionary, long dictLength );
+
+/* The soPhonemeSymbols call is passed as soPhonemeSymbolsPtr ('phsp'); speechInfo passes a pointer to a (void *)
+   The engine has to allocate a sufficiently sized area with malloc(), fill it in, and store it into 
+   *(void **)speechInfo. The API will dispose the memory. The call is rarely used and can probably be left 
+   unimplemented. 
+
+   Must be able to handle all selectors defined in Inside Macintosh: Speech.
+*/
+long 	SEGetSpeechInfo		( SpeechChannelIdentifier ssr, unsigned long selector, void* speechInfo );
+
+/* soCurrentVoice will be handled by the API (and SEUseVoice, if necessary).
+
+   Must be able to handle all selectors defined in Inside Macintosh: Speech, including those for the various callbacks,
+   with the exception of soCurrentA5 and soSoundOutput.
+*/
+long 	SESetSpeechInfo		( SpeechChannelIdentifier ssr, unsigned long selector, void* speechInfo );
+
+/* Same as SEGetSpeechInfo(ssr, soStatus, status). Will probably get dropped in the next release of Mac OS X */
+long 	SESpeechStatus 		( SpeechChannelIdentifier ssr, SpeechStatusInfo * status );
+
+#endif
+
+/*  The SEWillUnloadBundle function is required to be implemented by synthesizers that can be loaded and unloaded on-the-fly 
+	from a location outside the standard directories in which synthesizers are found automatically. This function is called 
+	prior to the synthesizer's bundle being unloaded, usually as a result of the client calling SpeechSynthesisUnregisterModuleURL. 
+	
+	When called, the synthesizer should remove any run loops and threads created by the bundle so that its code can be removed 
+	from memory and the executable file closed. If the synthesizer was successful in preparing for unloading, then return 0 (zero);
+	otherwise, return -1.
+*/
+long 	SEWillUnloadBundle	(void);
+
+/* Internal selectors used by the Speech Synthesis Audio Unit */
+#define kSpeechAudioUnit	CFSTR("aunt")
+#define kSpeechAudioGraph	CFSTR("augr")
+#define kSpeechOfflineMode	CFSTR("offl")
+
+#if _SUPPORT_SPEECH_SYNTHESIS_IN_MAC_OS_X_VERSION_10_0_THROUGH_10_4__
+#define soAudioUnit		'aunt'
+#define soAudioGraph	'augr'
+#define soOffline		'offl'
+#endif
+	
+#ifdef __cplusplus
+}
+#endif
+
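
For orientation only (this snippet is not part of the commit): the comments above note that SEOpenSpeechChannel is called from NewSpeechChannel and that the remaining entry points are analogous to their public Speech Synthesis Manager counterparts. A minimal client-side sketch, assuming the synthesizer and one of its voices are installed:

#include <ApplicationServices/ApplicationServices.h>
#include <unistd.h>

static void SpeakOnce(CFStringRef text)
{
    SpeechChannel chan = NULL;

    // NULL picks the system default voice; passing a VoiceSpec whose creator
    // matches this engine's type code routes the channel to this synthesizer.
    if (NewSpeechChannel(NULL, &chan) != noErr)     // reaches SEOpenSpeechChannel (and SEUseVoice)
        return;

    SpeakCFString(chan, text, NULL);                // reaches SESpeakCFString

    while (SpeechBusy() > 0)                        // crude wait; real clients install
        usleep(100000);                             // a kSpeechSpeechDoneCallBack instead

    DisposeSpeechChannel(chan);                     // reaches SECloseSpeechChannel
}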

API/SynthesizerAPI.m

+/*
+    File: SynthesizerAPI.m
+Abstract: Implement Speech Engine API calls.
+
+Modifications (c) 2012 Ivan Vučica
+
+While theoretically these calls can be implemented in a procedural language,
+our approach is to represent a speech channel object as an instance of a 
+synthesizer class, to which all the API calls delegate the actual work.
+ Version: 1.0
+
+Disclaimer: IMPORTANT:  This Apple software is supplied to you by Apple
+Inc. ("Apple") in consideration of your agreement to the following
+terms, and your use, installation, modification or redistribution of
+this Apple software constitutes acceptance of these terms.  If you do
+not agree with these terms, please do not use, install, modify or
+redistribute this Apple software.
+
+In consideration of your agreement to abide by the following terms, and
+subject to these terms, Apple grants you a personal, non-exclusive
+license, under Apple's copyrights in this original Apple software (the
+"Apple Software"), to use, reproduce, modify and redistribute the Apple
+Software, with or without modifications, in source and/or binary forms;
+provided that if you redistribute the Apple Software in its entirety and
+without modifications, you must retain this notice and the following
+text and disclaimers in all such redistributions of the Apple Software.
+Neither the name, trademarks, service marks or logos of Apple Inc. may
+be used to endorse or promote products derived from the Apple Software
+without specific prior written permission from Apple.  Except as
+expressly stated in this notice, no other rights or licenses, express or
+implied, are granted by Apple herein, including but not limited to any
+patent rights that may be infringed by your derivative works or by other
+works in which the Apple Software may be incorporated.
+
+The Apple Software is provided by Apple on an "AS IS" basis.  APPLE
+MAKES NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION
+THE IMPLIED WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS
+FOR A PARTICULAR PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND
+OPERATION ALONE OR IN COMBINATION WITH YOUR PRODUCTS.
+
+IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL
+OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION,
+MODIFICATION AND/OR DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED
+AND WHETHER UNDER THEORY OF CONTRACT, TORT (INCLUDING NEGLIGENCE),
+STRICT LIABILITY OR OTHERWISE, EVEN IF APPLE HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+Copyright (C) 2011 Apple Inc. All Rights Reserved.
+
+*/
+
+#include "GoogleTTSSynthesizer.h"
+#include <ApplicationServices/ApplicationServices.h>
+
+// We support only the CoreFoundation API
+#import "GoogleTTSSynthesizerCF.h"
+
+
+#include "SpeechEngine.h"
+
+//
+// This example uses the synthesizer plug-in API supported in Mac OS X 10.6 and later versions.
+// It demonstrates all audio output methods defined in 10.6
+//
+
+/* Open channel - called from NewSpeechChannel, passes back in *ssr a unique SpeechChannelIdentifier value of your choosing. */
+long	SEOpenSpeechChannel( SpeechChannelIdentifier* ssr )
+{
+	//
+    // Pass back an identifier for this new channel.
+	//
+	SpeechChannelIdentifier newChannel = 
+           (SpeechChannelIdentifier)[[GoogleTTSSynthesizer alloc] init];
+    if (ssr) 
+        *ssr = newChannel;
+
+	// This routine normally returns one of the following values:
+    //	noErr				0		No error 
+    //	synthOpenFailed		-241	Could not open another speech synthesizer channel 
+	
+    return newChannel ? noErr : synthOpenFailed;
+}
+
+/* Set the voice to be used for the channel. Voice type guaranteed to be compatible with above spec */
+long 	SEUseVoice( SpeechChannelIdentifier ssr, VoiceSpec* voice, CFBundleRef inVoiceSpecBundle )
+{
+    // This routine normally returns one of the following values:
+    //	noErr				0		No error 
+    //	noSynthFound		-240	Could not find the specified speech synthesizer 
+    //	voiceNotFound		-244	Voice resource not found 
+
+	return [(GoogleTTSSynthesizer *)ssr useVoice:voice withBundle:inVoiceSpecBundle];
+}
+
+/* Close channel */
+long	SECloseSpeechChannel( SpeechChannelIdentifier ssr )
+{
+    // This routine normally returns one of the following values:
+    //	noErr				0		No error 
+    //	noSynthFound		-240	Could not find the specified speech synthesizer 
+    [(GoogleTTSSynthesizer *)ssr close];
+
+    return noErr;
+} 
+
+/* Analogous to corresponding speech synthesis API calls, except for details noted below */
+
+/********* Universal API calls ***************/
+
+long 	SEStopSpeechAt( SpeechChannelIdentifier ssr, unsigned long whereToStop)
+{
+    // This routine normally returns one of the following values:
+    //	noErr				0		No error 
+    //	paramErr			-50		Invalid value passed in a parameter. Your application passed an invalid parameter for dialog options. 
+    //	noSynthFound		-240	Could not find the specified speech synthesizer 
+	
+    return [(GoogleTTSSynthesizer *)ssr stopSpeakingAt:whereToStop];
+} 
+
+long 	SEPauseSpeechAt( SpeechChannelIdentifier ssr, unsigned long whereToPause )
+{
+    // This routine normally returns one of the following values:
+    //	noErr				0		No error 
+    //	paramErr			-50		Invalid value passed in a parameter. Your application passed an invalid parameter for dialog options. 
+    //	noSynthFound		-240	Could not find the specified speech synthesizer 
+	
+    return [(GoogleTTSSynthesizer *)ssr pauseSpeakingAt:whereToPause];
+} 
+
+long 	SEContinueSpeech( SpeechChannelIdentifier ssr )
+{
+    // This routine normally returns one of the following values:
+    //	noErr				0		No error 
+    //	noSynthFound		-240	Could not find the specified speech synthesizer 
+	
+    return [(GoogleTTSSynthesizer *)ssr continueSpeaking];
+} 
+
+/*  Try to release all resources that would require this bundle to remain in memory.
+*/
+long 	SEWillUnloadBundle()
+{
+/*  The SEWillUnloadBundle function is required to be implemented by synthesizers that can be loaded and unloaded on-the-fly 
+	from a location outside the standard directories in which synthesizers are found automatically. This function is called 
+	prior to the synthesizer's bundle being unloaded, usually as a result of the client calling SpeechSynthesisUnregisterModuleURL. 
+	
+	When called, the synthesizer should remove any run loops and threads created by the bundle so that its code can be removed 
+	from memory and the executable file closed. If the synthesizer was successful in preparing for unloading, then return 0 (zero);
+	otherwise, return -1.
+*/
+	return [GoogleTTSSynthesizer willUnloadBundle];
+}
+
+/******************** CF based calls **********************/
+
+#if !SYNTHESIZER_USES_BUFFER_API
+
+/* Must also be able to parse and handle the embedded commands defined in Inside Macintosh: Speech */
+long 	SESpeakCFString( SpeechChannelIdentifier ssr, CFStringRef text, CFDictionaryRef options )
+{
+    // This routine normally returns one of the following values:
+    //	noErr				0		No error 
+    //	paramErr			-50		Invalid value passed in a parameter. 
+    //	noSynthFound		-240	Could not find the specified speech synthesizer 
+    //	synthNotReady		-242	Speech synthesizer is still busy speaking 
+	
+	return [(GoogleTTSSynthesizer *)ssr 
+			   startSpeaking:text 
+			   noEndingProsody:[[(NSDictionary*)options objectForKey:(NSString *)kSpeechNoEndingProsody] boolValue]
+			   noInterrupt:[[(NSDictionary*)options objectForKey:(NSString *)kSpeechNoSpeechInterrupt] boolValue]
+			   preflight:[[(NSDictionary*)options objectForKey:(NSString *)kSpeechPreflightThenPause] boolValue]];
+} 
+
+long 	SECopyPhonemesFromText 	( SpeechChannelIdentifier ssr, CFStringRef text, CFStringRef * phonemes)
+{
+    // This routine normally returns one of the following values:
+    //	noErr				0		No error 
+    //	paramErr			-50		Invalid value passed in a parameter. Your application passed an invalid parameter for dialog options. 
+    //	noSynthFound		-240	Could not find the specified speech synthesizer 
+
+    return [(GoogleTTSSynthesizer *)ssr copyPhonemes:text result:phonemes];
+} 
+
+long 	SEUseSpeechDictionary( SpeechChannelIdentifier ssr, CFDictionaryRef speechDictionary )
+{
+    // This routine normally returns one of the following values:
+    //	noErr				0		No error 
+    //	paramErr			-50		Invalid value passed in a parameter. Your application passed an invalid parameter for dialog options. 
+    //	noSynthFound		-240	Could not find the specified speech synthesizer 
+    //	bufTooSmall			-243	Output buffer is too small to hold result 
+    //	badDictFormat		-246	Pronunciation dictionary format error 
+
+    return [(GoogleTTSSynthesizer *)ssr useDictionary:speechDictionary];
+} 
+
+/* 
+    Pass back the information for the designated speech channel and selector
+*/
+long 	SECopySpeechProperty( SpeechChannelIdentifier ssr, CFStringRef property, CFTypeRef * object )
+{
+    // This routine is required to support the following properties:
+    // kSpeechStatusProperty
+    // kSpeechErrorsProperty
+    // kSpeechInputModeProperty
+    // kSpeechCharacterModeProperty
+    // kSpeechNumberModeProperty
+    // kSpeechRateProperty  
+    // kSpeechPitchBaseProperty
+    // kSpeechPitchModProperty
+    // kSpeechVolumeProperty
+    // kSpeechSynthesizerInfoProperty
+    // kSpeechRecentSyncProperty
+    // kSpeechPhonemeSymbolsProperty
+	//
+    // NOTE: kSpeechCurrentVoiceProperty is automatically handled by the API
+    //
+
+    // This routine normally returns one of the following values:
+    //	noErr				0		No error 
+    //	paramErr			-50		Invalid value passed in a parameter. Your application passed an invalid parameter for dialog options. 
+    //	siUnknownInfoType	-231	Feature not implemented on synthesizer, Unknown type of information 
+    //	noSynthFound		-240	Could not find the specified speech synthesizer 
+
+    return [(GoogleTTSSynthesizer *)ssr copyProperty:property result:object];
+} 
+
+/*
+    Set the information for the designated speech channel and selector
+*/
+long 	SESetSpeechProperty( SpeechChannelIdentifier ssr, CFStringRef property, CFTypeRef object)
+{
+    // This routine is required to support the following properties:
+    // kSpeechCharacterModeProperty
+    // kSpeechNumberModeProperty
+    // kSpeechRateProperty  
+    // kSpeechPitchBaseProperty
+    // kSpeechPitchModProperty
+    // kSpeechVolumeProperty
+    // kSpeechCommandDelimiterProperty
+    // kSpeechResetProperty 
+    // kSpeechRefConProperty
+    // kSpeechTextDoneCallBack
+    // kSpeechSpeechDoneCallBack
+    // kSpeechSyncCallBack  
+    // kSpeechPhonemeCallBack
+    // kSpeechErrorCFCallBack
+    // kSpeechWordCFCallBack
+    // kSpeechOutputToFileURLProperty
+	//
+    // NOTE: Setting kSpeechCurrentVoiceProperty is automatically converted to a SEUseVoice call.
+	//
+
+    // This routine normally returns one of the following values:
+    //	noErr				0		No error 
+    //	paramErr			-50		Invalid value passed in a parameter. Your application passed an invalid parameter for dialog options. 
+    //	siUnknownInfoType	-231	Feature not implemented on synthesizer, Unknown type of information 
+    //	noSynthFound		-240	Could not find the specified speech synthesizer 
+
+    return [(GoogleTTSSynthesizer *)ssr setProperty:property value:object];
+} 
+
+/*************************** Buffer based calls ***********************************/
+
+#else /* SYNTHESIZER_USES_BUFFER_API */
+
+long 	SESpeechStatus( SpeechChannelIdentifier ssr, SpeechStatusInfo * status )
+{	
+	return SEGetSpeechInfo(ssr, soStatus, status);
+} 
+
+/* Must also be able to parse and handle the embedded commands defined in Inside Macintosh: Speech */
+long 	SESpeakBuffer( SpeechChannelIdentifier ssr, Ptr textBuf, long byteLen, long controlFlags )
+{
+    // This routine normally returns one of the following values:
+    //	noErr				0		No error 
+    //	paramErr			-50		Invalid value passed in a parameter. 
+    //	noSynthFound		-240	Could not find the specified speech synthesizer 
+    //	synthNotReady		-242	Speech synthesizer is still busy speaking 
+	CFStringEncoding	encoding = [(GoogleTTSSynthesizer *)ssr stringEncodingForBuffer];
+	CFStringRef			cfString = 
+		CFStringCreateWithBytes(NULL, (UInt8 *)textBuf, byteLen, encoding, false);
+	long 				result   =
+		[(GoogleTTSSynthesizer *)ssr 
+			startSpeaking:cfString 
+			noEndingProsody:((controlFlags & kNoEndingProsody) != 0)
+			noInterrupt:((controlFlags & kNoSpeechInterrupt) != 0)
+			preflight:((controlFlags & kPreflightThenPause) != 0)];
+	CFRelease(cfString);
+
+	return result;
+} 
+
+long 	SETextToPhonemes( SpeechChannelIdentifier ssr, char* textBuf, long textBytes, void** phonemeBuf, long* phonBytes)
+{
+    // This routine normally returns one of the following values:
+    //	noErr				0		No error 
+    //	paramErr			-50		Invalid value passed in a parameter. Your application passed an invalid parameter for dialog options. 
+    //	noSynthFound		-240	Could not find the specified speech synthesizer 
+	
+	CFStringRef			phon;
+	CFStringEncoding	encoding = [(GoogleTTSSynthesizer *)ssr stringEncodingForBuffer];
+	CFStringRef			cfString = 
+		CFStringCreateWithBytes(NULL, (UInt8 *)textBuf, textBytes, encoding, false);
+    long error = [(GoogleTTSSynthesizer *)ssr copyPhonemes:cfString result:&phon];
+	CFRelease(cfString);
+	if (error)
+		return error;
+	CFIndex   len = CFStringGetLength(phon);
+	CFIndex   max = CFStringGetMaximumSizeForEncoding(len, kCFStringEncodingMacRoman);
+	UInt8 *   buf = (UInt8 *)malloc(max);
+	CFStringGetBytes(phon, CFRangeMake(0, len), kCFStringEncodingMacRoman, ' ', false, buf, max, &len);
+	CFRelease(phon);	// copyPhonemes:result: returns a +1 reference; release it once converted
+	*phonemeBuf	  = buf;
+	*phonBytes    = len;
+	
+	return noErr;
+} 
+
+long 	SEUseDictionary( SpeechChannelIdentifier ssr, void* dictionary, long dictLength )
+{
+    // This routine normally returns one of the following values:
+    //	noErr				0		No error 
+    //	paramErr			-50		Invalid value passed in a parameter. Your application passed an invalid parameter for dialog options. 
+    //	noSynthFound		-240	Could not find the specified speech synthesizer 
+    //	bufTooSmall			-243	Output buffer is too small to hold result 
+    //	badDictFormat		-246	Pronunciation dictionary format error 
+	
+    return [(GoogleTTSSynthesizer *)ssr useDictionary:dictionary length:dictLength];
+} 
+
+/* 
+ Pass back the information for the designated speech channel and selector
+ */
+long 	SEGetSpeechInfo( SpeechChannelIdentifier ssr, unsigned long selector, void* speechInfo )
+{
+    // This routine is required to support the following selectors:
+    //	soStatus                      = 'stat'
+    //	soErrors                      = 'erro'
+    //	soInputMode                   = 'inpt'
+    //	soCharacterMode               = 'char'
+    //	soNumberMode                  = 'nmbr'
+    //	soRate                        = 'rate'
+    //	soPitchBase                   = 'pbas'
+    //	soPitchMod                    = 'pmod'
+    //	soVolume                      = 'volm'
+    //	soSynthType                   = 'vers'
+    //	soRecentSync                  = 'sync'
+    //	soPhonemeSymbols              = 'phsy'
+	//
+	// Optionally, you may support the following selector:
+    //	soSynthExtension              = 'xtnd'
+    //
+    // NOTE: 	The selector soCurrentVoice is automatically handled by the API,
+    // 			and selectors soCurrentA5, soSoundOutput are no longer necessary under Mac OS X.
+    //
+    //			The soPhonemeSymbols selector is passed as soPhonemeSymbolsPtr ('phsp'); speechInfo passes a pointer to a (void *)
+    //			The engine has to allocate a sufficiently sized area with malloc(), fill it in, and store it into 
+    //			*(void **)speechInfo. The API will dispose the memory. The call is rarely used and can probably be left unimplemented. 
+	
+    // This routine normally returns one of the following values:
+    //	noErr				0		No error 
+    //	paramErr			-50		Invalid value passed in a parameter. Your application passed an invalid parameter for dialog options. 
+    //	siUnknownInfoType	-231	Feature not implemented on synthesizer, Unknown type of information 
+    //	noSynthFound		-240	Could not find the specified speech synthesizer 
+	
+    return [(GoogleTTSSynthesizer *)ssr getSpeechInfo:selector result:speechInfo];
+} 
+
+/*
+ Set the information for the designated speech channel and selector
+ */
+long 	SESetSpeechInfo( SpeechChannelIdentifier ssr, unsigned long selector, void* speechInfo )
+{
+    // This routine should support the following selectors:
+    //	soInputMode                   = 'inpt'
+    //	soCharacterMode               = 'char'
+    //	soNumberMode                  = 'nmbr'
+    //	soRate                        = 'rate'
+    //	soPitchBase                   = 'pbas'
+    //	soPitchMod                    = 'pmod'
+    //	soVolume                      = 'volm'
+    //	soCommandDelimiter            = 'dlim'
+    //	soReset                       = 'rset'
+    //	soRefCon                      = 'refc'
+    //	soTextDoneCallBack            = 'tdcb'
+    //	soSpeechDoneCallBack          = 'sdcb'
+    //	soSyncCallBack                = 'sycb'
+    //	soErrorCallBack               = 'ercb'
+    //	soPhonemeCallBack             = 'phcb'
+    //	soWordCallBack                = 'wdcb'
+    //	soOutputToFileWithCFURL 	  = 'opaf' 		Pass a CFURLRef to write to this file, NULL to generate sound
+	//
+	//  Optionally, you may support the following extension:
+    //	soSynthExtension              = 'xtnd'
+    //
+    // NOTE: 	The selector soCurrentVoice is automatically handled by the API,
+    // 			and selectors soCurrentA5, soSoundOutput are no longer necessary under Mac OS X.
+	
+    // This routine normally returns one of the following values:
+    //	noErr				0		No error 
+    //	paramErr			-50		Invalid value passed in a parameter. Your application passed an invalid parameter for dialog options. 
+    //	siUnknownInfoType	-231	Feature not implemented on synthesizer, Unknown type of information 
+    //	noSynthFound		-240	Could not find the specified speech synthesizer 
+	
+    return [(GoogleTTSSynthesizer *)ssr setSpeechInfo:selector info:speechInfo];
+} 
+
+#endif
+
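
Client-side counterpart sketch (again, not part of the commit): the property lists in the comments above are exercised through the public CopySpeechProperty/SetSpeechProperty calls, which the API layer forwards to SECopySpeechProperty/SESetSpeechProperty on the channel. For example, reading and changing the speaking rate:

#import <ApplicationServices/ApplicationServices.h>
#import <Foundation/Foundation.h>

static void AdjustRate(SpeechChannel chan)      // chan: a channel using one of this synthesizer's voices
{
    CFTypeRef rate = NULL;
    if (CopySpeechProperty(chan, kSpeechRateProperty, &rate) == noErr && rate) {
        NSLog(@"current rate: %@", rate);       // forwarded to SECopySpeechProperty
        CFRelease(rate);
    }

    float wpm = 220.0f;                         // illustrative value
    CFNumberRef newRate = CFNumberCreate(kCFAllocatorDefault, kCFNumberFloatType, &wpm);
    SetSpeechProperty(chan, kSpeechRateProperty, newRate);  // forwarded to SESetSpeechProperty
    CFRelease(newRate);
}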

Elizabeth/Elizabeth-Info.plist

+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+	<key>CFBundleDevelopmentRegion</key>
+	<string>English</string>
+	<key>CFBundleExecutable</key>
+	<string>${EXECUTABLE_NAME}</string>
+	<key>CFBundleIconFile</key>
+	<string></string>
+	<key>CFBundleIdentifier</key>
+	<string>net.vucica.${PRODUCT_NAME:rfc1034identifier}</string>
+	<key>CFBundleInfoDictionaryVersion</key>
+	<string>6.0</string>
+	<key>CFBundleName</key>
+	<string>${PRODUCT_NAME}</string>
+	<key>CFBundlePackageType</key>
+	<string>BNDL</string>
+	<key>CFBundleShortVersionString</key>
+	<string>1.0</string>
+	<key>CFBundleSignature</key>
+	<string>????</string>
+	<key>CFBundleVersion</key>
+	<string>1</string>
+	<key>CFPlugInDynamicRegisterFunction</key>
+	<string></string>
+	<key>CFPlugInDynamicRegistration</key>
+	<string>NO</string>
+	<key>CFPlugInFactories</key>
+	<dict>
+		<key>00000000-0000-0000-0000-000000000000</key>
+		<string>MyFactoryFunction</string>
+	</dict>
+	<key>CFPlugInTypes</key>
+	<dict>
+		<key>00000000-0000-0000-0000-000000000000</key>
+		<array>
+			<string>00000000-0000-0000-0000-000000000000</string>
+		</array>
+	</dict>
+	<key>CFPlugInUnloadFunction</key>
+	<string></string>
+	<key>NSHumanReadableCopyright</key>
+	<string>Copyright © 2012. Ivan Vučica. All rights reserved.</string>
+#ifdef VOICE_INFO_IN_PLIST
+#include VOICE_PLIST
+#endif
+</dict>
+</plist>

Elizabeth/Elizabeth-Prefix.pch

+//
+// Prefix header for all source files of the 'GoogleElizabeth' target in the 'GoogleElizabeth' project
+//
+
+#ifdef __OBJC__
+    #import <Cocoa/Cocoa.h>
+#endif

Elizabeth/Elizabeth.vd

+#
+# File: Elizabeth.vd
+# 
+# Abstract: Voice Description for voice in modern format
+#
+# Version: 1.0
+# 
+# Modified by Ivan Vučica from "Samuel.vd" in MorseSynthesizer example.
+# Modifications Copyright (c) 2012 Ivan Vučica
+#
+# Disclaimer: IMPORTANT:  This Apple software is supplied to you by 
+# Apple Inc. ("Apple") in consideration of your agreement to the
+# following terms, and your use, installation, modification or
+# redistribution of this Apple software constitutes acceptance of these
+# terms.  If you do not agree with these terms, please do not use,
+# install, modify or redistribute this Apple software.
+# 
+# In consideration of your agreement to abide by the following terms, and
+# subject to these terms, Apple grants you a personal, non-exclusive
+# license, under Apple's copyrights in this original Apple software (the
+# "Apple Software"), to use, reproduce, modify and redistribute the Apple
+# Software, with or without modifications, in source and/or binary forms;
+# provided that if you redistribute the Apple Software in its entirety and
+# without modifications, you must retain this notice and the following
+# text and disclaimers in all such redistributions of the Apple Software. 
+# Neither the name, trademarks, service marks or logos of Apple Inc. 
+# may be used to endorse or promote products derived from the Apple
+# Software without specific prior written permission from Apple.  Except
+# as expressly stated in this notice, no other rights or licenses, express
+# or implied, are granted by Apple herein, including but not limited to
+# any patent rights that may be infringed by your derivative works or by
+# other works in which the Apple Software may be incorporated.
+# 
+# The Apple Software is provided by Apple on an "AS IS" basis.  APPLE
+# MAKES NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION
+# THE IMPLIED WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND
+# OPERATION ALONE OR IN COMBINATION WITH YOUR PRODUCTS.
+# 
+# IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL
+# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION,
+# MODIFICATION AND/OR DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED
+# AND WHETHER UNDER THEORY OF CONTRACT, TORT (INCLUDING NEGLIGENCE),
+# STRICT LIABILITY OR OTHERWISE, EVEN IF APPLE HAS BEEN ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+# 
+# Copyright (C) 2011 Apple Inc. All Rights Reserved.
+#
+voice_creator	= 'gtts'
+voice_id	= 950
+version         = 1
+name            = 'Elizabeth'
+demo_text       = 'Looks like I am no longer trapped being a translator.'
+gender          = :female
+age             = 30
+#
+# Script codes are one of :roman :utf16 :utf8 or a numeric code as defined in smXXX in 
+# /System/Library/Frameworks/CoreServices.framework/Versions/A/Frameworks/CarbonCore.framework/Versions/A/Headers/Script.h
+#
+script          = :utf8
+#
+# Language and region codes are numeric as defined in langXXX and verXXX in 
+# /System/Library/Frameworks/CoreServices.framework/Versions/A/Frameworks/CarbonCore.framework/Versions/A/Headers/Script.h
+#
+language        = 0
+region          = 0
+#
+# For modern voices, use a locale identifier instead
+#
+locale		= 'en_US'
+#
+# Hints to voiceover as to the extent of our character set
+#
+supportedCharacters          = [[33,126],[0xC0,0xFF]]
+individuallySpokenCharacters = [[33,126],[0xC0,0xFF]]
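
For reference, the numeric language and region codes used above are the Script.h constants; 0 and 0 are langEnglish and verUS, consistent with the modern locale string 'en_US'. A quick check:

#include <CoreServices/CoreServices.h>
#include <stdio.h>

int main(void)
{
    // language = 0, region = 0 in the .vd correspond to these Script.h constants.
    printf("langEnglish = %d, verUS = %d\n", (int)langEnglish, (int)verUS);   // both print 0
    return 0;
}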

Elizabeth/en.lproj/InfoPlist.strings

+/* Localized versions of Info.plist keys */
+

GoogleTTS.xcodeproj/project.pbxproj

 /* Begin PBXBuildFile section */
 		7F86A3C815CD3A5300E1ECEB /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 7F86A3C715CD3A5300E1ECEB /* CoreFoundation.framework */; };
 		7F86A3CE15CD3A5300E1ECEB /* InfoPlist.strings in Resources */ = {isa = PBXBuildFile; fileRef = 7F86A3CC15CD3A5300E1ECEB /* InfoPlist.strings */; };
+		7F86A3D915CD3B1E00E1ECEB /* SynthesizerAPI.m in Sources */ = {isa = PBXBuildFile; fileRef = 7F86A3D815CD3B1E00E1ECEB /* SynthesizerAPI.m */; };
+		7F86A3E115CD3CD300E1ECEB /* GoogleTTSSynthesizer.m in Sources */ = {isa = PBXBuildFile; fileRef = 7F86A3E015CD3CD300E1ECEB /* GoogleTTSSynthesizer.m */; };
+		7F86A3E415CD479A00E1ECEB /* GoogleTTSSynthesizerCF.m in Sources */ = {isa = PBXBuildFile; fileRef = 7F86A3E315CD479A00E1ECEB /* GoogleTTSSynthesizerCF.m */; };
+		7F86A3E615CD490E00E1ECEB /* AVFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 7F86A3E515CD490E00E1ECEB /* AVFoundation.framework */; };
+		7F86A3E815CD491300E1ECEB /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 7F86A3E715CD491300E1ECEB /* Foundation.framework */; };
+		7F86A3EA15CD491F00E1ECEB /* ApplicationServices.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 7F86A3E915CD491F00E1ECEB /* ApplicationServices.framework */; };
+		7F86A40515CD5BE500E1ECEB /* problem.mp3 in Resources */ = {isa = PBXBuildFile; fileRef = 7F86A40415CD5BE500E1ECEB /* problem.mp3 */; };
 /* End PBXBuildFile section */
 
+/* Begin PBXContainerItemProxy section */
+		7F86A3FF15CD4F8200E1ECEB /* PBXContainerItemProxy */ = {
+			isa = PBXContainerItemProxy;
+			containerPortal = 7F86A3BB15CD3A5300E1ECEB /* Project object */;
+			proxyType = 1;
+			remoteGlobalIDString = 7F86A3F015CD498A00E1ECEB;
+			remoteInfo = Elizabeth;
+		};
+		7F86A41415CD5F4400E1ECEB /* PBXContainerItemProxy */ = {
+			isa = PBXContainerItemProxy;
+			containerPortal = 7F86A3BB15CD3A5300E1ECEB /* Project object */;
+			proxyType = 1;
+			remoteGlobalIDString = 7F86A40715CD5D6C00E1ECEB;
+			remoteInfo = Miku;
+		};
+/* End PBXContainerItemProxy section */
+
 /* Begin PBXFileReference section */
-		7F86A3C415CD3A5300E1ECEB /* GoogleTTS.bundle */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = GoogleTTS.bundle; sourceTree = BUILT_PRODUCTS_DIR; };
+		7F86A3C415CD3A5300E1ECEB /* GoogleTTS.SpeechSynthesizer */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = GoogleTTS.SpeechSynthesizer; sourceTree = BUILT_PRODUCTS_DIR; };
 		7F86A3C715CD3A5300E1ECEB /* CoreFoundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreFoundation.framework; path = System/Library/Frameworks/CoreFoundation.framework; sourceTree = SDKROOT; };
 		7F86A3CB15CD3A5300E1ECEB /* GoogleTTS-Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = "GoogleTTS-Info.plist"; sourceTree = "<group>"; };
 		7F86A3CD15CD3A5300E1ECEB /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = "<group>"; };
 		7F86A3CF15CD3A5300E1ECEB /* GoogleTTS-Prefix.pch */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = "GoogleTTS-Prefix.pch"; sourceTree = "<group>"; };
+		7F86A3D715CD3B1E00E1ECEB /* SpeechEngine.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SpeechEngine.h; sourceTree = "<group>"; };
+		7F86A3D815CD3B1E00E1ECEB /* SynthesizerAPI.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = SynthesizerAPI.m; sourceTree = "<group>"; };
+		7F86A3DE15CD3BD600E1ECEB /* Elizabeth.vd */ = {isa = PBXFileReference; lastKnownFileType = text; path = Elizabeth.vd; sourceTree = "<group>"; };
+		7F86A3DF15CD3CD300E1ECEB /* GoogleTTSSynthesizer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GoogleTTSSynthesizer.h; sourceTree = "<group>"; };
+		7F86A3E015CD3CD300E1ECEB /* GoogleTTSSynthesizer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GoogleTTSSynthesizer.m; sourceTree = "<group>"; };
+		7F86A3E215CD479A00E1ECEB /* GoogleTTSSynthesizerCF.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GoogleTTSSynthesizerCF.h; sourceTree = "<group>"; };
+		7F86A3E315CD479A00E1ECEB /* GoogleTTSSynthesizerCF.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = GoogleTTSSynthesizerCF.m; sourceTree = "<group>"; };
+		7F86A3E515CD490E00E1ECEB /* AVFoundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = AVFoundation.framework; path = System/Library/Frameworks/AVFoundation.framework; sourceTree = SDKROOT; };
+		7F86A3E715CD491300E1ECEB /* Foundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Foundation.framework; path = System/Library/Frameworks/Foundation.framework; sourceTree = SDKROOT; };
+		7F86A3E915CD491F00E1ECEB /* ApplicationServices.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = ApplicationServices.framework; path = System/Library/Frameworks/ApplicationServices.framework; sourceTree = SDKROOT; };
+		7F86A3F115CD498A00E1ECEB /* Elizabeth.SpeechVoice */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = Elizabeth.SpeechVoice; sourceTree = BUILT_PRODUCTS_DIR; };
+		7F86A3F515CD498A00E1ECEB /* Elizabeth-Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = "Elizabeth-Info.plist"; sourceTree = "<group>"; };
+		7F86A3F715CD498A00E1ECEB /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = "<group>"; };
+		7F86A3F915CD498A00E1ECEB /* Elizabeth-Prefix.pch */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = "Elizabeth-Prefix.pch"; sourceTree = "<group>"; };
+		7F86A40115CD54C400E1ECEB /* BuildVoiceDescription */ = {isa = PBXFileReference; lastKnownFileType = text.script.ruby; name = BuildVoiceDescription; path = Scripts/BuildVoiceDescription; sourceTree = SOURCE_ROOT; };
+		7F86A40415CD5BE500E1ECEB /* problem.mp3 */ = {isa = PBXFileReference; lastKnownFileType = audio.mp3; path = problem.mp3; sourceTree = SOURCE_ROOT; };
+		7F86A40C15CD5D6C00E1ECEB /* Miku.SpeechVoice */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = Miku.SpeechVoice; sourceTree = BUILT_PRODUCTS_DIR; };
+		7F86A41015CD5DD100E1ECEB /* ja */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = ja; path = ja.lproj/InfoPlist.strings; sourceTree = "<group>"; };
+		7F86A41115CD5DD100E1ECEB /* Miku-Info.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.xml; path = "Miku-Info.plist"; sourceTree = "<group>"; };
+		7F86A41215CD5DD100E1ECEB /* Miku-Prefix.pch */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "Miku-Prefix.pch"; sourceTree = "<group>"; };
+		7F86A41315CD5DD100E1ECEB /* Miku.vd */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = Miku.vd; sourceTree = "<group>"; };
 /* End PBXFileReference section */
 
 /* Begin PBXFrameworksBuildPhase section */
 			isa = PBXFrameworksBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
+				7F86A3EA15CD491F00E1ECEB /* ApplicationServices.framework in Frameworks */,
+				7F86A3E815CD491300E1ECEB /* Foundation.framework in Frameworks */,
+				7F86A3E615CD490E00E1ECEB /* AVFoundation.framework in Frameworks */,
 				7F86A3C815CD3A5300E1ECEB /* CoreFoundation.framework in Frameworks */,
 			);
 			runOnlyForDeploymentPostprocessing = 0;
 		7F86A3B915CD3A5300E1ECEB = {
 			isa = PBXGroup;
 			children = (
+				7F86A3E915CD491F00E1ECEB /* ApplicationServices.framework */,
+				7F86A3E715CD491300E1ECEB /* Foundation.framework */,
+				7F86A3E515CD490E00E1ECEB /* AVFoundation.framework */,
+				7F86A3D615CD3B1E00E1ECEB /* API */,
 				7F86A3C915CD3A5300E1ECEB /* GoogleTTS */,
+				7F86A3F315CD498A00E1ECEB /* Elizabeth */,
+				7F86A40E15CD5DD100E1ECEB /* Miku */,
 				7F86A3C615CD3A5300E1ECEB /* Frameworks */,
 				7F86A3C515CD3A5300E1ECEB /* Products */,
 			);
 		7F86A3C515CD3A5300E1ECEB /* Products */ = {
 			isa = PBXGroup;
 			children = (
-				7F86A3C415CD3A5300E1ECEB /* GoogleTTS.bundle */,
+				7F86A3C415CD3A5300E1ECEB /* GoogleTTS.SpeechSynthesizer */,
+				7F86A3F115CD498A00E1ECEB /* Elizabeth.SpeechVoice */,
+				7F86A40C15CD5D6C00E1ECEB /* Miku.SpeechVoice */,
 			);
 			name = Products;
 			sourceTree = "<group>";
 			isa = PBXGroup;
 			children = (
 				7F86A3CA15CD3A5300E1ECEB /* Supporting Files */,
+				7F86A3DF15CD3CD300E1ECEB /* GoogleTTSSynthesizer.h */,
+				7F86A3E015CD3CD300E1ECEB /* GoogleTTSSynthesizer.m */,
+				7F86A3E215CD479A00E1ECEB /* GoogleTTSSynthesizerCF.h */,
+				7F86A3E315CD479A00E1ECEB /* GoogleTTSSynthesizerCF.m */,
 			);
 			path = GoogleTTS;
 			sourceTree = "<group>";
 				7F86A3CB15CD3A5300E1ECEB /* GoogleTTS-Info.plist */,
 				7F86A3CC15CD3A5300E1ECEB /* InfoPlist.strings */,
 				7F86A3CF15CD3A5300E1ECEB /* GoogleTTS-Prefix.pch */,
+				7F86A40415CD5BE500E1ECEB /* problem.mp3 */,
 			);
 			name = "Supporting Files";
 			sourceTree = "<group>";
 		};
+		7F86A3D615CD3B1E00E1ECEB /* API */ = {
+			isa = PBXGroup;
+			children = (
+				7F86A3D715CD3B1E00E1ECEB /* SpeechEngine.h */,
+				7F86A3D815CD3B1E00E1ECEB /* SynthesizerAPI.m */,
+			);
+			path = API;
+			sourceTree = "<group>";
+		};
+		7F86A3F315CD498A00E1ECEB /* Elizabeth */ = {
+			isa = PBXGroup;
+			children = (
+				7F86A3DE15CD3BD600E1ECEB /* Elizabeth.vd */,
+				7F86A3F415CD498A00E1ECEB /* Supporting Files */,
+			);
+			path = Elizabeth;
+			sourceTree = "<group>";
+		};
+		7F86A3F415CD498A00E1ECEB /* Supporting Files */ = {
+			isa = PBXGroup;
+			children = (
+				7F86A40115CD54C400E1ECEB /* BuildVoiceDescription */,
+				7F86A3F515CD498A00E1ECEB /* Elizabeth-Info.plist */,
+				7F86A3F615CD498A00E1ECEB /* InfoPlist.strings */,
+				7F86A3F915CD498A00E1ECEB /* Elizabeth-Prefix.pch */,
+			);
+			name = "Supporting Files";
+			sourceTree = "<group>";
+		};
+		7F86A40E15CD5DD100E1ECEB /* Miku */ = {
+			isa = PBXGroup;
+			children = (
+				7F86A40F15CD5DD100E1ECEB /* InfoPlist.strings */,
+				7F86A41115CD5DD100E1ECEB /* Miku-Info.plist */,
+				7F86A41215CD5DD100E1ECEB /* Miku-Prefix.pch */,
+				7F86A41315CD5DD100E1ECEB /* Miku.vd */,
+			);
+			path = Miku;
+			sourceTree = "<group>";
+		};
 /* End PBXGroup section */
 
+/* Begin PBXLegacyTarget section */
+		7F86A3DA15CD3B9000E1ECEB /* BuildVoiceDescription */ = {
+			isa = PBXLegacyTarget;
+			buildArgumentsString = "--xml Elizabeth/Elizabeth.vd \"$(BUILT_PRODUCTS_DIR)\"/ElizabethVoiceAttributes";
+			buildConfigurationList = 7F86A3DB15CD3B9000E1ECEB /* Build configuration list for PBXLegacyTarget "BuildVoiceDescription" */;
+			buildPhases = (
+			);
+			buildToolPath = ./Scripts/BuildVoiceDescription;
+			buildWorkingDirectory = "";
+			dependencies = (
+			);
+			name = BuildVoiceDescription;
+			passBuildSettingsInEnvironment = 1;
+			productName = BuildVoiceDescription;
+		};
+/* End PBXLegacyTarget section */
+
 /* Begin PBXNativeTarget section */
 		7F86A3C315CD3A5300E1ECEB /* GoogleTTS */ = {
 			isa = PBXNativeTarget;
 			buildRules = (
 			);
 			dependencies = (
+				7F86A41515CD5F4400E1ECEB /* PBXTargetDependency */,
+				7F86A40015CD4F8200E1ECEB /* PBXTargetDependency */,
 			);
 			name = GoogleTTS;
 			productName = GoogleTTS;
-			productReference = 7F86A3C415CD3A5300E1ECEB /* GoogleTTS.bundle */;
+			productReference = 7F86A3C415CD3A5300E1ECEB /* GoogleTTS.SpeechSynthesizer */;
+			productType = "com.apple.product-type.bundle";
+		};
+		7F86A3F015CD498A00E1ECEB /* Elizabeth */ = {
+			isa = PBXNativeTarget;
+			buildConfigurationList = 7F86A3FA15CD498A00E1ECEB /* Build configuration list for PBXNativeTarget "Elizabeth" */;
+			buildPhases = (
+				7F86A40615CD5CF100E1ECEB /* ShellScript */,
+			);
+			buildRules = (
+			);
+			dependencies = (
+			);
+			name = Elizabeth;
+			productName = GoogleElizabeth;
+			productReference = 7F86A3F115CD498A00E1ECEB /* Elizabeth.SpeechVoice */;
+			productType = "com.apple.product-type.bundle";
+		};
+		7F86A40715CD5D6C00E1ECEB /* Miku */ = {
+			isa = PBXNativeTarget;
+			buildConfigurationList = 7F86A40915CD5D6C00E1ECEB /* Build configuration list for PBXNativeTarget "Miku" */;
+			buildPhases = (
+				7F86A40815CD5D6C00E1ECEB /* ShellScript */,
+			);
+			buildRules = (
+			);
+			dependencies = (
+			);
+			name = Miku;
+			productName = GoogleElizabeth;
+			productReference = 7F86A40C15CD5D6C00E1ECEB /* Miku.SpeechVoice */;
 			productType = "com.apple.product-type.bundle";
 		};
 /* End PBXNativeTarget section */
 			hasScannedForEncodings = 0;
 			knownRegions = (
 				en,
+				ja,
 			);
 			mainGroup = 7F86A3B915CD3A5300E1ECEB;
 			productRefGroup = 7F86A3C515CD3A5300E1ECEB /* Products */;
 			projectRoot = "";
 			targets = (
 				7F86A3C315CD3A5300E1ECEB /* GoogleTTS */,
+				7F86A3DA15CD3B9000E1ECEB /* BuildVoiceDescription */,
+				7F86A3F015CD498A00E1ECEB /* Elizabeth */,
+				7F86A40715CD5D6C00E1ECEB /* Miku */,
 			);
 		};
 /* End PBXProject section */
 			buildActionMask = 2147483647;
 			files = (
 				7F86A3CE15CD3A5300E1ECEB /* InfoPlist.strings in Resources */,
+				7F86A40515CD5BE500E1ECEB /* problem.mp3 in Resources */,
 			);
 			runOnlyForDeploymentPostprocessing = 0;
 		};
 /* End PBXResourcesBuildPhase section */
 
+/* Begin PBXShellScriptBuildPhase section */
+		7F86A40615CD5CF100E1ECEB /* ShellScript */ = {
+			isa = PBXShellScriptBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+			);
+			inputPaths = (
+			);
+			outputPaths = (
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+			shellPath = /bin/sh;
+			shellScript = "./Scripts/BuildVoiceDescription --xml Elizabeth/Elizabeth.vd \"${BUILT_PRODUCTS_DIR}\"/ElizabethVoiceAttributes\n";
+		};
+		7F86A40815CD5D6C00E1ECEB /* ShellScript */ = {
+			isa = PBXShellScriptBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+			);
+			inputPaths = (
+			);
+			outputPaths = (
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+			shellPath = /bin/sh;
+			shellScript = "./Scripts/BuildVoiceDescription --xml Miku/Miku.vd \"${BUILT_PRODUCTS_DIR}\"/MikuVoiceAttributes\n";
+		};
+/* End PBXShellScriptBuildPhase section */
+
 /* Begin PBXSourcesBuildPhase section */
 		7F86A3C015CD3A5300E1ECEB /* Sources */ = {
 			isa = PBXSourcesBuildPhase;
 			buildActionMask = 2147483647;
 			files = (
+				7F86A3D915CD3B1E00E1ECEB /* SynthesizerAPI.m in Sources */,
+				7F86A3E115CD3CD300E1ECEB /* GoogleTTSSynthesizer.m in Sources */,
+				7F86A3E415CD479A00E1ECEB /* GoogleTTSSynthesizerCF.m in Sources */,
 			);
 			runOnlyForDeploymentPostprocessing = 0;
 		};
 /* End PBXSourcesBuildPhase section */
 
+/* Begin PBXTargetDependency section */
+		7F86A40015CD4F8200E1ECEB /* PBXTargetDependency */ = {
+			isa = PBXTargetDependency;
+			target = 7F86A3F015CD498A00E1ECEB /* Elizabeth */;
+			targetProxy = 7F86A3FF15CD4F8200E1ECEB /* PBXContainerItemProxy */;
+		};
+		7F86A41515CD5F4400E1ECEB /* PBXTargetDependency */ = {
+			isa = PBXTargetDependency;
+			target = 7F86A40715CD5D6C00E1ECEB /* Miku */;
+			targetProxy = 7F86A41415CD5F4400E1ECEB /* PBXContainerItemProxy */;
+		};
+/* End PBXTargetDependency section */
+
 /* Begin PBXVariantGroup section */
 		7F86A3CC15CD3A5300E1ECEB /* InfoPlist.strings */ = {
 			isa = PBXVariantGroup;
 			name = InfoPlist.strings;
 			sourceTree = "<group>";
 		};
+		7F86A3F615CD498A00E1ECEB /* InfoPlist.strings */ = {
+			isa = PBXVariantGroup;
+			children = (
+				7F86A3F715CD498A00E1ECEB /* en */,
+			);
+			name = InfoPlist.strings;
+			sourceTree = "<group>";
+		};
+		7F86A40F15CD5DD100E1ECEB /* InfoPlist.strings */ = {
+			isa = PBXVariantGroup;
+			children = (
+				7F86A41015CD5DD100E1ECEB /* ja */,
+			);
+			name = InfoPlist.strings;
+			sourceTree = "<group>";
+		};
 /* End PBXVariantGroup section */
 
 /* Begin XCBuildConfiguration section */
 			isa = XCBuildConfiguration;
 			buildSettings = {
 				ALWAYS_SEARCH_USER_PATHS = NO;
-				ARCHS = "$(ARCHS_STANDARD_64_BIT)";
+				ARCHS = "$(ARCHS_STANDARD_32_64_BIT)";
 				CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x";
 				CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
 				COPY_PHASE_STRIP = NO;
 			isa = XCBuildConfiguration;
 			buildSettings = {
 				ALWAYS_SEARCH_USER_PATHS = NO;
-				ARCHS = "$(ARCHS_STANDARD_64_BIT)";
+				ARCHS = "$(ARCHS_STANDARD_32_64_BIT)";
 				CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x";
 				CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
 				COPY_PHASE_STRIP = YES;
 				GCC_PRECOMPILE_PREFIX_HEADER = YES;
 				GCC_PREFIX_HEADER = "GoogleTTS/GoogleTTS-Prefix.pch";
 				INFOPLIST_FILE = "GoogleTTS/GoogleTTS-Info.plist";
-				INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Bundles";
+				INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Speech/Synthesizers";
 				PRODUCT_NAME = "$(TARGET_NAME)";
-				WRAPPER_EXTENSION = bundle;
+				WRAPPER_EXTENSION = SpeechSynthesizer;
 			};
 			name = Debug;
 		};
 				GCC_PRECOMPILE_PREFIX_HEADER = YES;
 				GCC_PREFIX_HEADER = "GoogleTTS/GoogleTTS-Prefix.pch";
 				INFOPLIST_FILE = "GoogleTTS/GoogleTTS-Info.plist";
-				INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Bundles";
+				INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Speech/Synthesizers";
+				PRODUCT_NAME = "$(TARGET_NAME)";
+				WRAPPER_EXTENSION = SpeechSynthesizer;
+			};
+			name = Release;
+		};
+		7F86A3DC15CD3B9000E1ECEB /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				DEBUGGING_SYMBOLS = YES;
+				GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
+				GCC_OPTIMIZATION_LEVEL = 0;
+				OTHER_CFLAGS = "";
+				OTHER_LDFLAGS = "";
+				PRODUCT_NAME = "$(TARGET_NAME)";
+			};
+			name = Debug;
+		};
+		7F86A3DD15CD3B9000E1ECEB /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				OTHER_CFLAGS = "";
+				OTHER_LDFLAGS = "";
+				PRODUCT_NAME = "$(TARGET_NAME)";
+			};
+			name = Release;
+		};
+		7F86A3FB15CD498A00E1ECEB /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				COMBINE_HIDPI_IMAGES = YES;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "Elizabeth/Elizabeth-Prefix.pch";
+				INFOPLIST_FILE = "Elizabeth/Elizabeth-Info.plist";
+				INFOPLIST_PREPROCESS = YES;
+				INFOPLIST_PREPROCESSOR_DEFINITIONS = (
+					VOICE_INFO_IN_PLIST,
+					"VOICE_PLIST=\\\"\"${BUILT_PRODUCTS_DIR}\"/ElizabethVoiceAttributes\\\"",
+				);
+				INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Speech/Voices";
+				PRODUCT_NAME = "$(TARGET_NAME)";
+				WRAPPER_EXTENSION = SpeechVoice;
+			};
+			name = Debug;
+		};
+		7F86A3FC15CD498A00E1ECEB /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				COMBINE_HIDPI_IMAGES = YES;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "Elizabeth/Elizabeth-Prefix.pch";
+				INFOPLIST_FILE = "Elizabeth/Elizabeth-Info.plist";
+				INFOPLIST_PREPROCESS = YES;
+				INFOPLIST_PREPROCESSOR_DEFINITIONS = (
+					VOICE_INFO_IN_PLIST,
+					"VOICE_PLIST=\\\"\"${BUILT_PRODUCTS_DIR}\"/ElizabethVoiceAttributes\\\"",
+				);
+				INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Speech/Voices";
 				PRODUCT_NAME = "$(TARGET_NAME)";
-				WRAPPER_EXTENSION = bundle;
+				WRAPPER_EXTENSION = SpeechVoice;
+			};
+			name = Release;
+		};
+		7F86A40A15CD5D6C00E1ECEB /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				COMBINE_HIDPI_IMAGES = YES;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "Elizabeth/Elizabeth-Prefix.pch";
+				INFOPLIST_FILE = "Miku/Miku-Info.plist";
+				INFOPLIST_PREPROCESS = YES;
+				INFOPLIST_PREPROCESSOR_DEFINITIONS = (
+					VOICE_INFO_IN_PLIST,
+					"VOICE_PLIST=\\\"\"${BUILT_PRODUCTS_DIR}\"/MikuVoiceAttributes\\\"",
+				);
+				INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Speech/Voices";
+				PRODUCT_NAME = Miku;
+				WRAPPER_EXTENSION = SpeechVoice;
+			};
+			name = Debug;
+		};
+		7F86A40B15CD5D6C00E1ECEB /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				COMBINE_HIDPI_IMAGES = YES;
+				GCC_PRECOMPILE_PREFIX_HEADER = YES;
+				GCC_PREFIX_HEADER = "Elizabeth/Elizabeth-Prefix.pch";
+				INFOPLIST_FILE = "Miku/Miku-Info.plist";
+				INFOPLIST_PREPROCESS = YES;
+				INFOPLIST_PREPROCESSOR_DEFINITIONS = (
+					VOICE_INFO_IN_PLIST,
+					"VOICE_PLIST=\\\"\"${BUILT_PRODUCTS_DIR}\"/MikuVoiceAttributes\\\"",
+				);
+				INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Speech/Voices";
+				PRODUCT_NAME = Miku;
+				WRAPPER_EXTENSION = SpeechVoice;
 			};
 			name = Release;
 		};
 			);
 			defaultConfigurationIsVisible = 0;
 		};
+		7F86A3DB15CD3B9000E1ECEB /* Build configuration list for PBXLegacyTarget "BuildVoiceDescription" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				7F86A3DC15CD3B9000E1ECEB /* Debug */,
+				7F86A3DD15CD3B9000E1ECEB /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+		};
+		7F86A3FA15CD498A00E1ECEB /* Build configuration list for PBXNativeTarget "Elizabeth" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				7F86A3FB15CD498A00E1ECEB /* Debug */,
+				7F86A3FC15CD498A00E1ECEB /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+		};
+		7F86A40915CD5D6C00E1ECEB /* Build configuration list for PBXNativeTarget "Miku" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				7F86A40A15CD5D6C00E1ECEB /* Debug */,
+				7F86A40B15CD5D6C00E1ECEB /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+		};
 /* End XCConfigurationList section */
 	};
 	rootObject = 7F86A3BB15CD3A5300E1ECEB /* Project object */;

GoogleTTS/GoogleTTS-Info.plist

 	<key>CFBundleShortVersionString</key>
 	<string>1.0</string>
 	<key>CFBundleSignature</key>
-	<string>????</string>
+	<string>GTTS</string>
 	<key>CFBundleVersion</key>
 	<string>1</string>
 	<key>CFPlugInDynamicRegisterFunction</key>
 	<string></string>
 	<key>NSHumanReadableCopyright</key>
 	<string>Copyright © 2012. Ivan Vučica. All rights reserved.</string>
+	<key>SpeechEngineTypeArray</key>
+	<array>
+		<integer>1735685235</integer>
+	</array>
 </dict>
 </plist>
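
The SpeechEngineTypeArray value is the engine's four-character type code written out as an integer: 1735685235 is 'gtts', the same creator code the voice descriptions declare (voice_creator = 'gtts'), which is how the Speech Synthesis API matches those voices to this synthesizer. A quick check of the encoding:

#include <stdio.h>

int main(void)
{
    unsigned long gtts = ((unsigned long)'g' << 24) |
                         ((unsigned long)'t' << 16) |
                         ((unsigned long)'t' <<  8) |
                          (unsigned long)'s';
    printf("%lu\n", gtts);   // prints 1735685235
    return 0;
}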

GoogleTTS/GoogleTTSSynthesizer.h

+//
+//  GoogleTTSSynthesizer.h
+//  GoogleTTS
+//
+//  Created by Ivan Vučica on 4.8.2012.
+//  Copyright (c) 2012. Ivan Vučica. All rights reserved.
+//
+
+#import <Foundation/Foundation.h>
+
+#include <CoreAudio/CoreAudio.h>
+#include <AudioToolbox/AudioToolbox.h>
+#include <dispatch/dispatch.h>
+@class AudioOutput;
+@class AVAudioPlayer;
+#import <AVFoundation/AVFoundation.h>
+
+@interface GoogleTTSSynthesizer : NSObject<AVAudioPlayerDelegate>
+{
+	//
+	// Synthesizer state (applicable to most synthesizers)
+	//
+	int					synthState;
+	float				speechRate;
+	float				pitchBase;
+	float				volume;
+	CFStringRef			openDelim;
+	CFStringRef			closeDelim;
+	SRefCon				clientRefCon;
+	CFStringRef			textBeingSpoken;
+	
+	//
+	// Audio output state (applicable to most synthesizers)
+	//
+	AudioOutput *		audioOutput;
+	ExtAudioFileRef		audioFileRef;	    // Audio file to save to
+	bool				audioFileOwned;		// Did we open it?
+	AudioDeviceID		audioDevice;		// Audio device to play to	
+	//
+	// Callbacks
+	//
+	SpeechTextDoneProcPtr	textDoneCallback;	// Callback to call when we no longer need the input text
+	SpeechDoneProcPtr		speechDoneCallback;	// Callback to call when we're done
+	SpeechSyncProcPtr		syncCallback;		// Callback to call for sync embedded command
+	SpeechWordCFProcPtr		wordCallback;		// Callback to call for each word 
+	//
+	// We work through a dispatch queue. It's the modern thing to do
+	//
+	dispatch_queue_t	queue;
+	dispatch_source_t	generateSamples;
+    
+    /////////////
+    NSMutableString * textToSpeak;
+    AVAudioPlayer * player;
+    NSString * voiceName;
+}
+
+@property (nonatomic, retain) AVAudioPlayer *player;
+@property (nonatomic, retain) NSString * voiceName;
+
+- (id)init;
+- (void)close;	
+- (long)useVoice:(VoiceSpec *)voice withBundle:(CFBundleRef)inVoiceSpecBundle;
+- (long)startSpeaking:(CFStringRef)text 
+	  noEndingProsody:(BOOL)noEndingProsody noInterrupt:(BOOL)noInterrupt 
+			preflight:(BOOL)preflight;
+- (long)copyPhonemes:(CFStringRef)text result:(CFStringRef *)phonemes;
+- (long)stopSpeakingAt:(unsigned long)whereToStop;
+- (long)pauseSpeakingAt:(unsigned long)whereToStop;
+- (long)continueSpeaking;
+- (CFStringEncoding)stringEncodingForBuffer;
+
++ (long)willUnloadBundle;
+
+// Private
+- (void)createSoundChannel:(BOOL)forAudioUnit;
+- (void)disposeSoundChannel;
+
+
+@end
+
+/*
+ Private: state
+ */
+
+enum SynthState {
+	kSynthStopped,	// Ready for next speech call
+	kSynthRunning,	// Generating audio
+	kSynthPaused,	// More tokens available, but paused
+	kSynthStopping,	// Stop after generating next batch of samples
+	kSynthPausing	// Pause after generating next batch of samples
+};
+
+
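A rough, hypothetical sketch of the call sequence a host would drive through this interface (normally the Speech Synthesis Manager does this via the engine's C SPI entry points; the VoiceSpec creator/ID below are placeholders):

    GoogleTTSSynthesizer *synth = [[GoogleTTSSynthesizer alloc] init];

    VoiceSpec spec;
    MakeVoiceSpec('gtts', 1, &spec);            // placeholder creator/ID
    [synth useVoice:&spec withBundle:NULL];

    [synth startSpeaking:CFSTR("Hello world")
         noEndingProsody:NO noInterrupt:NO preflight:NO];
    // ...
    [synth stopSpeakingAt:kImmediate];
    [synth close];                              // -close releases the instance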

GoogleTTS/GoogleTTSSynthesizer.m

+//
+//  GoogleTTSSynthesizer.m
+//  GoogleTTS
+//
+//  Created by Ivan Vučica on 4.8.2012.
+//  Copyright (c) 2012. Ivan Vučica. All rights reserved.
+//
+
+#import "GoogleTTSSynthesizer.h"
+#import <AVFoundation/AVFoundation.h>
+
+@implementation GoogleTTSSynthesizer
+@synthesize player;
+@synthesize voiceName;
+
++ (long)willUnloadBundle
+{
+	return 0; /* We retain no resources that would block unloading us */
+}
+
+- (id)init
+{
+    self = [super init];
+    if(!self)
+        return nil;
+    
+    queue = dispatch_queue_create("GoogleTTSSynthesizer", 0);
+    
+    return self;
+}
+
+- (void)dealloc
+{
+    [textToSpeak release];
+    if (textBeingSpoken)
+        CFRelease(textBeingSpoken);
+    self.player = nil;
+    self.voiceName = nil;
+    [super dealloc];
+}
+
+- (void)close
+{
+	if (generateSamples)
+		dispatch_source_cancel(generateSamples);
+	dispatch_release(queue);
+	[self release];
+}
+
+- (long)useVoice:(VoiceSpec *)voice withBundle:(CFBundleRef)inVoiceSpecBundle
+{
+	/*
+	 * Set up voice specific information. Normally, we would use a considerable amount of voice
+	 * specific data. 
+     * Google offers no parameters, though.
+	 */
+	VoiceDescription desc;
+	if (!GetVoiceDescription(voice, &desc, sizeof(desc)))
+    {
+        /*
+		switch (desc.gender) {
+            case kMale:
+                speechRate	= 20.0f;
+                pitchBase	= 300.0f;
+                break;
+            case kFemale:
+                speechRate	= 25.0f;
+                pitchBase	= 440.0f;
+                break;
+            case kNeuter:
+                speechRate	= 30.0f;
+                pitchBase	= 360.0f;
+                break;
+		}
+         */
+        char cVoiceName[64];
+        memcpy(cVoiceName, desc.name+1, desc.name[0]);
+        cVoiceName[desc.name[0]] = 0;
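+        // desc.name is a Pascal string (length byte followed by the characters),
+        // so for the Miku voice cVoiceName ends up as the C string "Miku".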
+        
+        self.voiceName = [NSString stringWithUTF8String:cVoiceName];
+    }
+	return noErr;
+}
+
+- (BOOL)isActive
+{
+	__block BOOL result;
+	
+	dispatch_sync(queue, ^{
+        result = !(synthState == kSynthStopped || synthState == kSynthPaused);
+    });
+	
+	return result;
+}
+
+
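+// Strips embedded commands from the input and accumulates the plain text in
+// textToSpeak. Example (assuming the host has installed the conventional
+// "[[" / "]]" delimiters as openDelim/closeDelim): the input
+//   "Hello [[volm 0.5; cmnt ignored]] world"
+// leaves textToSpeak as "hello  world" (the text is case-folded below); the
+// volume value is parsed but, as noted in the method, not applied.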
+- (void)encodeText:(CFStringRef)text
+{
+#if 0
+	[tokens clear];
+	if (wordCallback)
+		[tokens wantWordCallbacks:@selector(wordCallback:)];
+#else
+    [textToSpeak release];
+    textToSpeak = [NSMutableString new];
+#endif
+    
+	/*
+	 * Embedded commands are case insensitive, so fold the input once up front.
+	 * (This lowercases the spoken text as well, which Google's TTS tolerates.)
+	 * Most synthesizers require considerably more subtlety than this.
+	 */
+	CFMutableStringRef s = CFStringCreateMutableCopy(NULL, 0, text);
+	CFStringFold(s, kCFCompareCaseInsensitive, NULL);
+	text = s;
+    
+	float	curRate	= speechRate;
+	float	curPitch= pitchBase;
+	float	curVol	= volume;
+    
+	CFRange remainingText = CFRangeMake(0, CFStringGetLength(text));
+	while (openDelim) {
+		CFRange delim;
+		if (!CFStringFindWithOptions(text, openDelim, remainingText, 0, &delim))
+			break;
+        
+		/* Encode text before opening delimiter */
+		CFRange prefix = CFRangeMake(remainingText.location,
+									 delim.location-remainingText.location);
+#if 0
+        // We don't use tokens
+		[tokens encodeText:text range:prefix];
+		[tokens encodeWordBreak];
+#else
+        [textToSpeak appendString:[(NSString*)text substringWithRange:NSMakeRange(prefix.location, prefix.length)]];
+#endif
+        
+		/* Process embedded commands */
+		CFStringRef newOpenDelim	= (CFStringRef)CFRetain(openDelim);
+		CFStringRef newCloseDelim   = (CFStringRef)CFRetain(closeDelim);
+		CFIndex		embedded		= delim.location+delim.length;
+		CFIndex     endEmbedded     = remainingText.location+remainingText.length;
+        
+		delim.length = remainingText.length-(embedded-remainingText.location);
+		if (CFStringFindWithOptions(text, closeDelim, delim, 0, &delim)) {
+			endEmbedded 			= delim.location;
+			remainingText.length 	= (remainingText.location+remainingText.length)-(endEmbedded+delim.length);
+			remainingText.location	= endEmbedded+delim.length;
+		} else {
+			remainingText.length	= 0;
+			remainingText.location	= endEmbedded;
+		}
+        
+		/* We should be reporting errors if we encounter any, but we don't */
+#define FETCH_NEXT_CHAR if (embedded < endEmbedded) ch = CFStringGetCharacterAtIndex(text, embedded++); else ch = ' '
+#define SKIP_SPACES     while (isspace(ch) && embedded<endEmbedded) FETCH_NEXT_CHAR
+#define ERROR			goto skipToNextCommand
+        
+		while (embedded < endEmbedded) {
+			UniChar ch;
+			FETCH_NEXT_CHAR;
+			SKIP_SPACES;
+			char 	selector[5] = {0,0,0,0,0};
+			int  	selIx       = 0;
+			SEL		paramSel;
+			float * curParam;
+			char	relative;
+			char    argument[32];
+			int		argIx;
+			float 	value;
+			while (selIx < 4 && isalpha(ch)) {
+				selector[selIx++] = ch;
+				FETCH_NEXT_CHAR;
+			}
+			/*
+			 * We only handle a small subset of the embedded commands we're
+			 * supposed to handle. You probably get the idea, though.
+			 */
+			if (!strcmp(selector, "cmnt")) {
+				break; /* Comment, skip rest of embedded command */
+			} else if (!strcmp(selector, "dlim")) {
+				/*
+				 * Change embedded command delimiters. The change takes place
+				 * AFTER the current block of embedded commands.
+				 */
+				UniChar odelim[2] = {0,0};
+				UniChar cdelim[2] = {0,0};
+				SKIP_SPACES;
+				if (!isspace(ch) && ch != ';') {
+					odelim[0] = ch;
+					FETCH_NEXT_CHAR;
+					if (!isspace(ch) && ch != ';') {
+						odelim[1] = ch;
+						FETCH_NEXT_CHAR;
+					}
+					SKIP_SPACES;
+				}
+				if (!isspace(ch) && ch != ';') {
+					cdelim[0] = ch;
+					FETCH_NEXT_CHAR;
+					if (!isspace(ch) && ch != ';') {
+						cdelim[1] = ch;
+						FETCH_NEXT_CHAR;
+					}
+				}
+				newOpenDelim = !odelim[0] ? NULL
+                : CFStringCreateWithCharacters(NULL, odelim, 1+(odelim[1] != 0));
+				newCloseDelim = !cdelim[0] ? NULL
+                : CFStringCreateWithCharacters(NULL, cdelim, 1+(cdelim[1] != 0));
+			} else if (!strcmp(selector, "rate")) {
+				paramSel= @selector(updateSpeechRate:);
+				curParam= &curRate;
+			handleNumericArgument:
+				SKIP_SPACES;
+				if (ch == '+' || ch == '-') {
+					relative = ch;
+					FETCH_NEXT_CHAR;
+				} else
+					relative = 0;
+				SKIP_SPACES;
+				for (argIx = 0; isdigit(ch) || ch == '.'; ++argIx) {
+					argument[argIx] = ch;
+					FETCH_NEXT_CHAR;
+				}
+				argument[argIx] = 0;
+				if (!argIx)
+					ERROR;
+				value = atof(argument);
+				/* TODO: Parameters need range check! */
+				switch (relative) {
+                    case '+':
+                        *curParam += value;
+                        break;
+                    case '-':
+                        *curParam -= value;
+                        break;
+                    default:
+                        *curParam = value;
+                        break;
+				}
+#if 0
+                // No support for rate change
+                [tokens encodeFloatCallback:paramSel value:*curParam];
+#endif
+			} else if (!strcmp(selector, "pbas")) {
+				paramSel= @selector(updatePitchBase:);
+				curParam= &curPitch;
+				goto handleNumericArgument;
+			} else if (!strcmp(selector, "volm")) {
+				paramSel= @selector(updateVolume:);
+				curParam= &curVol;
+				goto handleNumericArgument;
+			} else if (!strcmp(selector, "sync")) {
+				/* Sync accepts a wide range of formats */
+				uint32_t arg = 0;
+				SKIP_SPACES;
+				if (ch == '0') {
+					FETCH_NEXT_CHAR;
+					if (ch == 'x') {
+					hexArg:
+						FETCH_NEXT_CHAR;
+						while (isxdigit(ch)) {
+							arg	= arg*16 + (isdigit(ch) ? ch - '0' : ch - 'a' + 10);
+							FETCH_NEXT_CHAR;
+						}
+					} else {
+						/* Initial 0 can be ignored */
+					decimalArg:
+						while (isdigit(ch)) {
+							arg = arg*10 + ch-'0';
+							FETCH_NEXT_CHAR;
+						}
+					}
+				} else if (ch == '$') {
+					goto hexArg;
+				} else if (isdigit(ch)) {
+					goto decimalArg;
+				} else if (ch == '\'' || ch == '"') {
+					UniChar quote = ch;
+					FETCH_NEXT_CHAR;
+					while (ch != quote && !(arg & 0xFF000000)) {
+						arg	= (arg << 8) | (ch & 0xFF);
+						FETCH_NEXT_CHAR;
+					}
+				} else {
+					arg = ch << 24;
+					FETCH_NEXT_CHAR;
+					arg |= (ch & 0xFF) << 16;
+					FETCH_NEXT_CHAR;
+					arg |= (ch & 0xFF) << 8;
+					FETCH_NEXT_CHAR;
+					arg |= (ch & 0xFF);
+					FETCH_NEXT_CHAR;
+				}
+#if 0
+                // Unfortunately, we have no idea when each phoneme is played
+                // in Google's TTS.
+				[tokens encodeSyncCallback:@selector(syncCallback:) value:arg];
+#endif
+			} else {   /* Unknown selector */
+				ERROR;
+			}
+		skipToNextCommand:
+			while (embedded < endEmbedded && isspace(ch))
+				FETCH_NEXT_CHAR;
+			if (embedded == endEmbedded)
+				break;
+			else if (ch != ';') {
+				FETCH_NEXT_CHAR;
+				ERROR;
+			}
+		}
+        
+		if (openDelim)
+			CFRelease(openDelim);
+		openDelim	= newOpenDelim;
+		if (closeDelim)
+			CFRelease(closeDelim);
+		closeDelim  = newCloseDelim;
+	}
+	if (remainingText.length)
+#if 0
+		[tokens encodeText:text range:remainingText];
+#else
+        [textToSpeak appendString:[(NSString*)text substringWithRange:NSMakeRange(remainingText.location, remainingText.length)]];
+#endif
+    
+	CFRelease(s);
+}
+
+
+- (long)startSpeaking:(CFStringRef)text
+	  noEndingProsody:(BOOL)noEndingProsody noInterrupt:(BOOL)noInterrupt
+			preflight:(BOOL)preflight
+{
+	/*
+	 * Test for currently active speech
+	 */
+	if ([self isActive]) {
+		if (noInterrupt) {
+			return synthNotReady;
+		} else {
+			[self stopSpeakingAt:kImmediate];
+			while ([self isActive])
+				usleep(5000);	// Test again in 5ms
+		}
+    }
+    
+	synthState	= kSynthStopped;
+#if 0
+    // We can't pause at anything except 'current time'
+    pauseToken	= kMorseNone;
+#endif
+    
+	if (!text || !CFStringGetLength(text))
+		return noErr;
+	
+	if (textBeingSpoken)
+		CFRelease(textBeingSpoken);
+	textBeingSpoken = CFRetain(text);
+	[self encodeText:text];
+	[self createSoundChannel:NO];
+#if 0
+	if (preflight)
+		synthState = kSynthPaused;
+	else
+		[self startSampleGeneration];
+#else
+    if (!preflight)
+        [player play];
+#endif
+	return noErr;
+}
+
+- (long)stopSpeakingAt:(unsigned long)whereToStop
+{
+	dispatch_sync(queue, ^{
+#if 0
+		switch (synthState) {
+            case kSynthStopping:
+            case kSynthStopped:
+                break;
+            case kSynthPaused:
+                [tokens clear];
+                synthState	= kSynthStopped;
+                break;
+            case kSynthPausing:
+                synthState = kSynthStopping;
+                break;
+            case kSynthRunning:
+                switch (whereToStop) {
+                    case kEndOfWord:
+                        [tokens trimTokens:kMorseWordGap];
+                        break;
+                    case kEndOfSentence:
+                        [tokens trimTokens:kMorseSentenceGap];
+                        break;
+                    default:
+                        if (audioOutput) {
+                            synthState = kSynthStopping;
+                            [audioOutput stopAudio];
+                            [tokens clear];
+                            unitsLeftInToken = 0;
+                        } else {
+                            synthState = kSynthStopped;
+                        }
+                } 
+                break;
+		}
+#else
+        // sadly, we can only stop immediately.
+        [self.player stop];
+#endif
+	});
+	
+	return noErr;
+}
+
+- (long)pauseSpeakingAt:(unsigned long)whereToStop
+{
+	dispatch_sync(queue, ^{
+#if 0
+		switch (synthState) {
+            case kSynthPausing:
+            case kSynthPaused:
+            case kSynthStopping:
+            case kSynthStopped:
+                break;
+            case kSynthRunning:
+                switch (whereToStop) {
+                    case kEndOfWord:
+                        pauseToken = kMorseWordGap;
+                        break;
+                    case kEndOfSentence:
+                        if (!pauseToken)
+                            pauseToken = kMorseSentenceGap;
+                        break;
+                    default:
+                        pauseToken = kMorseNone;
+                        synthState = kSynthPausing;
+                        [audioOutput stopAudio];
+                }
+                break;
+		}
+#else
+        // sadly, we can only pause immediately.
+        [self.player pause];
+#endif
+	});
+	
+	return noErr;
+}
+
+- (long)continueSpeaking
+{
+#if 0
+	switch (synthState) {
+        case kSynthPausing:
+        case kSynthPaused:
+            [tokens skipGaps];
+            [self startSampleGeneration];
+            break;
+        default:
+            break;
+	}
+#else
+    [self.player play];
+#endif
+    
+	return noErr;
+}
+
+- (long)copyPhonemes:(CFStringRef)text result:(CFStringRef *)phonemes
+{
+    // This synthesizer doesn't really distinguish between a concept
+    // of phonemes and text (since that's done server side).
+    *phonemes = (CFStringRef)[(NSString*)text copy];
+    return 0;
+}
+
+- (CFStringEncoding)stringEncodingForBuffer
+{
+	return kCFStringEncodingUTF8;
+}
+
+- (void)updateSpeechRate:(NSValue *)update
+{
+#if 0
+	speechRate = ((GoogleTTSCallback *)[update pointerValue])->arg.f;
+    // No support for speech rate
+#endif
+}
+
+- (void)updatePitchBase:(NSValue *)update
+{
+#if 0
+	pitchBase = ((GoogleTTSCallback *)[update pointerValue])->arg.f;
+    // No support for pitch base
+#endif
+}
+
+- (void)updateVolume:(NSValue *)update
+{
+#if 0
+	volume = ((GoogleTTSCallback *)[update pointerValue])->arg.f;
+    // We might support volume... but we have no support for callbacks,
+    // and we ignore embedded commands.
+#endif
+}
+
+
+- (void)wordCallback:(NSValue *)arg
+{
+#if 0
+	if (wordCallback) {
+		CFRange r = ((GoogleTTSCallback *)[arg pointerValue])->arg.r;
+		wordCallback((SpeechChannel)self, clientRefCon, textBeingSpoken, r);
+	}
+    // Again, we don't support callbacks
+#endif
+}
+
+- (void)syncCallback:(NSValue *)arg
+{
+#if 0
+	if (syncCallback)
+		syncCallback((SpeechChannel)self, clientRefCon, ((GoogleTTSCallback *)[arg pointerValue])->arg.u);
+    // No support for callbacks...
+#endif
+}
+
+
+
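+// Rather than opening an audio device, this fetches synthesized audio from
+// Google. For example, once -encodeText: has reduced the input to "hello world",
+// the (synchronous) request is
+//   http://translate.google.com/translate_tts?q=hello%20world&tl=en&ie=UTF-8
+// (tl=ja when the Miku voice is in use), and the returned MP3 data is handed
+// to an AVAudioPlayer; the bundled problem.mp3 is played if the fetch fails.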
+- (void)createSoundChannel:(BOOL)forAudioUnit
+{
+	dispatch_sync(queue, ^{
+#if 0
+        if (!audioOutput)
+            if (audioFileRef == (ExtAudioFileRef)-1)
+                audioOutput = [AudioOutput createIgnoreAudio];
+            else if (audioFileRef)
+                audioOutput = [AudioOutput createFileAudio:audioFileRef];
+            else
+                audioOutput = [AudioOutput createLiveAudio:forAudioUnit withDevice:audioDevice];
+#else
+        NSDictionary *dictionary = [[[NSDictionary alloc] initWithObjectsAndKeys:
+                                     @"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/534.52.7 (KHTML, like Gecko) Version/5.1.2 Safari/534.52.7", @"UserAgent",
+                                     
+                                     nil] autorelease];
+        [[NSUserDefaults standardUserDefaults] registerDefaults:dictionary];
+
+        NSString * lang = @"en";
+        if([self.voiceName isEqualToString:@"Miku"])
+            lang = @"ja";
+        
+        NSLog(@"VN %@", self.voiceName);
+        
+        NSString * urlString = [NSString stringWithFormat:@"http://translate.google.com/translate_tts?q=%@&tl=%@&ie=UTF-8", [textToSpeak stringByAddingPercentEscapesUsingEncoding:NSUTF8StringEncoding], lang];
+        NSLog(@"url %@", urlString);
+        NSURL * url = [NSURL URLWithString:urlString];
+        NSData * data = [NSData dataWithContentsOfURL:url];
+        if(data)
+        {
+            self.player = [[[AVAudioPlayer alloc] initWithData:data error:nil] autorelease];
+            self.player.delegate = self;
+        }
+        else
+        {
+            self.player = [[[AVAudioPlayer alloc] initWithContentsOfURL:[NSURL fileURLWithPath:[[NSBundle bundleForClass:[self class]] pathForResource:@"problem" ofType:@"mp3"]] error:nil] autorelease];
+            self.player.delegate = self;
+        }
+#endif
+    });
+}
+
+
+- (void)disposeSoundChannel
+{
+	dispatch_sync(queue, ^{
+#if 0
+        [audioOutput close];
+        
+        audioOutput	  	= 0;
+        audioDevice	= kAudioDeviceUnknown;
+        
+        if (audioFileRef) {
+            if (audioFileOwned)
+                ExtAudioFileDispose(audioFileRef);
+            audioFileRef 	= 0;
+            audioFileOwned 	= NO;
+        }
+#else
+        [player release];
+        player = nil;
+#endif
+    });
+}
+
+
+
+/////////////
+// MARK: AVAudioPlayer delegate
+
+- (void)audioPlayerDidFinishPlaying:(AVAudioPlayer *)thePlayer successfully:(BOOL)flag
+{
+    if (textDoneCallback) {
+        /* The text done callback used to allow clients to pass in more text,
+         but that feature is deprecated. We pass a NULL pointer so clients
+         will hopefully get the hint.
+         */
+        const void * 	nextBuf		= NULL;
+        unsigned long 	byteLen		= 0;
+        SInt32	   		controlFlags= 0;
+        textDoneCallback((SpeechChannel)self, clientRefCon, &nextBuf, &byteLen, &controlFlags);
+    }
+
+    if (speechDoneCallback)
+        speechDoneCallback((SpeechChannel)self, clientRefCon);
+}
+
+@end

GoogleTTS/GoogleTTSSynthesizerCF.h

+//
+//  GoogleTTSSynthesizerCF.h
+//  GoogleTTS
+//
+//  Created by Ivan Vučica on 4.8.2012.
+//  Copyright (c) 2012. Ivan Vučica. All rights reserved.
+//
+
+#import "GoogleTTSSynthesizer.h"
+
+@interface GoogleTTSSynthesizer (GoogleTTSSynthesizerCF)
+
+@end

GoogleTTS/GoogleTTSSynthesizerCF.m

+//
+//  GoogleTTSSynthesizerCF.m
+//  GoogleTTS
+//
+//  Created by Ivan Vučica on 4.8.2012.
+//  Copyright (c) 2012. Ivan Vučica. All rights reserved.
+//
+
+#import "GoogleTTSSynthesizerCF.h"
+
+
+static CFNumberRef	newFloat(float value)
+{
+	return CFNumberCreate(NULL, kCFNumberFloatType, &value);
+}
+
+static CFNumberRef	newInt(int value)
+{
+	return CFNumberCreate(NULL, kCFNumberIntType, &value);
+}
+
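+/* Wraps the pointer's own value in a CFNumber (reads a long's worth of bytes
+   from &value); on LP64 a long and a pointer are the same size. */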
+static CFNumberRef	newPtr(void * value)
+{
+	return CFNumberCreate(NULL, kCFNumberLongType, &value);
+}
+
+@implementation GoogleTTSSynthesizer (GoogleTTSSynthesizerCF)
+
+- (long)copyProperty:(CFStringRef)property result:(CFTypeRef *)object
+{
+	if (!CFStringCompare(property, kSpeechInputModeProperty, 0))
+		*object = CFRetain(kSpeechModeText);
+	else if (!CFStringCompare(property, kSpeechCharacterModeProperty, 0))
+		*object = CFRetain(kSpeechModeNormal);
+	else if (!CFStringCompare(property, kSpeechNumberModeProperty, 0))
+		*object = CFRetain(kSpeechModeNormal);
+	else if (!CFStringCompare(property, kSpeechRateProperty, 0))
+		*object = newFloat(speechRate);
+	else if (!CFStringCompare(property, kSpeechPitchBaseProperty, 0))
+		*object = newFloat(pitchBase);
+	else if (!CFStringCompare(property, kSpeechPitchModProperty, 0))
+		*object = newFloat(0.0f);
+	else if (!CFStringCompare(property, kSpeechVolumeProperty, 0))
+		*object = newFloat(volume);
+	else if (!CFStringCompare(property, kSpeechStatusProperty, 0)) {
+		CFTypeRef statusKeys[4];
+		CFTypeRef statusValues[4];
+		
+		statusKeys[0]	= kSpeechStatusOutputBusy;
+		statusValues[0]	= newInt(synthState == kSynthStopped || synthState == kSynthPaused ? 0 : 1);
+		statusKeys[1]	= kSpeechStatusOutputPaused;