Merge change 3083
* changes:
  Generalize bitmap support and add remaining GL formats.
  Fix bug in command fifo looping case.
diff --git a/opengl/libagl/array.cpp b/opengl/libagl/array.cpp
index 8fa7566..3e9c6a5 100644
--- a/opengl/libagl/array.cpp
+++ b/opengl/libagl/array.cpp
@@ -951,6 +951,8 @@
v->index = first;
first &= vertex_cache_t::INDEX_MASK;
const GLubyte* vp = c->arrays.vertex.element(first);
+ v->obj.z = 0;
+ v->obj.w = 0x10000;
c->arrays.vertex.fetch(c, v->obj.v, vp);
c->arrays.mvp_transform(&c->transforms.mvp, &v->clip, &v->obj);
c->arrays.perspective(c, v);
@@ -966,6 +968,8 @@
do {
v->flags = 0;
v->index = first++;
+ v->obj.z = 0;
+ v->obj.w = 0x10000;
c->arrays.vertex.fetch(c, v->obj.v, vp);
c->arrays.mvp_transform(mvp, &v->clip, &v->obj);
c->arrays.perspective(c, v);
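A note on the two hunks above: they initialize obj.z and obj.w before the fetch, presumably because vertex arrays with fewer than four components leave those fields untouched. 0x10000 is 1.0 in the S15.16 ("16.16") fixed-point format used by GLfixed; a minimal Java sketch of that representation (names are illustrative, not from libagl):

    // S15.16 fixed point: the raw int encodes value * 2^16, so 0x10000 == 1.0.
    static int floatToFixed(float f) { return (int) (f * 65536.0f); }
    static float fixedToFloat(int x) { return x / 65536.0f; }

    // Multiply two 16.16 values: widen to long to keep the full product,
    // then shift back down to 16.16.
    static int fixedMul(int a, int b) { return (int) (((long) a * b) >> 16); }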
diff --git a/opengl/libagl/light.cpp b/opengl/libagl/light.cpp
index 25c41d0..8ae32cc 100644
--- a/opengl/libagl/light.cpp
+++ b/opengl/libagl/light.cpp
@@ -38,13 +38,14 @@
static void lightVertexMaterial(ogles_context_t* c, vertex_t* v);
static inline void vscale3(GLfixed* d, const GLfixed* m, GLfixed s);
-static inline void vsub3w(GLfixed* d, const GLfixed* a, const GLfixed* b);
static __attribute__((noinline))
void vnorm3(GLfixed* d, const GLfixed* a);
static inline void vsa3(GLfixed* d,
const GLfixed* m, GLfixed s, const GLfixed* a);
+static inline void vss3(GLfixed* d,
+ const GLfixed* m, GLfixed s, const GLfixed* a);
static inline void vmla3(GLfixed* d,
const GLfixed* m0, const GLfixed* m1, const GLfixed* a);
static inline void vmul3(GLfixed* d,
@@ -151,18 +152,10 @@
}
static inline
-void vsub3w(GLfixed* d, const GLfixed* a, const GLfixed* b) {
- const GLfixed wa = a[3];
- const GLfixed wb = b[3];
- if (ggl_likely(wa == wb)) {
- d[0] = a[0] - b[0];
- d[1] = a[1] - b[1];
- d[2] = a[2] - b[2];
- } else {
- d[0] = gglMulSubx(a[0], wb, gglMulx(b[0], wa));
- d[1] = gglMulSubx(a[1], wb, gglMulx(b[1], wa));
- d[2] = gglMulSubx(a[2], wb, gglMulx(b[2], wa));
- }
+void vss3(GLfixed* d, const GLfixed* m, GLfixed s, const GLfixed* a) {
+ d[0] = gglMulSubx(m[0], s, a[0]);
+ d[1] = gglMulSubx(m[1], s, a[1]);
+ d[2] = gglMulSubx(m[2], s, a[2]);
}
static inline
@@ -227,7 +220,7 @@
const int i = 31 - gglClz(en);
en &= ~(1<<i);
light_t& l = c->lighting.lights[i];
- c->transforms.mvui.point3(&c->transforms.mvui,
+ c->transforms.mvui.point4(&c->transforms.mvui,
&l.objPosition, &l.position);
vnorm3(l.normalizedObjPosition.v, l.objPosition.v);
}
@@ -318,6 +311,11 @@
vmul3(l.implicitAmbient.v, material.ambient.v, l.ambient.v);
vmul3(l.implicitDiffuse.v, material.diffuse.v, l.diffuse.v);
vmul3(l.implicitSpecular.v, material.specular.v, l.specular.v);
+ // this is just a flag to tell if we have a specular component
+ l.implicitSpecular.v[3] =
+ l.implicitSpecular.r |
+ l.implicitSpecular.g |
+ l.implicitSpecular.b;
}
// emission and ambient for the whole scene
vmla3( c->lighting.implicitSceneEmissionAndAmbient.v,
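The implicitSpecular.v[3] flag set above works because the bitwise OR of the three components is zero exactly when all of them are zero; a one-line Java illustration (variable names are mine):

    int hasSpecular = specR | specG | specB;  // nonzero iff any component is nonzero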
@@ -343,7 +341,11 @@
vec4_t n;
c->arrays.normal.fetch(c, n.v,
c->arrays.normal.element(v->index & vertex_cache_t::INDEX_MASK));
- if (c->transforms.rescaleNormals == GL_NORMALIZE)
+
+    // TODO: right now we handle GL_RESCALE_NORMAL as if it were
+    // GL_NORMALIZE. We could optimize this by scaling mvui
+    // appropriately instead.
+ if (c->transforms.rescaleNormals)
vnorm3(n.v, n.v);
const material_t& material = c->lighting.front;
@@ -360,7 +362,8 @@
// compute vertex-to-light vector
if (ggl_unlikely(l.position.w)) {
- vsub3w(d.v, l.objPosition.v, v->obj.v);
+ // lightPos/1.0 - vertex/vertex.w == lightPos*vertex.w - vertex
+ vss3(d.v, l.objPosition.v, v->obj.w, v->obj.v);
sqDist = dot3(d.v, d.v);
vscale3(d.v, d.v, gglSqrtRecipx(sqDist));
} else {
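On the vsub3w-to-vss3 change above: the vertex-to-light vector is normalized immediately afterwards, so it is only needed up to a positive scale factor. With the light at \mathbf{l} (whose w = 1) and the vertex at \mathbf{v} with weight v_w > 0:

    \frac{\mathbf{l}}{1} - \frac{\mathbf{v}}{v_w}
        = \frac{1}{v_w}\bigl(\mathbf{l}\,v_w - \mathbf{v}\bigr)
        \;\propto\; \mathbf{l}\,v_w - \mathbf{v}

which is exactly what vss3 computes per component, d_i = gglMulSubx(l_i, v_w, v_i) = l_i v_w - v_i, dropping the general case that vsub3w needed.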
diff --git a/opengl/libagl/matrix.cpp b/opengl/libagl/matrix.cpp
index f175cda..0b68dc0 100644
--- a/opengl/libagl/matrix.cpp
+++ b/opengl/libagl/matrix.cpp
@@ -55,7 +55,7 @@
static void point2__generic(transform_t const*, vec4_t* c, vec4_t const* o);
static void point3__generic(transform_t const*, vec4_t* c, vec4_t const* o);
static void point4__generic(transform_t const*, vec4_t* c, vec4_t const* o);
-static void normal__generic(transform_t const*, vec4_t* c, vec4_t const* o);
+static void point4__mvui(transform_t const*, vec4_t* c, vec4_t const* o);
// ----------------------------------------------------------------------------
#if 0
@@ -209,7 +209,8 @@
{
flags = 0;
ops = OP_ALL;
- point3 = normal__generic;
+ point3 = point4__mvui;
+ point4 = point4__mvui;
}
void transform_t::dump(const char* what)
@@ -596,66 +597,19 @@
void transform_state_t::update_mvui()
{
+ GLfloat r[16];
const GLfloat* const mv = modelview.top().elements();
-
- /*
- When transforming normals, we can use the upper 3x3 matrix, see:
- http://www.opengl.org/documentation/specs/version1.1/glspec1.1/node26.html
- */
- // Also note that:
- // l(obj) = tr(M).l(eye) for infinite light
- // l(obj) = inv(M).l(eye) for local light
-
- const uint32_t ops = modelview.top_ops() & ~OP_TRANSLATE;
- if (ggl_likely((!(ops & ~OP_ROTATE)) ||
- (rescaleNormals && modelview.isRigidBody()))) {
- // if the modelview matrix is a rigid body transformation
- // (translation, rotation, uniform scaling), then we can bypass
- // the inverse by transposing the matrix.
- GLfloat rescale = 1.0f;
- if (rescaleNormals == GL_RESCALE_NORMAL) {
- if (!(ops & ~OP_UNIFORM_SCALE)) {
- rescale = reciprocalf(mv[I(0,0)]);
- } else {
- rescale = rsqrtf(
- sqrf(mv[I(2,0)]) + sqrf(mv[I(2,1)]) + sqrf(mv[I(2,2)]));
- }
- }
- GLfixed* const x = mvui.matrix.m;
- for (int i=0 ; i<3 ; i++) {
- x[I(i,0)] = gglFloatToFixed(mv[I(0,i)] * rescale);
- x[I(i,1)] = gglFloatToFixed(mv[I(1,i)] * rescale);
- x[I(i,2)] = gglFloatToFixed(mv[I(2,i)] * rescale);
- }
- mvui.picker();
- return;
- }
-
- GLfloat r[3][3];
- r[0][0] = det22(mv[I(1,1)], mv[I(2,1)], mv[I(1,2)], mv[I(2,2)]);
- r[0][1] =ndet22(mv[I(0,1)], mv[I(2,1)], mv[I(0,2)], mv[I(2,2)]);
- r[0][2] = det22(mv[I(0,1)], mv[I(1,1)], mv[I(0,2)], mv[I(1,2)]);
- r[1][0] =ndet22(mv[I(1,0)], mv[I(2,0)], mv[I(1,2)], mv[I(2,2)]);
- r[1][1] = det22(mv[I(0,0)], mv[I(2,0)], mv[I(0,2)], mv[I(2,2)]);
- r[1][2] =ndet22(mv[I(0,0)], mv[I(1,0)], mv[I(0,2)], mv[I(1,2)]);
- r[2][0] = det22(mv[I(1,0)], mv[I(2,0)], mv[I(1,1)], mv[I(2,1)]);
- r[2][1] =ndet22(mv[I(0,0)], mv[I(2,0)], mv[I(0,1)], mv[I(2,1)]);
- r[2][2] = det22(mv[I(0,0)], mv[I(1,0)], mv[I(0,1)], mv[I(1,1)]);
-
- GLfloat rdet;
- if (rescaleNormals == GL_RESCALE_NORMAL) {
- rdet = rsqrtf(sqrf(r[0][2]) + sqrf(r[1][2]) + sqrf(r[2][2]));
- } else {
- rdet = reciprocalf(
- r[0][0]*mv[I(0,0)] + r[0][1]*mv[I(1,0)] + r[0][2]*mv[I(2,0)]);
- }
+ // TODO: we need a faster invert, especially for when the modelview
+ // is a rigid-body matrix
+ invert(r, mv);
GLfixed* const x = mvui.matrix.m;
- for (int i=0 ; i<3 ; i++) {
- x[I(i,0)] = gglFloatToFixed(r[i][0] * rdet);
- x[I(i,1)] = gglFloatToFixed(r[i][1] * rdet);
- x[I(i,2)] = gglFloatToFixed(r[i][2] * rdet);
+ for (int i=0 ; i<4 ; i++) {
+ x[I(i,0)] = gglFloatToFixed(r[I(i,0)]);
+ x[I(i,1)] = gglFloatToFixed(r[I(i,1)]);
+ x[I(i,2)] = gglFloatToFixed(r[I(i,2)]);
+        x[I(i,3)] = gglFloatToFixed(r[I(i,3)]);
}
mvui.picker();
}
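On the TODO above: if the modelview is a rigid-body transform (the case the deleted fast path handled), no general 4x4 inversion is needed, because a rotation satisfies R^{-1} = R^{T}:

    M = \begin{pmatrix} R & t \\ 0 & 1 \end{pmatrix}
    \quad\Rightarrow\quad
    M^{-1} = \begin{pmatrix} R^{T} & -R^{T} t \\ 0 & 1 \end{pmatrix}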
@@ -783,14 +737,19 @@
lhs->w = mla4(rx, m[ 3], ry, m[ 7], rz, m[11], rw, m[15]);
}
-void normal__generic(transform_t const* mx, vec4_t* lhs, vec4_t const* rhs) {
+void point4__mvui(transform_t const* mx, vec4_t* lhs, vec4_t const* rhs) {
+    // this is used for transforming light positions back to object space.
+    // Light positions have 3 components, so w is always 1; however, w is
+    // used as a switch for directional lights, so we need to preserve
+    // it.
const GLfixed* const m = mx->matrix.m;
const GLfixed rx = rhs->x;
const GLfixed ry = rhs->y;
const GLfixed rz = rhs->z;
- lhs->x = mla3(rx, m[ 0], ry, m[ 4], rz, m[ 8]);
- lhs->y = mla3(rx, m[ 1], ry, m[ 5], rz, m[ 9]);
- lhs->z = mla3(rx, m[ 2], ry, m[ 6], rz, m[10]);
+ lhs->x = mla3a(rx, m[ 0], ry, m[ 4], rz, m[ 8], m[12]);
+ lhs->y = mla3a(rx, m[ 1], ry, m[ 5], rz, m[ 9], m[13]);
+ lhs->z = mla3a(rx, m[ 2], ry, m[ 6], rz, m[10], m[14]);
+ lhs->w = rhs->w;
}
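point4__mvui therefore applies the full affine transform, including the translation column m[12..14] that the old normal__generic dropped; for a positional light with eye-space coordinates (x, y, z, 1):

    l_{obj,x} = m_0 x + m_4 y + m_8 z + m_{12}

and similarly for y and z, while w is copied through unchanged so that w = 0 can still select the directional-light path.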
diff --git a/opengl/tests/lighting1709/Android.mk b/opengl/tests/lighting1709/Android.mk
new file mode 100644
index 0000000..9563e61
--- /dev/null
+++ b/opengl/tests/lighting1709/Android.mk
@@ -0,0 +1,11 @@
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_TAGS := tests
+
+LOCAL_SRC_FILES := $(call all-subdir-java-files)
+
+LOCAL_PACKAGE_NAME := LightingTest
+LOCAL_CERTIFICATE := platform
+
+include $(BUILD_PACKAGE)
diff --git a/opengl/tests/lighting1709/AndroidManifest.xml b/opengl/tests/lighting1709/AndroidManifest.xml
new file mode 100644
index 0000000..6c23d42
--- /dev/null
+++ b/opengl/tests/lighting1709/AndroidManifest.xml
@@ -0,0 +1,13 @@
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+ package="com.android.lightingtest">
+
+ <application>
+ <activity android:name="ClearActivity" android:label="LightingTest">
+ <intent-filter>
+ <action android:name="android.intent.action.MAIN" />
+ <category android:name="android.intent.category.DEFAULT" />
+ <category android:name="android.intent.category.LAUNCHER" />
+ </intent-filter>
+ </activity>
+ </application>
+</manifest>
diff --git a/opengl/tests/lighting1709/src/com/android/lightingtest/ClearActivity.java b/opengl/tests/lighting1709/src/com/android/lightingtest/ClearActivity.java
new file mode 100644
index 0000000..3ae8c5c
--- /dev/null
+++ b/opengl/tests/lighting1709/src/com/android/lightingtest/ClearActivity.java
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2007 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.lightingtest;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.nio.FloatBuffer;
+
+import javax.microedition.khronos.egl.EGL10;
+import javax.microedition.khronos.egl.EGLConfig;
+import javax.microedition.khronos.opengles.GL10;
+
+import android.app.Activity;
+import android.content.Context;
+import android.opengl.GLSurfaceView;
+import android.os.Bundle;
+import android.util.Log;
+import android.view.MotionEvent;
+
+public class ClearActivity extends Activity {
+ @Override
+ protected void onCreate(Bundle savedInstanceState) {
+ super.onCreate(savedInstanceState);
+ mGLView = new ClearGLSurfaceView(this);
+ setContentView(mGLView);
+ }
+
+ @Override
+ protected void onPause() {
+ super.onPause();
+ mGLView.onPause();
+ }
+
+ @Override
+ protected void onResume() {
+ super.onResume();
+ mGLView.onResume();
+ }
+ private GLSurfaceView mGLView;
+}
+
+class ClearGLSurfaceView extends GLSurfaceView {
+ public ClearGLSurfaceView(Context context) {
+ super(context);
+ mRenderer = new ClearRenderer();
+ setRenderer(mRenderer);
+ }
+
+ ClearRenderer mRenderer;
+}
+
+class ClearRenderer implements GLSurfaceView.Renderer {
+ public ClearRenderer() {
+ }
+
+ public void onSurfaceCreated(GL10 gl, EGLConfig config) {
+ // Do nothing special.
+ }
+
+ public void onSurfaceChanged(GL10 gl, int w, int h) {
+ // Compute the projection matrix
+ gl.glMatrixMode(GL10.GL_PROJECTION);
+ gl.glLoadIdentity();
+
+ // Compute the boundaries of the frustum
+ float fl = (float) (-(w / 2)) / 288;
+ float fr = (float) (w / 2) / 288;
+ float ft = (float) (h / 2) / 288;
+ float fb = (float) (-(h / 2)) / 288;
+
+ // Set the view frustum
+ gl.glFrustumf(fl, fr, fb, ft, 1.0f, 2000.0f);
+
+ // Set the viewport dimensions
+ gl.glMatrixMode(GL10.GL_MODELVIEW);
+ gl.glLoadIdentity();
+ gl.glViewport(0, 0, w, h);
+ }
+
+ public void onDrawFrame(GL10 gl) {
+ gl.glClear(GL10.GL_COLOR_BUFFER_BIT | GL10.GL_DEPTH_BUFFER_BIT);
+
+ final float lightOff[] = {0.0f, 0.0f, 0.0f, 1.0f};
+ final float lightAmbient[] = {5.0f, 0.0f, 0.0f, 1.0f};
+ final float lightDiffuse[] = {0.0f, 2.0f, 0.0f, 0.0f};
+ final float lightPosSpot[] = {0.0f, 0.0f, -8.0f, 1.0f};
+
+ final float pos[] = {
+ -5.0f, -1.5f, 0.0f,
+ 0.0f, -1.5f, 0.0f,
+ 5.0f, -1.5f, 0.0f,
+ };
+
+ final float v[] = new float[9];
+ ByteBuffer vbb = ByteBuffer.allocateDirect(v.length*4);
+ vbb.order(ByteOrder.nativeOrder());
+ FloatBuffer vb = vbb.asFloatBuffer();
+
+ gl.glDisable(GL10.GL_DITHER);
+
+ gl.glLightfv(GL10.GL_LIGHT0, GL10.GL_AMBIENT, lightAmbient, 0);
+ gl.glLightfv(GL10.GL_LIGHT0, GL10.GL_DIFFUSE, lightDiffuse, 0);
+ gl.glLightfv(GL10.GL_LIGHT0, GL10.GL_SPECULAR, lightOff, 0);
+ gl.glLightfv(GL10.GL_LIGHT0, GL10.GL_POSITION, lightPosSpot, 0);
+ gl.glEnable(GL10.GL_LIGHT0);
+
+ gl.glEnable(GL10.GL_LIGHTING);
+
+
+ gl.glEnableClientState(GL10.GL_VERTEX_ARRAY);
+ gl.glNormal3f(0, 0, 1);
+
+
+ // draw first 3 triangles, without using transforms
+ for (int i=0 ; i<3 ; i++) {
+ v[0] = -1; v[1] =-1; v[2] = -10;
+ v[3] = 0; v[4] = 1; v[5] = -10;
+ v[6] = 1; v[7] =-1; v[8] = -10;
+ for (int j=0 ; j<3 ; j++) {
+ v[j*3+0] -= pos[i*3+0];
+ v[j*3+1] -= pos[i*3+1];
+ v[j*3+2] -= pos[i*3+2];
+ }
+ vb.put(v).position(0);
+ gl.glVertexPointer(3, GL10.GL_FLOAT, 0, vb);
+ gl.glDrawArrays(GL10.GL_TRIANGLE_STRIP, 0, 3);
+ }
+
+ // draw the 2nd batch this time with transforms
+ v[0] = -1; v[1] =-1; v[2] = -10;
+ v[3] = 0; v[4] = 1; v[5] = -10;
+ v[6] = 1; v[7] =-1; v[8] = -10;
+ vb.put(v).position(0);
+ gl.glVertexPointer(3, GL10.GL_FLOAT, 0, vb);
+
+ // draw lower left triangle
+ gl.glPushMatrix();
+ gl.glTranslatef(pos[0], pos[1], pos[2]);
+ gl.glDrawArrays(GL10.GL_TRIANGLE_STRIP, 0, 3);
+ gl.glPopMatrix();
+
+ // draw lower middle triangle
+ gl.glPushMatrix();
+ gl.glTranslatef(pos[3], pos[4], pos[5]);
+ gl.glDrawArrays(GL10.GL_TRIANGLE_STRIP, 0, 3);
+ gl.glPopMatrix();
+
+ // draw lower right triangle
+ gl.glPushMatrix();
+ gl.glTranslatef(pos[6], pos[7], pos[8]);
+ gl.glDrawArrays(GL10.GL_TRIANGLE_STRIP, 0, 3);
+ gl.glPopMatrix();
+ }
+
+ public int[] getConfigSpec() {
+ int[] configSpec = { EGL10.EGL_DEPTH_SIZE, 16, EGL10.EGL_NONE };
+ return configSpec;
+ }
+}
+
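A worked check of the frustum set up in onSurfaceChanged, assuming (my reading, not stated in the code) that 288 is a pixels-per-unit scale at the near plane: with near = 1, the w-pixel-wide viewport spans fr - fl = w/288 frustum units, i.e. 288 px per unit, so an object s units wide at depth d covers about

    s \cdot 288 / d \;\text{px}, \qquad \text{e.g. } 2 \cdot 288 / 10 = 57.6 \;\text{px}

for the 2-unit-wide triangles drawn at z = -10.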
diff --git a/tts/java/android/tts/SynthProxy.java b/tts/java/android/tts/SynthProxy.java
new file mode 100755
index 0000000..e065f40
--- /dev/null
+++ b/tts/java/android/tts/SynthProxy.java
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2009 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package android.tts;
+
+import android.util.Log;
+import java.lang.ref.WeakReference;
+
+/**
+ * @hide
+ *
+ * The SynthProxy class provides a high-level API to create and play
+ * synthesized speech. This class is used internally to talk to a native
+ * TTS library that implements the interface defined in
+ * frameworks/base/include/tts/TtsEngine.h
+ *
+ */
+@SuppressWarnings("unused")
+public class SynthProxy {
+
+ //
+ // External API
+ //
+
+ /**
+ * Constructor; pass the location of the native TTS .so to use.
+ */
+ public SynthProxy(String nativeSoLib) {
+ Log.e("TTS is loading", nativeSoLib);
+ native_setup(new WeakReference<SynthProxy>(this), nativeSoLib);
+ }
+
+ /**
+ * Stops and clears the AudioTrack.
+ */
+ public void stop() {
+ native_stop(mJniData);
+ }
+
+ /**
+ * Synthesize speech and speak it directly using AudioTrack.
+ */
+ public void speak(String text) {
+ native_speak(mJniData, text);
+ }
+
+ /**
+ * Synthesize speech to a file. The current implementation writes a valid
+ * WAV file to the given path, assuming it is writable. Something like
+ * "/sdcard/???.wav" is recommended.
+ */
+ public void synthesizeToFile(String text, String filename) {
+ native_synthesizeToFile(mJniData, text, filename);
+ }
+
+ // TODO add IPA methods
+
+ /**
+ * Sets the language
+ */
+ public void setLanguage(String language) {
+ native_setLanguage(mJniData, language);
+ }
+
+ /**
+ * Sets the speech rate
+ */
+ public final void setSpeechRate(int speechRate) {
+ native_setSpeechRate(mJniData, speechRate);
+ }
+
+
+ /**
+ * Plays the given audio buffer
+ */
+ public void playAudioBuffer(int bufferPointer, int bufferSize) {
+ native_playAudioBuffer(mJniData, bufferPointer, bufferSize);
+ }
+
+ /**
+ * Gets the currently set language
+ */
+ public String getLanguage() {
+ return native_getLanguage(mJniData);
+ }
+
+ /**
+ * Gets the currently set rate
+ */
+ public int getRate() {
+ return native_getRate(mJniData);
+ }
+
+ /**
+ * Shuts down the native synthesizer
+ */
+ public void shutdown() {
+ native_shutdown(mJniData);
+ }
+
+ //
+ // Internal
+ //
+
+ protected void finalize() {
+ native_finalize(mJniData);
+ mJniData = 0;
+ }
+
+ static {
+ System.loadLibrary("ttssynthproxy");
+ }
+
+ private final static String TAG = "SynthProxy";
+
+ /**
+ * Accessed by native methods
+ */
+ private int mJniData = 0;
+
+ private native final void native_setup(Object weak_this,
+ String nativeSoLib);
+
+ private native final void native_finalize(int jniData);
+
+ private native final void native_stop(int jniData);
+
+ private native final void native_speak(int jniData, String text);
+
+ private native final void native_synthesizeToFile(int jniData, String text, String filename);
+
+ private native final void native_setLanguage(int jniData, String language);
+
+ private native final void native_setSpeechRate(int jniData, int speechRate);
+
+ // TODO add buffer format
+ private native final void native_playAudioBuffer(int jniData, int bufferPointer, int bufferSize);
+
+ private native final String native_getLanguage(int jniData);
+
+ private native final int native_getRate(int jniData);
+
+ private native final void native_shutdown(int jniData);
+
+
+ /**
+ * Callback from the C layer
+ */
+ @SuppressWarnings("unused")
+ private static void postNativeSpeechSynthesizedInJava(Object tts_ref,
+ int bufferPointer, int bufferSize) {
+
+ Log.i("TTS plugin debug", "bufferPointer: " + bufferPointer
+ + " bufferSize: " + bufferSize);
+
+ SynthProxy nativeTTS = (SynthProxy)((WeakReference)tts_ref).get();
+ // TODO notify TTS service of synthesis/playback completion,
+ // method definition to be changed.
+ }
+}
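A minimal usage sketch for the class above; the engine .so path and the output file name are illustrative placeholders, not paths shipped by the platform:

    // Hypothetical usage of SynthProxy; paths are placeholders.
    SynthProxy proxy = new SynthProxy("/system/lib/libtts_example.so");
    proxy.setLanguage("en-rUS");
    proxy.setSpeechRate(140);
    proxy.speak("Hello, world");                       // plays via AudioTrack
    proxy.synthesizeToFile("Saved speech", "/sdcard/tts_out.wav");
    proxy.stop();
    proxy.shutdown();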
diff --git a/tts/java/android/tts/TtsService.java b/tts/java/android/tts/TtsService.java
new file mode 100755
index 0000000..d317181
--- /dev/null
+++ b/tts/java/android/tts/TtsService.java
@@ -0,0 +1,783 @@
+/*
+ * Copyright (C) 2009 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package android.tts;
+
+import android.tts.ITts.Stub;
+
+import android.app.Service;
+import android.content.Context;
+import android.content.Intent;
+import android.content.SharedPreferences;
+import android.content.pm.PackageManager;
+import android.content.pm.ResolveInfo;
+import android.content.pm.PackageManager.NameNotFoundException;
+import android.media.MediaPlayer;
+import android.media.MediaPlayer.OnCompletionListener;
+import android.net.Uri;
+import android.os.IBinder;
+import android.os.RemoteCallbackList;
+import android.os.RemoteException;
+import android.preference.PreferenceManager;
+import android.util.Log;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.concurrent.locks.ReentrantLock;
+
+/**
+ * @hide Synthesizes speech from text. This is implemented as a service so that
+ * other applications can call the TTS without needing to bundle the TTS
+ * in the build.
+ *
+ */
+public class TtsService extends Service implements OnCompletionListener {
+
+ private static class SpeechItem {
+ public static final int SPEECH = 0;
+ public static final int EARCON = 1;
+ public static final int SILENCE = 2;
+ public String mText = null;
+ public ArrayList<String> mParams = null;
+ public int mType = SPEECH;
+ public long mDuration = 0;
+
+ public SpeechItem(String text, ArrayList<String> params, int itemType) {
+ mText = text;
+ mParams = params;
+ mType = itemType;
+ }
+
+ public SpeechItem(long silenceTime) {
+ mDuration = silenceTime;
+ }
+ }
+
+ /**
+ * Contains the information needed to access a sound resource; the name of
+ * the package that contains the resource and the resID of the resource
+ * within that package.
+ */
+ private static class SoundResource {
+ public String mSourcePackageName = null;
+ public int mResId = -1;
+ public String mFilename = null;
+
+ public SoundResource(String packageName, int id) {
+ mSourcePackageName = packageName;
+ mResId = id;
+ mFilename = null;
+ }
+
+ public SoundResource(String file) {
+ mSourcePackageName = null;
+ mResId = -1;
+ mFilename = file;
+ }
+ }
+
+ private static final String ACTION = "android.intent.action.USE_TTS";
+ private static final String CATEGORY = "android.intent.category.TTS";
+ private static final String PKGNAME = "android.tts";
+
+ final RemoteCallbackList<ITtsCallback> mCallbacks = new RemoteCallbackList<ITtsCallback>();
+
+ private Boolean mIsSpeaking;
+ private ArrayList<SpeechItem> mSpeechQueue;
+ private HashMap<String, SoundResource> mEarcons;
+ private HashMap<String, SoundResource> mUtterances;
+ private MediaPlayer mPlayer;
+ private TtsService mSelf;
+
+ private SharedPreferences prefs;
+
+ private final ReentrantLock speechQueueLock = new ReentrantLock();
+ private final ReentrantLock synthesizerLock = new ReentrantLock();
+
+ // TODO support multiple SpeechSynthesis objects
+ private SynthProxy nativeSynth;
+
+ @Override
+ public void onCreate() {
+ super.onCreate();
+ Log.i("TTS", "TTS starting");
+
+
+ // TODO: Make this work when the settings are done in the main Settings
+ // app.
+ prefs = PreferenceManager.getDefaultSharedPreferences(this);
+
+ // TODO: This should be changed to work by requesting the path
+ // from the default engine.
+ nativeSynth = new SynthProxy(prefs.getString("engine_pref", ""));
+
+
+ mSelf = this;
+ mIsSpeaking = false;
+
+ mEarcons = new HashMap<String, SoundResource>();
+ mUtterances = new HashMap<String, SoundResource>();
+
+ mSpeechQueue = new ArrayList<SpeechItem>();
+ mPlayer = null;
+
+ setLanguage(prefs.getString("lang_pref", "en-rUS"));
+ setSpeechRate(Integer.parseInt(prefs.getString("rate_pref", "140")));
+ }
+
+ @Override
+ public void onDestroy() {
+ super.onDestroy();
+ // Don't hog the media player
+ cleanUpPlayer();
+
+ nativeSynth.shutdown();
+
+ // Unregister all callbacks.
+ mCallbacks.kill();
+ }
+
+ private void setSpeechRate(int rate) {
+ if (prefs.getBoolean("override_pref", false)) {
+ // This is set to the default here so that the preview in the prefs
+ // activity will show the change without a restart, even if apps are
+ // not allowed to change the defaults.
+ rate = Integer.parseInt(prefs.getString("rate_pref", "140"));
+ }
+ nativeSynth.setSpeechRate(rate);
+ }
+
+ private void setLanguage(String lang) {
+ if (prefs.getBoolean("override_pref", false)) {
+ // This is set to the default here so that the preview in the prefs
+ // activity will show the change without a restart, even if apps are
+ // not
+ // allowed to change the defaults.
+ lang = prefs.getString("lang_pref", "en-rUS");
+ }
+ nativeSynth.setLanguage(lang);
+ }
+
+ private void setEngine(String engineName, String[] requestedLanguages,
+ int strictness) {
+ // TODO: Implement engine selection code here.
+ Intent engineIntent = new Intent(
+ "android.intent.action.START_TTS_ENGINE");
+ if (engineName != null) {
+ engineIntent.addCategory("android.intent.action.tts_engine."
+ + engineName);
+ }
+ for (int i = 0; i < requestedLanguages.length; i++) {
+ engineIntent.addCategory("android.intent.action.tts_lang."
+ + requestedLanguages[i]);
+ }
+ ResolveInfo[] enginesArray = new ResolveInfo[0];
+ PackageManager pm = getPackageManager();
+ enginesArray = pm.queryIntentActivities(engineIntent, 0).toArray(
+ enginesArray);
+ }
+
+ private void setEngine(Intent engineIntent) {
+ // TODO: Implement engine selection code here.
+ }
+
+ private int getEngineStatus() {
+ // TODO: Proposal - add a sanity check method that
+ // TTS engine plugins must implement.
+ return 0;
+ }
+
+ /**
+ * Adds a sound resource to the TTS.
+ *
+ * @param text
+ * The text that should be associated with the sound resource
+ * @param packageName
+ * The name of the package which has the sound resource
+ * @param resId
+ * The resource ID of the sound within its package
+ */
+ private void addSpeech(String text, String packageName, int resId) {
+ mUtterances.put(text, new SoundResource(packageName, resId));
+ }
+
+ /**
+ * Adds a sound resource to the TTS.
+ *
+ * @param text
+ * The text that should be associated with the sound resource
+ * @param filename
+ * The filename of the sound resource. This must be a complete
+ * path like: (/sdcard/mysounds/mysoundbite.mp3).
+ */
+ private void addSpeech(String text, String filename) {
+ mUtterances.put(text, new SoundResource(filename));
+ }
+
+ /**
+ * Adds a sound resource to the TTS as an earcon.
+ *
+ * @param earcon
+ * The text that should be associated with the sound resource
+ * @param packageName
+ * The name of the package which has the sound resource
+ * @param resId
+ * The resource ID of the sound within its package
+ */
+ private void addEarcon(String earcon, String packageName, int resId) {
+ mEarcons.put(earcon, new SoundResource(packageName, resId));
+ }
+
+ /**
+ * Adds a sound resource to the TTS as an earcon.
+ *
+ * @param earcon
+ * The text that should be associated with the sound resource
+ * @param filename
+ * The filename of the sound resource. This must be a complete
+ * path like: (/sdcard/mysounds/mysoundbite.mp3).
+ */
+ private void addEarcon(String earcon, String filename) {
+ mEarcons.put(earcon, new SoundResource(filename));
+ }
+
+ /**
+ * Speaks the given text using the specified queueing mode and parameters.
+ *
+ * @param text
+ * The text that should be spoken
+ * @param queueMode
+ * 0 for no queue (interrupts all previous utterances), 1 for
+ * queued
+ * @param params
+ * An ArrayList of parameters. This is not implemented for all
+ * engines.
+ */
+ private void speak(String text, int queueMode, ArrayList<String> params) {
+ if (queueMode == 0) {
+ stop();
+ }
+ mSpeechQueue.add(new SpeechItem(text, params, SpeechItem.SPEECH));
+ if (!mIsSpeaking) {
+ processSpeechQueue();
+ }
+ }
+
+ /**
+ * Plays the earcon using the specified queueing mode and parameters.
+ *
+ * @param earcon
+ * The earcon that should be played
+ * @param queueMode
+ * 0 for no queue (interrupts all previous utterances), 1 for
+ * queued
+ * @param params
+ * An ArrayList of parameters. This is not implemented for all
+ * engines.
+ */
+ private void playEarcon(String earcon, int queueMode,
+ ArrayList<String> params) {
+ if (queueMode == 0) {
+ stop();
+ }
+ mSpeechQueue.add(new SpeechItem(earcon, params, SpeechItem.EARCON));
+ if (!mIsSpeaking) {
+ processSpeechQueue();
+ }
+ }
+
+ /**
+ * Stops all speech output and removes any utterances still in the queue.
+ */
+ private void stop() {
+ Log.i("TTS", "Stopping");
+ mSpeechQueue.clear();
+
+ nativeSynth.stop();
+ mIsSpeaking = false;
+ if (mPlayer != null) {
+ try {
+ mPlayer.stop();
+ } catch (IllegalStateException e) {
+ // Do nothing, the player is already stopped.
+ }
+ }
+ Log.i("TTS", "Stopped");
+ }
+
+ public void onCompletion(MediaPlayer arg0) {
+ processSpeechQueue();
+ }
+
+ private void playSilence(long duration, int queueMode,
+ ArrayList<String> params) {
+ if (queueMode == 0) {
+ stop();
+ }
+ mSpeechQueue.add(new SpeechItem(duration));
+ if (!mIsSpeaking) {
+ processSpeechQueue();
+ }
+ }
+
+ private void silence(final long duration) {
+ class SilenceThread implements Runnable {
+ public void run() {
+ try {
+ Thread.sleep(duration);
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ } finally {
+ processSpeechQueue();
+ }
+ }
+ }
+ Thread slnc = (new Thread(new SilenceThread()));
+ slnc.setPriority(Thread.MIN_PRIORITY);
+ slnc.start();
+ }
+
+ private void speakInternalOnly(final String text,
+ final ArrayList<String> params) {
+ class SynthThread implements Runnable {
+ public void run() {
+ boolean synthAvailable = false;
+ try {
+ synthAvailable = synthesizerLock.tryLock();
+ if (!synthAvailable) {
+ Thread.sleep(100);
+ Thread synth = (new Thread(new SynthThread()));
+ synth.setPriority(Thread.MIN_PRIORITY);
+ synth.start();
+ return;
+ }
+ nativeSynth.speak(text);
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ } finally {
+ // This check is needed because finally will always run;
+ // even if the
+ // method returns somewhere in the try block.
+ if (synthAvailable) {
+ synthesizerLock.unlock();
+ }
+ }
+ }
+ }
+ Thread synth = (new Thread(new SynthThread()));
+ synth.setPriority(Thread.MIN_PRIORITY);
+ synth.start();
+ }
+
+ private SoundResource getSoundResource(SpeechItem speechItem) {
+ SoundResource sr = null;
+ String text = speechItem.mText;
+ if (speechItem.mType == SpeechItem.SILENCE) {
+ // Do nothing if this is just silence
+ } else if (speechItem.mType == SpeechItem.EARCON) {
+ sr = mEarcons.get(text);
+ } else {
+ sr = mUtterances.get(text);
+ }
+ return sr;
+ }
+
+ private void dispatchSpeechCompletedCallbacks(String mark) {
+ Log.i("TTS callback", "dispatch started");
+ // Broadcast to all clients the new value.
+ final int N = mCallbacks.beginBroadcast();
+ for (int i = 0; i < N; i++) {
+ try {
+ mCallbacks.getBroadcastItem(i).markReached(mark);
+ } catch (RemoteException e) {
+ // The RemoteCallbackList will take care of removing
+ // the dead object for us.
+ }
+ }
+ mCallbacks.finishBroadcast();
+ Log.i("TTS callback", "dispatch completed to " + N);
+ }
+
+ private void processSpeechQueue() {
+ boolean speechQueueAvailable = false;
+ try {
+ speechQueueAvailable = speechQueueLock.tryLock();
+ if (!speechQueueAvailable) {
+ return;
+ }
+ if (mSpeechQueue.size() < 1) {
+ mIsSpeaking = false;
+ // Dispatch a completion here as this is the
+ // only place where speech completes normally.
+ // Nothing left to say in the queue is a special case
+ // that is always a "mark" - associated text is null.
+ dispatchSpeechCompletedCallbacks("");
+ return;
+ }
+
+ SpeechItem currentSpeechItem = mSpeechQueue.get(0);
+ mIsSpeaking = true;
+ SoundResource sr = getSoundResource(currentSpeechItem);
+ // Synth speech as needed - synthesizer should call
+ // processSpeechQueue to continue running the queue
+ Log.i("TTS processing: ", currentSpeechItem.mText);
+ if (sr == null) {
+ if (currentSpeechItem.mType == SpeechItem.SPEECH) {
+ // TODO: Split text up into smaller chunks before accepting
+ // them
+ // for processing.
+ speakInternalOnly(currentSpeechItem.mText,
+ currentSpeechItem.mParams);
+ } else {
+ // This is either silence or an earcon that was missing
+ silence(currentSpeechItem.mDuration);
+ }
+ } else {
+ cleanUpPlayer();
+ if (sr.mSourcePackageName == PKGNAME) {
+ // Utterance is part of the TTS library
+ mPlayer = MediaPlayer.create(this, sr.mResId);
+ } else if (sr.mSourcePackageName != null) {
+ // Utterance is part of the app calling the library
+ Context ctx;
+ try {
+ ctx = this.createPackageContext(sr.mSourcePackageName,
+ 0);
+ } catch (NameNotFoundException e) {
+ e.printStackTrace();
+ mSpeechQueue.remove(0); // Remove it from the queue and
+ // move on
+ mIsSpeaking = false;
+ return;
+ }
+ mPlayer = MediaPlayer.create(ctx, sr.mResId);
+ } else {
+ // Utterance is coming from a file
+ mPlayer = MediaPlayer.create(this, Uri.parse(sr.mFilename));
+ }
+
+ // Check if Media Server is dead; if it is, clear the queue and
+ // give up for now - hopefully, it will recover itself.
+ if (mPlayer == null) {
+ mSpeechQueue.clear();
+ mIsSpeaking = false;
+ return;
+ }
+ mPlayer.setOnCompletionListener(this);
+ try {
+ mPlayer.start();
+ } catch (IllegalStateException e) {
+ mSpeechQueue.clear();
+ mIsSpeaking = false;
+ cleanUpPlayer();
+ return;
+ }
+ }
+ if (mSpeechQueue.size() > 0) {
+ mSpeechQueue.remove(0);
+ }
+ } finally {
+ // This check is needed because finally will always run; even if the
+ // method returns somewhere in the try block.
+ if (speechQueueAvailable) {
+ speechQueueLock.unlock();
+ }
+ }
+ }
+
+ private void cleanUpPlayer() {
+ if (mPlayer != null) {
+ mPlayer.release();
+ mPlayer = null;
+ }
+ }
+
+ /**
+ * Synthesizes the given text using the specified queuing mode and
+ * parameters.
+ *
+ * @param text
+ * The String of text that should be synthesized
+ * @param params
+ * An ArrayList of parameters. The first element of this array
+ * controls the type of voice to use.
+ * @param filename
+ * The string that gives the full output filename; it should be
+ * something like "/sdcard/myappsounds/mysound.wav".
+ * @return A boolean that indicates if the synthesis succeeded
+ */
+ private boolean synthesizeToFile(String text, ArrayList<String> params,
+ String filename, boolean calledFromApi) {
+ // Only stop everything if this is a call made by an outside app trying
+ // to
+ // use the API. Do NOT stop if this is a call from within the service as
+ // clearing the speech queue here would be a mistake.
+ if (calledFromApi) {
+ stop();
+ }
+ Log.i("TTS", "Synthesizing to " + filename);
+ boolean synthAvailable = false;
+ try {
+ synthAvailable = synthesizerLock.tryLock();
+ if (!synthAvailable) {
+ return false;
+ }
+ // Don't allow a filename that is too long
+ // TODO use platform constant
+ if (filename.length() > 250) {
+ return false;
+ }
+ nativeSynth.synthesizeToFile(text, filename);
+ } finally {
+ // This check is needed because finally will always run; even if the
+ // method returns somewhere in the try block.
+ if (synthAvailable) {
+ synthesizerLock.unlock();
+ }
+ }
+ Log.i("TTS", "Completed synthesis for " + filename);
+ return true;
+ }
+
+ @Override
+ public IBinder onBind(Intent intent) {
+ if (ACTION.equals(intent.getAction())) {
+ for (String category : intent.getCategories()) {
+ if (category.equals(CATEGORY)) {
+ return mBinder;
+ }
+ }
+ }
+ return null;
+ }
+
+ private final ITts.Stub mBinder = new Stub() {
+
+ public void registerCallback(ITtsCallback cb) {
+ if (cb != null)
+ mCallbacks.register(cb);
+ }
+
+ public void unregisterCallback(ITtsCallback cb) {
+ if (cb != null)
+ mCallbacks.unregister(cb);
+ }
+
+ /**
+ * Gives a hint about the type of engine that is preferred.
+ *
+ * @param selectedEngine
+ * The TTS engine that should be used
+ */
+ public void setEngine(String engineName, String[] supportedLanguages,
+ int strictness) {
+ mSelf.setEngine(engineName, supportedLanguages, strictness);
+ }
+
+ /**
+ * Specifies exactly what the engine has to support. Will always be
+ * considered "strict"; can be used for implementing
+ * optional/experimental features that are not supported by all engines.
+ *
+ * @param engineIntent
+ * An intent that specifies exactly what the engine has to
+ * support.
+ */
+ public void setEngineWithIntent(Intent engineIntent) {
+ mSelf.setEngine(engineIntent);
+ }
+
+ /**
+ * Speaks the given text using the specified queueing mode and
+ * parameters.
+ *
+ * @param text
+ * The text that should be spoken
+ * @param queueMode
+ * 0 for no queue (interrupts all previous utterances), 1 for
+ * queued
+ * @param params
+ * An ArrayList of parameters. The first element of this
+ * array controls the type of voice to use.
+ */
+ public void speak(String text, int queueMode, String[] params) {
+ ArrayList<String> speakingParams = new ArrayList<String>();
+ if (params != null) {
+ speakingParams = new ArrayList<String>(Arrays.asList(params));
+ }
+ mSelf.speak(text, queueMode, speakingParams);
+ }
+
+ /**
+ * Plays the earcon using the specified queueing mode and parameters.
+ *
+ * @param earcon
+ * The earcon that should be played
+ * @param queueMode
+ * 0 for no queue (interrupts all previous utterances), 1 for
+ * queued
+ * @param params
+ * An ArrayList of parameters.
+ */
+ public void playEarcon(String earcon, int queueMode, String[] params) {
+ ArrayList<String> speakingParams = new ArrayList<String>();
+ if (params != null) {
+ speakingParams = new ArrayList<String>(Arrays.asList(params));
+ }
+ mSelf.playEarcon(earcon, queueMode, speakingParams);
+ }
+
+ /**
+ * Plays the silence using the specified queueing mode and parameters.
+ *
+ * @param duration
+ * The duration of the silence that should be played
+ * @param queueMode
+ * 0 for no queue (interrupts all previous utterances), 1 for
+ * queued
+ * @param params
+ * An ArrayList of parameters.
+ */
+ public void playSilence(long duration, int queueMode, String[] params) {
+ ArrayList<String> speakingParams = new ArrayList<String>();
+ if (params != null) {
+ speakingParams = new ArrayList<String>(Arrays.asList(params));
+ }
+ mSelf.playSilence(duration, queueMode, speakingParams);
+ }
+
+
+ /**
+ * Stops all speech output and removes any utterances still in the
+ * queue.
+ */
+ public void stop() {
+ mSelf.stop();
+ }
+
+ /**
+ * Returns whether or not the TTS is speaking.
+ *
+ * @return Boolean to indicate whether or not the TTS is speaking
+ */
+ public boolean isSpeaking() {
+            return mSelf.mIsSpeaking;
+ }
+
+ /**
+ * Adds a sound resource to the TTS.
+ *
+ * @param text
+ * The text that should be associated with the sound resource
+ * @param packageName
+ * The name of the package which has the sound resource
+ * @param resId
+ * The resource ID of the sound within its package
+ */
+ public void addSpeech(String text, String packageName, int resId) {
+ mSelf.addSpeech(text, packageName, resId);
+ }
+
+ /**
+ * Adds a sound resource to the TTS.
+ *
+ * @param text
+ * The text that should be associated with the sound resource
+ * @param filename
+ * The filename of the sound resource. This must be a
+ * complete path like: (/sdcard/mysounds/mysoundbite.mp3).
+ */
+ public void addSpeechFile(String text, String filename) {
+ mSelf.addSpeech(text, filename);
+ }
+
+ /**
+ * Adds a sound resource to the TTS as an earcon.
+ *
+ * @param earcon
+ * The text that should be associated with the sound resource
+ * @param packageName
+ * The name of the package which has the sound resource
+ * @param resId
+ * The resource ID of the sound within its package
+ */
+ public void addEarcon(String earcon, String packageName, int resId) {
+ mSelf.addEarcon(earcon, packageName, resId);
+ }
+
+ /**
+ * Adds a sound resource to the TTS as an earcon.
+ *
+ * @param earcon
+ * The text that should be associated with the sound resource
+ * @param filename
+ * The filename of the sound resource. This must be a
+ * complete path like: (/sdcard/mysounds/mysoundbite.mp3).
+ */
+ public void addEarconFile(String earcon, String filename) {
+ mSelf.addEarcon(earcon, filename);
+ }
+
+ /**
+ * Sets the speech rate for the TTS. Note that this will only have an
+ * effect on synthesized speech; it will not affect pre-recorded speech.
+ *
+ * @param speechRate
+ * The speech rate that should be used
+ */
+ public void setSpeechRate(int speechRate) {
+ mSelf.setSpeechRate(speechRate);
+ }
+
+        /**
+         * Sets the language for the TTS. Note that this will only have
+         * an effect on synthesized speech; it will not affect
+         * pre-recorded speech.
+ *
+ * @param language
+ * The language to be used. The languages are specified by
+ * their IETF language tags as defined by BCP 47. This is the
+ * same standard used for the lang attribute in HTML. See:
+ * http://en.wikipedia.org/wiki/IETF_language_tag
+ */
+ public void setLanguage(String language) {
+ mSelf.setLanguage(language);
+ }
+
+ /**
+ * Speaks the given text using the specified queueing mode and
+ * parameters.
+ *
+ * @param text
+ * The String of text that should be synthesized
+ * @param params
+ * An ArrayList of parameters. The first element of this
+ * array controls the type of voice to use.
+ * @param filename
+ * The string that gives the full output filename; it should
+ * be something like "/sdcard/myappsounds/mysound.wav".
+ * @return A boolean that indicates if the synthesis succeeded
+ */
+ public boolean synthesizeToFile(String text, String[] params,
+ String filename) {
+ ArrayList<String> speakingParams = new ArrayList<String>();
+ if (params != null) {
+ speakingParams = new ArrayList<String>(Arrays.asList(params));
+ }
+ return mSelf.synthesizeToFile(text, speakingParams, filename, true);
+ }
+ };
+
+}
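speakInternalOnly above serializes access to the single native synthesizer with tryLock(): if the lock is busy, it sleeps 100 ms and re-spawns itself on a fresh low-priority thread rather than blocking the caller. A self-contained sketch of that pattern (class and method names are mine, not from the service):

    import java.util.concurrent.locks.ReentrantLock;

    class SynthLockSketch {
        private final ReentrantLock lock = new ReentrantLock();

        // If the synthesizer is busy, back off briefly and retry on a new
        // low-priority thread instead of blocking the caller.
        void runWhenFree(final Runnable task) {
            Thread t = new Thread(new Runnable() {
                public void run() {
                    boolean acquired = false;
                    try {
                        acquired = lock.tryLock();
                        if (!acquired) {
                            Thread.sleep(100);
                            runWhenFree(task);   // respawn and try again
                            return;
                        }
                        task.run();
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                    } finally {
                        // finally also runs after the early return above
                        if (acquired) {
                            lock.unlock();
                        }
                    }
                }
            });
            t.setPriority(Thread.MIN_PRIORITY);
            t.start();
        }
    }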
diff --git a/tts/jni/Android.mk b/tts/jni/Android.mk
new file mode 100755
index 0000000..665d6d2
--- /dev/null
+++ b/tts/jni/Android.mk
@@ -0,0 +1,31 @@
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+ android_tts_SynthProxy.cpp
+
+LOCAL_C_INCLUDES += \
+ $(JNI_H_INCLUDE)
+
+LOCAL_SHARED_LIBRARIES := \
+ libandroid_runtime \
+ libnativehelper \
+ libmedia \
+ libutils \
+ libcutils
+
+ifeq ($(TARGET_SIMULATOR),true)
+ LOCAL_LDLIBS += -ldl
+else
+ LOCAL_SHARED_LIBRARIES += libdl
+endif
+
+
+LOCAL_MODULE:= libttssynthproxy
+
+LOCAL_ARM_MODE := arm
+
+LOCAL_PRELINK_MODULE := false
+
+include $(BUILD_SHARED_LIBRARY)
+
diff --git a/tts/jni/android_tts_SynthProxy.cpp b/tts/jni/android_tts_SynthProxy.cpp
new file mode 100755
index 0000000..d8f1bf3
--- /dev/null
+++ b/tts/jni/android_tts_SynthProxy.cpp
@@ -0,0 +1,595 @@
+/*
+ * Copyright (C) 2009 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <stdio.h>
+#include <unistd.h>
+
+#define LOG_TAG "SynthProxy"
+
+#include <utils/Log.h>
+#include <nativehelper/jni.h>
+#include <nativehelper/JNIHelp.h>
+#include <android_runtime/AndroidRuntime.h>
+#include <tts/TtsEngine.h>
+#include <media/AudioTrack.h>
+
+#include <dlfcn.h>
+
+#define DEFAULT_TTS_RATE 16000
+#define DEFAULT_TTS_FORMAT AudioSystem::PCM_16_BIT
+#define DEFAULT_TTS_NB_CHANNELS 1
+
+#define USAGEMODE_PLAY_IMMEDIATELY 0
+#define USAGEMODE_WRITE_TO_FILE 1
+
+using namespace android;
+
+// ----------------------------------------------------------------------------
+struct fields_t {
+ jfieldID synthProxyFieldJniData;
+ jclass synthProxyClass;
+ jmethodID synthProxyMethodPost;
+};
+
+struct afterSynthData_t {
+ jint jniStorage;
+ int usageMode;
+ FILE* outputFile;
+};
+
+// ----------------------------------------------------------------------------
+static fields_t javaTTSFields;
+
+// ----------------------------------------------------------------------------
+class SynthProxyJniStorage {
+ public :
+ //jclass tts_class;
+ jobject tts_ref;
+ TtsEngine* mNativeSynthInterface;
+ AudioTrack* mAudioOut;
+ uint32_t mSampleRate;
+ AudioSystem::audio_format mAudFormat;
+ int mNbChannels;
+
+ SynthProxyJniStorage() {
+ //tts_class = NULL;
+ tts_ref = NULL;
+ mNativeSynthInterface = NULL;
+ mAudioOut = NULL;
+ mSampleRate = DEFAULT_TTS_RATE;
+ mAudFormat = DEFAULT_TTS_FORMAT;
+ mNbChannels = DEFAULT_TTS_NB_CHANNELS;
+ }
+
+ ~SynthProxyJniStorage() {
+ killAudio();
+ if (mNativeSynthInterface) {
+ mNativeSynthInterface->shutdown();
+ mNativeSynthInterface = NULL;
+ }
+ }
+
+ void killAudio() {
+ if (mAudioOut) {
+ mAudioOut->stop();
+ delete mAudioOut;
+ mAudioOut = NULL;
+ }
+ }
+
+ void createAudioOut(uint32_t rate, AudioSystem::audio_format format,
+ int channel) {
+ mSampleRate = rate;
+ mAudFormat = format;
+ mNbChannels = channel;
+
+ // TODO use the TTS stream type
+ int streamType = AudioSystem::MUSIC;
+
+ // retrieve system properties to ensure successful creation of the
+ // AudioTrack object for playback
+ int afSampleRate;
+ if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
+ afSampleRate = 44100;
+ }
+ int afFrameCount;
+ if (AudioSystem::getOutputFrameCount(&afFrameCount, streamType) != NO_ERROR) {
+ afFrameCount = 2048;
+ }
+ uint32_t afLatency;
+ if (AudioSystem::getOutputLatency(&afLatency, streamType) != NO_ERROR) {
+ afLatency = 500;
+ }
+ uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
+ if (minBufCount < 2) minBufCount = 2;
+ int minFrameCount = (afFrameCount * rate * minBufCount)/afSampleRate;
+
+ mAudioOut = new AudioTrack(streamType, rate, format, channel,
+ minFrameCount > 4096 ? minFrameCount : 4096,
+ 0, 0, 0, 0); // not using an AudioTrack callback
+
+ if (mAudioOut->initCheck() != NO_ERROR) {
+ LOGI("AudioTrack error");
+ delete mAudioOut;
+ mAudioOut = NULL;
+ } else {
+ LOGI("AudioTrack OK");
+ mAudioOut->start();
+ LOGI("AudioTrack started");
+ }
+ }
+};
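A worked pass through the buffer sizing in createAudioOut above, using the fallback values from the code (afSampleRate = 44100, afFrameCount = 2048, afLatency = 500) and the default synth rate of 16000 Hz; all divisions are integer:

    buffer duration:  (1000 * 2048) / 44100        = 46 ms
    minBufCount:      500 / 46                     = 10   (already >= 2)
    minFrameCount:    (2048 * 16000 * 10) / 44100  = 7430 frames

which exceeds the 4096-frame floor, so the AudioTrack is created with 7430 frames.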
+
+
+// ----------------------------------------------------------------------------
+void prepAudioTrack(SynthProxyJniStorage* pJniData,
+ uint32_t rate, AudioSystem::audio_format format, int channel)
+{
+ // Don't bother creating a new audiotrack object if the current
+ // object is already set.
+ if ( pJniData->mAudioOut &&
+ (rate == pJniData->mSampleRate) &&
+ (format == pJniData->mAudFormat) &&
+ (channel == pJniData->mNbChannels) ){
+ return;
+ }
+ if (pJniData->mAudioOut){
+ pJniData->killAudio();
+ }
+ pJniData->createAudioOut(rate, format, channel);
+}
+
+
+// ----------------------------------------------------------------------------
+/*
+ * Callback from TTS engine.
+ * Directly speaks using AudioTrack or write to file
+ */
+static void ttsSynthDoneCB(void * userdata, uint32_t rate,
+ AudioSystem::audio_format format, int channel,
+ int8_t *wav, size_t bufferSize) {
+ LOGI("ttsSynthDoneCallback: %d bytes", bufferSize);
+
+ afterSynthData_t* pForAfter = (afterSynthData_t*)userdata;
+
+ if (pForAfter->usageMode == USAGEMODE_PLAY_IMMEDIATELY){
+ LOGI("Direct speech");
+
+ if (wav == NULL) {
+ LOGI("Null: speech has completed");
+ }
+
+ if (bufferSize > 0) {
+ SynthProxyJniStorage* pJniData =
+ (SynthProxyJniStorage*)(pForAfter->jniStorage);
+ prepAudioTrack(pJniData, rate, format, channel);
+ if (pJniData->mAudioOut) {
+ pJniData->mAudioOut->write(wav, bufferSize);
+ LOGI("AudioTrack wrote: %d bytes", bufferSize);
+ } else {
+ LOGI("Can't play, null audiotrack");
+ }
+ }
+ } else if (pForAfter->usageMode == USAGEMODE_WRITE_TO_FILE) {
+ LOGI("Save to file");
+ if (wav == NULL) {
+ LOGI("Null: speech has completed");
+ }
+ if (bufferSize > 0){
+ fwrite(wav, 1, bufferSize, pForAfter->outputFile);
+ }
+ }
+ // TODO update to call back into the SynthProxy class through the
+    // javaTTSFields.synthProxyMethodPost method to notify
+    // that playback has completed
+
+ delete pForAfter;
+ return;
+}
+
+
+// ----------------------------------------------------------------------------
+static void
+android_tts_SynthProxy_native_setup(JNIEnv *env, jobject thiz,
+ jobject weak_this, jstring nativeSoLib)
+{
+ SynthProxyJniStorage* pJniStorage = new SynthProxyJniStorage();
+
+ prepAudioTrack(pJniStorage,
+ DEFAULT_TTS_RATE, DEFAULT_TTS_FORMAT, DEFAULT_TTS_NB_CHANNELS);
+
+ const char *nativeSoLibNativeString =
+ env->GetStringUTFChars(nativeSoLib, 0);
+
+ void *engine_lib_handle = dlopen(nativeSoLibNativeString,
+ RTLD_NOW | RTLD_LOCAL);
+ if (engine_lib_handle==NULL) {
+ LOGI("engine_lib_handle==NULL");
+ // TODO report error so the TTS can't be used
+ } else {
+ TtsEngine *(*get_TtsEngine)() =
+ reinterpret_cast<TtsEngine* (*)()>(dlsym(engine_lib_handle, "getTtsEngine"));
+ pJniStorage->mNativeSynthInterface = (*get_TtsEngine)();
+ if (pJniStorage->mNativeSynthInterface) {
+ pJniStorage->mNativeSynthInterface->init(ttsSynthDoneCB);
+ }
+ }
+
+ // we use a weak reference so the SynthProxy object can be garbage collected.
+ pJniStorage->tts_ref = env->NewGlobalRef(weak_this);
+
+ // save the JNI resources so we can use them (and free them) later
+ env->SetIntField(thiz, javaTTSFields.synthProxyFieldJniData,
+ (int)pJniStorage);
+
+ env->ReleaseStringUTFChars(nativeSoLib, nativeSoLibNativeString);
+}
+
+
+static void
+android_tts_SynthProxy_native_finalize(JNIEnv *env, jobject thiz, jint jniData)
+{
+ if (jniData) {
+ SynthProxyJniStorage* pSynthData = (SynthProxyJniStorage*)jniData;
+ delete pSynthData;
+ }
+}
+
+
+static void
+android_tts_SynthProxy_setLanguage(JNIEnv *env, jobject thiz, jint jniData,
+ jstring language)
+{
+ if (jniData == 0) {
+ LOGE("android_tts_SynthProxy_setLanguage(): invalid JNI data");
+ return;
+ }
+
+ SynthProxyJniStorage* pSynthData = (SynthProxyJniStorage*)jniData;
+ const char *langNativeString = env->GetStringUTFChars(language, 0);
+ // TODO check return codes
+ if (pSynthData->mNativeSynthInterface) {
+ pSynthData->mNativeSynthInterface->setLanguage(langNativeString,
+ strlen(langNativeString));
+ }
+ env->ReleaseStringUTFChars(language, langNativeString);
+}
+
+
+static void
+android_tts_SynthProxy_setSpeechRate(JNIEnv *env, jobject thiz, jint jniData,
+ int speechRate)
+{
+ if (jniData == 0) {
+ LOGE("android_tts_SynthProxy_setSpeechRate(): invalid JNI data");
+ return;
+ }
+
+ int bufSize = 10;
+    char buffer[bufSize];
+ sprintf(buffer, "%d", speechRate);
+
+ SynthProxyJniStorage* pSynthData = (SynthProxyJniStorage*)jniData;
+ LOGI("setting speech rate to %d", speechRate);
+ // TODO check return codes
+ if (pSynthData->mNativeSynthInterface) {
+ pSynthData->mNativeSynthInterface->setProperty("rate", buffer, bufSize);
+ }
+}
+
+
+// TODO: Refactor this to get rid of any assumptions about sample rate, etc.
+static void
+android_tts_SynthProxy_synthesizeToFile(JNIEnv *env, jobject thiz, jint jniData,
+ jstring textJavaString, jstring filenameJavaString)
+{
+ if (jniData == 0) {
+ LOGE("android_tts_SynthProxy_synthesizeToFile(): invalid JNI data");
+ return;
+ }
+
+ SynthProxyJniStorage* pSynthData = (SynthProxyJniStorage*)jniData;
+
+ const char *filenameNativeString =
+ env->GetStringUTFChars(filenameJavaString, 0);
+ const char *textNativeString = env->GetStringUTFChars(textJavaString, 0);
+
+ afterSynthData_t* pForAfter = new (afterSynthData_t);
+ pForAfter->jniStorage = jniData;
+ pForAfter->usageMode = USAGEMODE_WRITE_TO_FILE;
+
+ pForAfter->outputFile = fopen(filenameNativeString, "wb");
+
+ // Write 44 blank bytes for WAV header, then come back and fill them in
+ // after we've written the audio data
+ char header[44];
+ fwrite(header, 1, 44, pForAfter->outputFile);
+
+ unsigned int unique_identifier;
+
+ // TODO check return codes
+ if (pSynthData->mNativeSynthInterface) {
+ pSynthData->mNativeSynthInterface->synthesizeText(textNativeString,
+ (void *)pForAfter);
+ }
+
+ long filelen = ftell(pForAfter->outputFile);
+
+ int samples = (((int)filelen) - 44) / 2;
+ header[0] = 'R';
+ header[1] = 'I';
+ header[2] = 'F';
+ header[3] = 'F';
+ ((uint32_t *)(&header[4]))[0] = filelen - 8;
+ header[8] = 'W';
+ header[9] = 'A';
+ header[10] = 'V';
+ header[11] = 'E';
+
+ header[12] = 'f';
+ header[13] = 'm';
+ header[14] = 't';
+ header[15] = ' ';
+
+ ((uint32_t *)(&header[16]))[0] = 16; // size of fmt
+
+ ((unsigned short *)(&header[20]))[0] = 1; // format
+ ((unsigned short *)(&header[22]))[0] = 1; // channels
+ ((uint32_t *)(&header[24]))[0] = 22050; // samplerate
+ ((uint32_t *)(&header[28]))[0] = 44100; // byterate
+ ((unsigned short *)(&header[32]))[0] = 2; // block align
+ ((unsigned short *)(&header[34]))[0] = 16; // bits per sample
+
+ header[36] = 'd';
+ header[37] = 'a';
+ header[38] = 't';
+ header[39] = 'a';
+
+ ((uint32_t *)(&header[40]))[0] = samples * 2; // size of data
+
+ // Skip back to the beginning and rewrite the header
+ fseek(pForAfter->outputFile, 0, SEEK_SET);
+ fwrite(header, 1, 44, pForAfter->outputFile);
+
+ fflush(pForAfter->outputFile);
+ fclose(pForAfter->outputFile);
+
+ env->ReleaseStringUTFChars(textJavaString, textNativeString);
+ env->ReleaseStringUTFChars(filenameJavaString, filenameNativeString);
+}
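A consistency check on the hard-coded WAV header fields above (16-bit mono PCM at 22050 Hz, exactly the assumption the TODO before the function flags):

    byte rate   = 22050 Hz * 1 channel * 16/8 bytes = 44100 B/s  (offset 28)
    block align = 1 channel * 16/8 bytes            = 2          (offset 32)
    samples     = (filelen - 44) / 2, data size = samples * 2    (offset 40)
    RIFF size   = filelen - 8                                    (offset 4)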
+
+
+static void
+android_tts_SynthProxy_speak(JNIEnv *env, jobject thiz, jint jniData,
+ jstring textJavaString)
+{
+ if (jniData == 0) {
+ LOGE("android_tts_SynthProxy_speak(): invalid JNI data");
+ return;
+ }
+
+ SynthProxyJniStorage* pSynthData = (SynthProxyJniStorage*)jniData;
+
+ if (pSynthData->mAudioOut) {
+ pSynthData->mAudioOut->stop();
+ pSynthData->mAudioOut->start();
+ }
+
+ afterSynthData_t* pForAfter = new (afterSynthData_t);
+ pForAfter->jniStorage = jniData;
+ pForAfter->usageMode = USAGEMODE_PLAY_IMMEDIATELY;
+
+ if (pSynthData->mNativeSynthInterface) {
+ const char *textNativeString = env->GetStringUTFChars(textJavaString, 0);
+ pSynthData->mNativeSynthInterface->synthesizeText(textNativeString,
+ (void *)pForAfter);
+ env->ReleaseStringUTFChars(textJavaString, textNativeString);
+ }
+}
+
+
+static void
+android_tts_SynthProxy_stop(JNIEnv *env, jobject thiz, jint jniData)
+{
+ if (jniData == 0) {
+ LOGE("android_tts_SynthProxy_stop(): invalid JNI data");
+ return;
+ }
+
+ SynthProxyJniStorage* pSynthData = (SynthProxyJniStorage*)jniData;
+
+ if (pSynthData->mNativeSynthInterface) {
+ pSynthData->mNativeSynthInterface->stop();
+ }
+ if (pSynthData->mAudioOut) {
+ pSynthData->mAudioOut->stop();
+ }
+}
+
+
+static void
+android_tts_SynthProxy_shutdown(JNIEnv *env, jobject thiz, jint jniData)
+{
+ if (jniData == 0) {
+ LOGE("android_tts_SynthProxy_shutdown(): invalid JNI data");
+ return;
+ }
+
+ SynthProxyJniStorage* pSynthData = (SynthProxyJniStorage*)jniData;
+ if (pSynthData->mNativeSynthInterface) {
+ pSynthData->mNativeSynthInterface->shutdown();
+ pSynthData->mNativeSynthInterface = NULL;
+ }
+}
+
+
+// TODO add buffer format
+static void
+android_tts_SynthProxy_playAudioBuffer(JNIEnv *env, jobject thiz, jint jniData,
+ int bufferPointer, int bufferSize)
+{
+ if (jniData == 0) {
+ LOGE("android_tts_SynthProxy_playAudioBuffer(): invalid JNI data");
+ return;
+ }
+
+ SynthProxyJniStorage* pSynthData = (SynthProxyJniStorage*)jniData;
+ short* wav = (short*) bufferPointer;
+ pSynthData->mAudioOut->write(wav, bufferSize);
+ LOGI("AudioTrack wrote: %d bytes", bufferSize);
+}
+
+
+JNIEXPORT jstring JNICALL
+android_tts_SynthProxy_getLanguage(JNIEnv *env, jobject thiz, jint jniData)
+{
+ if (jniData == 0) {
+ LOGE("android_tts_SynthProxy_getLanguage(): invalid JNI data");
+ return env->NewStringUTF("");
+ }
+
+ SynthProxyJniStorage* pSynthData = (SynthProxyJniStorage*)jniData;
+ size_t bufSize = 100;
+ char buf[bufSize];
+ memset(buf, 0, bufSize);
+ // TODO check return codes
+ if (pSynthData->mNativeSynthInterface) {
+ pSynthData->mNativeSynthInterface->getLanguage(buf, &bufSize);
+ }
+ return env->NewStringUTF(buf);
+}
+
+JNIEXPORT int JNICALL
+android_tts_SynthProxy_getRate(JNIEnv *env, jobject thiz, jint jniData)
+{
+ if (jniData == 0) {
+ LOGE("android_tts_SynthProxy_getRate(): invalid JNI data");
+ return 0;
+ }
+
+ SynthProxyJniStorage* pSynthData = (SynthProxyJniStorage*)jniData;
+ size_t bufSize = 100;
+
+ char buf[bufSize];
+ memset(buf, 0, bufSize);
+ // TODO check return codes
+ if (pSynthData->mNativeSynthInterface) {
+ pSynthData->mNativeSynthInterface->getProperty("rate", buf, &bufSize);
+ }
+ return atoi(buf);
+}
+
+// Dalvik VM type signatures
+static JNINativeMethod gMethods[] = {
+ { "native_stop",
+ "(I)V",
+ (void*)android_tts_SynthProxy_stop
+ },
+ { "native_speak",
+ "(ILjava/lang/String;)V",
+ (void*)android_tts_SynthProxy_speak
+ },
+ { "native_synthesizeToFile",
+ "(ILjava/lang/String;Ljava/lang/String;)V",
+ (void*)android_tts_SynthProxy_synthesizeToFile
+ },
+ { "native_setLanguage",
+ "(ILjava/lang/String;)V",
+ (void*)android_tts_SynthProxy_setLanguage
+ },
+ { "native_setSpeechRate",
+ "(II)V",
+ (void*)android_tts_SynthProxy_setSpeechRate
+ },
+ { "native_playAudioBuffer",
+ "(III)V",
+ (void*)android_tts_SynthProxy_playAudioBuffer
+ },
+ { "native_getLanguage",
+ "(I)Ljava/lang/String;",
+ (void*)android_tts_SynthProxy_getLanguage
+ },
+ { "native_getRate",
+ "(I)I",
+ (void*)android_tts_SynthProxy_getRate
+ },
+ { "native_shutdown",
+ "(I)V",
+ (void*)android_tts_SynthProxy_shutdown
+ },
+ { "native_setup",
+ "(Ljava/lang/Object;Ljava/lang/String;)V",
+ (void*)android_tts_SynthProxy_native_setup
+ },
+ { "native_finalize",
+ "(I)V",
+ (void*)android_tts_SynthProxy_native_finalize
+ }
+};
+
+#define SP_JNIDATA_FIELD_NAME "mJniData"
+#define SP_POSTSPEECHSYNTHESIZED_METHOD_NAME "postNativeSpeechSynthesizedInJava"
+
+// TODO: verify this is the correct path
+static const char* const kClassPathName = "android/tts/SynthProxy";
+
+jint JNI_OnLoad(JavaVM* vm, void* reserved)
+{
+ JNIEnv* env = NULL;
+ jint result = -1;
+ jclass clazz;
+
+ if (vm->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) {
+ LOGE("ERROR: GetEnv failed\n");
+ goto bail;
+ }
+ assert(env != NULL);
+
+ clazz = env->FindClass(kClassPathName);
+ if (clazz == NULL) {
+ LOGE("Can't find %s", kClassPathName);
+ goto bail;
+ }
+
+ javaTTSFields.synthProxyClass = clazz;
+ javaTTSFields.synthProxyFieldJniData = NULL;
+ javaTTSFields.synthProxyMethodPost = NULL;
+
+ javaTTSFields.synthProxyFieldJniData = env->GetFieldID(clazz,
+ SP_JNIDATA_FIELD_NAME, "I");
+ if (javaTTSFields.synthProxyFieldJniData == NULL) {
+ LOGE("Can't find %s.%s field", kClassPathName, SP_JNIDATA_FIELD_NAME);
+ goto bail;
+ }
+
+ javaTTSFields.synthProxyMethodPost = env->GetStaticMethodID(clazz,
+ SP_POSTSPEECHSYNTHESIZED_METHOD_NAME, "(Ljava/lang/Object;II)V");
+ if (javaTTSFields.synthProxyMethodPost == NULL) {
+ LOGE("Can't find %s.%s method", kClassPathName, SP_POSTSPEECHSYNTHESIZED_METHOD_NAME);
+ goto bail;
+ }
+
+ if (jniRegisterNativeMethods(
+ env, kClassPathName, gMethods, NELEM(gMethods)) < 0)
+ goto bail;
+
+ /* success -- return valid version number */
+ result = JNI_VERSION_1_4;
+
+ bail:
+ return result;
+}