/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "AAudioServiceStreamShared"
//#define LOG_NDEBUG 0
#include <utils/Log.h>

#include <mutex>

#include <aaudio/AAudio.h>

#include "binding/IAAudioService.h"

#include "binding/AAudioServiceMessage.h"
#include "AAudioServiceStreamBase.h"
#include "AAudioServiceStreamShared.h"
#include "AAudioEndpointManager.h"
#include "AAudioService.h"
#include "AAudioServiceEndpoint.h"

using namespace android;
using namespace aaudio;

#define MIN_BURSTS_PER_BUFFER 2
#define DEFAULT_BURSTS_PER_BUFFER 16
// This is an arbitrary range. TODO review.
#define MAX_FRAMES_PER_BUFFER (32 * 1024)

AAudioServiceStreamShared::AAudioServiceStreamShared(AAudioService &audioService)
    : mAudioService(audioService)
    , mTimestampPositionOffset(0)
{
}

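// Calculate a buffer capacity in frames that is a whole number of bursts,
// at least MIN_BURSTS_PER_BUFFER bursts, and no larger than MAX_FRAMES_PER_BUFFER.
// For example, with framesPerBurst = 480:
//   calculateBufferCapacity(AAUDIO_UNSPECIFIED, 480) yields 7680 frames (16 bursts)
//   calculateBufferCapacity(1000, 480) rounds up to 1440 frames (3 bursts)
// Returns a negative AAUDIO_ERROR_* code if the request cannot be satisfied.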
int32_t AAudioServiceStreamShared::calculateBufferCapacity(int32_t requestedCapacityFrames,
                                                           int32_t framesPerBurst) {

    if (requestedCapacityFrames > MAX_FRAMES_PER_BUFFER) {
        ALOGE("AAudioServiceStreamShared::calculateBufferCapacity() requested capacity %d > max %d",
              requestedCapacityFrames, MAX_FRAMES_PER_BUFFER);
        return AAUDIO_ERROR_OUT_OF_RANGE;
    }

    // Determine how many bursts will fit in the buffer.
    int32_t numBursts;
    if (requestedCapacityFrames == AAUDIO_UNSPECIFIED) {
        // Use fewer bursts if default is too many.
        if ((DEFAULT_BURSTS_PER_BUFFER * framesPerBurst) > MAX_FRAMES_PER_BUFFER) {
            numBursts = MAX_FRAMES_PER_BUFFER / framesPerBurst;
        } else {
            numBursts = DEFAULT_BURSTS_PER_BUFFER;
        }
    } else {
        // round up to nearest burst boundary
        numBursts = (requestedCapacityFrames + framesPerBurst - 1) / framesPerBurst;
    }

    // Clip to bare minimum.
    if (numBursts < MIN_BURSTS_PER_BUFFER) {
        numBursts = MIN_BURSTS_PER_BUFFER;
    }
    // Check for numeric overflow.
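    // Limiting both factors to 0x8000 (2^15) keeps the product at or below 2^30,
    // which fits safely in a signed 32-bit int.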
    if (numBursts > 0x8000 || framesPerBurst > 0x8000) {
        ALOGE("AAudioServiceStreamShared::calculateBufferCapacity() overflow, capacity = %d * %d",
              numBursts, framesPerBurst);
        return AAUDIO_ERROR_OUT_OF_RANGE;
    }
    int32_t capacityInFrames = numBursts * framesPerBurst;

    // Final sanity check.
    if (capacityInFrames > MAX_FRAMES_PER_BUFFER) {
        ALOGE("AAudioServiceStreamShared::calculateBufferCapacity() calc capacity %d > max %d",
              capacityInFrames, MAX_FRAMES_PER_BUFFER);
        return AAUDIO_ERROR_OUT_OF_RANGE;
    }
    ALOGD("AAudioServiceStreamShared::calculateBufferCapacity() requested %d frames, actual = %d",
          requestedCapacityFrames, capacityInFrames);
    return capacityInFrames;
}

aaudio_result_t AAudioServiceStreamShared::open(const aaudio::AAudioStreamRequest &request,
                                                aaudio::AAudioStreamConfiguration &configurationOutput) {

    sp<AAudioServiceStreamShared> keep(this);

    aaudio_result_t result = AAudioServiceStreamBase::open(request, configurationOutput);
    if (result != AAUDIO_OK) {
        ALOGE("AAudioServiceStreamBase open() returned %d", result);
        return result;
    }

    const AAudioStreamConfiguration &configurationInput = request.getConstantConfiguration();
    aaudio_direction_t direction = request.getDirection();

    AAudioEndpointManager &endpointManager = AAudioEndpointManager::getInstance();
    mServiceEndpoint = endpointManager.openEndpoint(mAudioService, configurationOutput, direction);
    if (mServiceEndpoint == nullptr) {
        ALOGE("AAudioServiceStreamShared::open() mServiceEndpoint = %p", mServiceEndpoint);
        return AAUDIO_ERROR_UNAVAILABLE;
    }

    // Is the request compatible with the shared endpoint?
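    // A shared stream must match the endpoint's format, sample rate and channel count,
    // so an explicit request that differs is rejected below.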
    mAudioFormat = configurationInput.getFormat();
    if (mAudioFormat == AAUDIO_FORMAT_UNSPECIFIED) {
        mAudioFormat = AAUDIO_FORMAT_PCM_FLOAT;
    } else if (mAudioFormat != AAUDIO_FORMAT_PCM_FLOAT) {
        ALOGE("AAudioServiceStreamShared::open() mAudioFormat = %d, need FLOAT", mAudioFormat);
        result = AAUDIO_ERROR_INVALID_FORMAT;
        goto error;
    }

    mSampleRate = configurationInput.getSampleRate();
    if (mSampleRate == AAUDIO_UNSPECIFIED) {
        mSampleRate = mServiceEndpoint->getSampleRate();
    } else if (mSampleRate != mServiceEndpoint->getSampleRate()) {
        ALOGE("AAudioServiceStreamShared::open() mSampleRate = %d, need %d",
              mSampleRate, mServiceEndpoint->getSampleRate());
        result = AAUDIO_ERROR_INVALID_RATE;
        goto error;
    }

    mSamplesPerFrame = configurationInput.getSamplesPerFrame();
    if (mSamplesPerFrame == AAUDIO_UNSPECIFIED) {
        mSamplesPerFrame = mServiceEndpoint->getSamplesPerFrame();
    } else if (mSamplesPerFrame != mServiceEndpoint->getSamplesPerFrame()) {
        ALOGE("AAudioServiceStreamShared::open() mSamplesPerFrame = %d, need %d",
              mSamplesPerFrame, mServiceEndpoint->getSamplesPerFrame());
        result = AAUDIO_ERROR_OUT_OF_RANGE;
        goto error;
    }

    mFramesPerBurst = mServiceEndpoint->getFramesPerBurst();
    ALOGD("AAudioServiceStreamShared::open() mSampleRate = %d, mFramesPerBurst = %d",
          mSampleRate, mFramesPerBurst);

    mCapacityInFrames = calculateBufferCapacity(configurationInput.getBufferCapacity(),
                                                mFramesPerBurst);
    if (mCapacityInFrames < 0) {
        result = mCapacityInFrames; // negative error code
        mCapacityInFrames = 0;
        goto error;
    }

    // Create audio data shared memory buffer for client.
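    // The client maps this ring buffer through the description
    // returned later by getDownDataDescription().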
    mAudioDataQueue = new SharedRingBuffer();
    result = mAudioDataQueue->allocate(calculateBytesPerFrame(), mCapacityInFrames);
    if (result != AAUDIO_OK) {
        ALOGE("AAudioServiceStreamShared::open() could not allocate FIFO with %d frames",
              mCapacityInFrames);
        result = AAUDIO_ERROR_NO_MEMORY;
        goto error;
    }

    ALOGD("AAudioServiceStreamShared::open() actual rate = %d, channels = %d, deviceId = %d",
          mSampleRate, mSamplesPerFrame, mServiceEndpoint->getDeviceId());

    // Fill in configuration for client.
    configurationOutput.setSampleRate(mSampleRate);
    configurationOutput.setSamplesPerFrame(mSamplesPerFrame);
    configurationOutput.setFormat(mAudioFormat);
    configurationOutput.setDeviceId(mServiceEndpoint->getDeviceId());

    result = mServiceEndpoint->registerStream(keep);
    if (result != AAUDIO_OK) {
        goto error;
    }

    setState(AAUDIO_STREAM_STATE_OPEN);
    return AAUDIO_OK;

error:
    close();
    return result;
}

/**
 * Start the flow of audio data.
 *
 * An AAUDIO_SERVICE_EVENT_STARTED will be sent to the client when complete.
 */
aaudio_result_t AAudioServiceStreamShared::start() {
    if (isRunning()) {
        return AAUDIO_OK;
    }
    AAudioServiceEndpoint *endpoint = mServiceEndpoint;
    if (endpoint == nullptr) {
        return AAUDIO_ERROR_INVALID_STATE;
    }
    // For output streams, this will add the stream to the mixer.
    aaudio_result_t result = endpoint->startStream(this);
    if (result != AAUDIO_OK) {
        ALOGE("AAudioServiceStreamShared::start() mServiceEndpoint returned %d", result);
        disconnect();
    } else {
        result = endpoint->getStreamInternal()->startClient(mMmapClient, &mClientHandle);
        if (result == AAUDIO_OK) {
            result = AAudioServiceStreamBase::start();
        }
    }
    return result;
}

/**
 * Stop the flow of data so that start() can resume without loss of data.
 *
 * An AAUDIO_SERVICE_EVENT_PAUSED will be sent to the client when complete.
 */
aaudio_result_t AAudioServiceStreamShared::pause() {
    if (!isRunning()) {
        return AAUDIO_OK;
    }
    AAudioServiceEndpoint *endpoint = mServiceEndpoint;
    if (endpoint == nullptr) {
        return AAUDIO_ERROR_INVALID_STATE;
    }
    endpoint->getStreamInternal()->stopClient(mClientHandle);
    aaudio_result_t result = endpoint->stopStream(this);
    if (result != AAUDIO_OK) {
        ALOGE("AAudioServiceStreamShared::pause() mServiceEndpoint returned %d", result);
        disconnect(); // TODO should we return or pause Base first?
    }
    return AAudioServiceStreamBase::pause();
}

aaudio_result_t AAudioServiceStreamShared::stop() {
    if (!isRunning()) {
        return AAUDIO_OK;
    }
    AAudioServiceEndpoint *endpoint = mServiceEndpoint;
    if (endpoint == nullptr) {
        return AAUDIO_ERROR_INVALID_STATE;
    }
    endpoint->getStreamInternal()->stopClient(mClientHandle);
    aaudio_result_t result = endpoint->stopStream(this);
    if (result != AAUDIO_OK) {
        ALOGE("AAudioServiceStreamShared::stop() mServiceEndpoint returned %d", result);
        disconnect();
    }
    return AAudioServiceStreamBase::stop();
}

/**
 * Discard any data held by the underlying HAL or Service.
 *
 * An AAUDIO_SERVICE_EVENT_FLUSHED will be sent to the client when complete.
 */
aaudio_result_t AAudioServiceStreamShared::flush() {
    AAudioServiceEndpoint *endpoint = mServiceEndpoint;
    if (endpoint == nullptr) {
        return AAUDIO_ERROR_INVALID_STATE;
    }
    if (mState != AAUDIO_STREAM_STATE_PAUSED) {
        ALOGE("AAudioServiceStreamShared::flush() stream not paused, state = %s",
              AAudio_convertStreamStateToText(mState));
        return AAUDIO_ERROR_INVALID_STATE;
    }
    // Data will get flushed when the client receives the FLUSHED event.
    return AAudioServiceStreamBase::flush();
}

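/**
 * Stop the stream, then release the endpoint and the shared audio data buffer.
 *
 * Calling close() on an already closed stream is a no-op.
 */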
aaudio_result_t AAudioServiceStreamShared::close() {
    if (mState == AAUDIO_STREAM_STATE_CLOSED) {
        return AAUDIO_OK;
    }

    stop();

    AAudioServiceEndpoint *endpoint = mServiceEndpoint;
    if (endpoint == nullptr) {
        return AAUDIO_ERROR_INVALID_STATE;
    }

    endpoint->unregisterStream(this);

    AAudioEndpointManager &endpointManager = AAudioEndpointManager::getInstance();
    endpointManager.closeEndpoint(endpoint);
    mServiceEndpoint = nullptr;

    if (mAudioDataQueue != nullptr) {
        delete mAudioDataQueue;
        mAudioDataQueue = nullptr;
    }
    return AAudioServiceStreamBase::close();
}

/**
 * Get an immutable description of the data queue created by this service.
 */
aaudio_result_t AAudioServiceStreamShared::getDownDataDescription(AudioEndpointParcelable &parcelable)
{
    // Gather information on the data queue.
    mAudioDataQueue->fillParcelable(parcelable,
                                    parcelable.mDownDataQueueParcelable);
    parcelable.mDownDataQueueParcelable.setFramesPerBurst(getFramesPerBurst());
    return AAUDIO_OK;
}

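// Called from the real-time service thread, e.g. the mixer, to record the
// position and time of the most recent data transfer for this stream.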
void AAudioServiceStreamShared::markTransferTime(Timestamp &timestamp) {
    mAtomicTimestamp.write(timestamp);
}

// Get the timestamp that was written by the real-time service thread, e.g. the mixer.
aaudio_result_t AAudioServiceStreamShared::getFreeRunningPosition(int64_t *positionFrames,
                                                                  int64_t *timeNanos) {
    if (mAtomicTimestamp.isValid()) {
        Timestamp timestamp = mAtomicTimestamp.read();
        *positionFrames = timestamp.getPosition();
        *timeNanos = timestamp.getNanoseconds();
        return AAUDIO_OK;
    } else {
        return AAUDIO_ERROR_UNAVAILABLE;
    }
}

// Get timestamp from lower level service.
aaudio_result_t AAudioServiceStreamShared::getHardwareTimestamp(int64_t *positionFrames,
                                                                int64_t *timeNanos) {

    aaudio_result_t result = mServiceEndpoint->getTimestamp(positionFrames, timeNanos);
    if (result == AAUDIO_OK) {
        *positionFrames -= mTimestampPositionOffset.load(); // Offset from shared MMAP stream
    }
    return result;
}