/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package android.hardware.audio@7.0;

import android.hardware.audio.common@7.0;
import IStream;
import IStreamOutCallback;
import IStreamOutEventCallback;

interface IStreamOut extends IStream {
    /**
     * Return the audio hardware driver estimated latency in milliseconds.
     *
     * @return latencyMs latency in milliseconds.
     */
    getLatency() generates (uint32_t latencyMs);

    /**
     * This method is used in situations where audio mixing is done in the
     * hardware. It serves as a direct interface to the hardware, allowing
     * the volume to be set directly rather than via the framework. The
     * stream may be routed to multiple PCM outputs or hardware accelerated
     * codecs, such as MP3 or AAC.
     *
     * Optional method
     *
     * @param left left channel attenuation, 1.0f is unity, 0.0f is zero.
     * @param right right channel attenuation, 1.0f is unity, 0.0f is zero.
     * @return retval operation completion status.
     *         If a volume is outside [0,1], returns INVALID_ARGUMENTS.
     */
    setVolume(float left, float right) generates (Result retval);
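
    /*
     * A minimal sketch of the argument check described above, as it might
     * appear in a C++ HAL implementation ('StreamOut' and the hardware hook
     * are illustrative names, not part of this interface):
     *
     *     Return<Result> StreamOut::setVolume(float left, float right) {
     *         // Volumes outside [0.0f, 1.0f] are rejected as documented.
     *         if (std::isnan(left) || left < 0.0f || left > 1.0f ||
     *             std::isnan(right) || right < 0.0f || right > 1.0f) {
     *             return Result::INVALID_ARGUMENTS;
     *         }
     *         // Otherwise forward the attenuation to the hardware mixer.
     *         return Result::OK;
     *     }
     */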

    /**
     * Called when the metadata of the stream's source has been changed.
     *
     * Optional method
     *
     * @param sourceMetadata Description of the audio that is played by the clients.
     * @return retval operation completion status.
     *         If any of the metadata fields contains an invalid value,
     *         returns INVALID_ARGUMENTS.
     *         If the method isn't supported by the HAL, returns NOT_SUPPORTED.
     */
    updateSourceMetadata(SourceMetadata sourceMetadata) generates (Result retval);

    /**
     * Commands that can be executed on the driver writer thread.
     */
    enum WriteCommand : int32_t {
        WRITE,
        GET_PRESENTATION_POSITION,
        GET_LATENCY
    };

    /**
     * Data structure passed back to the client via status message queue
     * of 'write' operation.
     *
     * Possible values of 'retval' field:
     *  - OK, write operation was successful;
     *  - INVALID_ARGUMENTS, stream was not configured properly;
     *  - INVALID_STATE, stream is in a state that doesn't allow writes;
     *  - INVALID_OPERATION, retrieving presentation position isn't supported.
     */
    struct WriteStatus {
        Result retval;
        WriteCommand replyTo;  // discriminator
        union Reply {
            uint64_t written;    // WRITE command, amount of bytes written, >= 0.
            struct PresentationPosition {  // same as generated by
                uint64_t frames;           // getPresentationPosition.
                TimeSpec timeStamp;
            } presentationPosition;
            uint32_t latencyMs;  // Same as generated by getLatency.
        } reply;
    };
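
    /*
     * For illustration, a C++ fragment of how the HAL's writer thread might
     * fill this structure for each dequeued command ('doWrite',
     * 'doGetPresentationPosition', 'doGetLatency' and 'statusMQ' are assumed
     * helper names, not part of this interface):
     *
     *     WriteStatus status;
     *     status.replyTo = command;  // echo the discriminator
     *     switch (command) {
     *         case WriteCommand::WRITE:
     *             status.reply.written = doWrite(data, availableBytes);
     *             status.retval = Result::OK;
     *             break;
     *         case WriteCommand::GET_PRESENTATION_POSITION:
     *             status.retval = doGetPresentationPosition(
     *                     &status.reply.presentationPosition.frames,
     *                     &status.reply.presentationPosition.timeStamp);
     *             break;
     *         case WriteCommand::GET_LATENCY:
     *             status.reply.latencyMs = doGetLatency();
     *             status.retval = Result::OK;
     *             break;
     *     }
     *     statusMQ->write(&status);  // report back via the status queue
     */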

    /**
     * Set up required transports for passing audio buffers to the driver.
     *
     * The transport consists of three message queues:
     *  -- command queue is used to instruct the writer thread what operation
     *     to perform;
     *  -- data queue is used for passing audio data from the client
     *     to the driver;
     *  -- status queue is used for reporting operation status
     *     (e.g. amount of bytes actually written or error code).
     *
     * The driver operates on a dedicated thread. The client must ensure that
     * the thread is given an appropriate priority and assigned to the correct
     * scheduler and cgroup. For this purpose, the method returns the identifier
     * of the driver thread.
     *
     * @param frameSize the size of a single frame, in bytes.
     * @param framesCount the number of frames in a buffer.
     * @return retval OK if the message queues were created successfully.
     *                INVALID_STATE if the method was already called.
     *                INVALID_ARGUMENTS if there was a problem setting up
     *                                  the queues.
     * @return commandMQ a message queue used for passing commands.
     * @return dataMQ a message queue used for passing audio data in the format
     *                specified at the stream opening.
     * @return statusMQ a message queue used for passing status from the driver
     *                  using WriteStatus structures.
     * @return threadId identifier of the driver's dedicated thread; the caller
     *                  may adjust the thread priority to match the priority
     *                  of the thread that provides audio data.
     */
    prepareForWriting(uint32_t frameSize, uint32_t framesCount)
            generates (
                Result retval,
                fmq_sync<WriteCommand> commandMQ,
                fmq_sync<uint8_t> dataMQ,
                fmq_sync<WriteStatus> statusMQ,
                int32_t threadId);
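
    /*
     * A rough client-side view of the write protocol set up above
     * (illustrative C++; it assumes the returned descriptors have already
     * been wrapped into synchronized MessageQueue objects named 'commandMQ',
     * 'dataMQ' and 'statusMQ', and that the writer thread is woken via the
     * event flag associated with the data queue):
     *
     *     WriteCommand cmd = WriteCommand::WRITE;
     *     commandMQ->write(&cmd);                       // 1. what to do
     *     dataMQ->write(audioBuffer, audioBufferBytes); // 2. the audio data
     *     // ... wake the writer thread, wait for it to signal completion ...
     *     WriteStatus status;
     *     if (statusMQ->read(&status) && status.retval == Result::OK &&
     *             status.replyTo == WriteCommand::WRITE) {
     *         uint64_t consumed = status.reply.written; // 3. bytes written
     *     }
     */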

    /**
     * Return the number of audio frames written by the audio DSP to DAC since
     * the output has exited standby.
     *
     * Optional method
     *
     * @return retval operation completion status.
     * @return dspFrames number of audio frames written.
     */
    getRenderPosition() generates (Result retval, uint32_t dspFrames);

    /**
     * Get the local time at which the next write to the audio driver will be
     * presented. The units are microseconds, where the epoch is decided by the
     * local audio HAL.
     *
     * Optional method
     *
     * @return retval operation completion status.
     * @return timestampUs time of the next write.
     */
    getNextWriteTimestamp() generates (Result retval, int64_t timestampUs);

    /**
     * Set the callback interface for notifying completion of non-blocking
     * write and drain.
     *
     * Calling this function implies that all future 'write' and 'drain'
     * calls must be non-blocking and use the callback to signal completion.
     *
     * The 'clearCallback' method needs to be called in order to release the
     * local callback proxy on the server side and thus dereference the
     * callback implementation on the client side.
     *
     * @return retval operation completion status.
     */
    setCallback(IStreamOutCallback callback) generates (Result retval);
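
    /*
     * A minimal sketch of a client-side callback implementation for
     * non-blocking mode (illustrative C++; 'WriteDrainObserver' and 'signal'
     * are assumed names, and the callback methods are those declared in
     * IStreamOutCallback):
     *
     *     struct WriteDrainObserver : public IStreamOutCallback {
     *         Return<void> onWriteReady() override { signal(); return Void(); }
     *         Return<void> onDrainReady() override { signal(); return Void(); }
     *         Return<void> onError() override      { signal(); return Void(); }
     *         void signal();  // e.g. post a semaphore the writing thread waits on
     *     };
     *     // ...
     *     stream->setCallback(new WriteDrainObserver());
     */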

    /**
     * Clears the callback previously set via the 'setCallback' method.
     *
     * Warning: failure to call this method results in the callback
     * implementation on the client side being held until the HAL server
     * terminates.
     *
     * If no callback was previously set, the method should be a no-op
     * and return OK.
     *
     * @return retval operation completion status: OK or NOT_SUPPORTED.
     */
    clearCallback() generates (Result retval);

    /**
     * Set the callback interface for notifying about an output stream event.
     *
     * Calling this method with a null pointer will result in releasing
     * the local callback proxy on the server side and thus dereferencing
     * the callback implementation on the client side.
     *
     * @return retval operation completion status.
     */
    setEventCallback(IStreamOutEventCallback callback)
            generates (Result retval);

    /**
     * Returns whether the HAL supports pausing and resuming of streams.
     *
     * @return supportsPause true if pausing is supported.
     * @return supportsResume true if resume is supported.
     */
    supportsPauseAndResume()
            generates (bool supportsPause, bool supportsResume);

    /**
     * Notifies the audio driver to stop playback; the queued buffers are
     * retained by the hardware. Useful for implementing pause/resume. An
     * empty implementation is acceptable if not supported; however, this
     * method must be implemented for hardware with non-trivial latency. In
     * the pause state, some audio hardware may still be using power. Client
     * code may consider calling 'suspend' after a timeout to prevent that
     * excess power usage.
     *
     * Implementation of this function is mandatory for offloaded playback.
     *
     * @return retval operation completion status.
     */
    pause() generates (Result retval);

    /**
     * Notifies the audio driver to resume playback following a pause.
     * Returns error INVALID_STATE if called without a matching pause.
     *
     * Implementation of this function is mandatory for offloaded playback.
     *
     * @return retval operation completion status.
     */
    resume() generates (Result retval);

    /**
     * Returns whether the HAL supports draining of streams.
     *
     * @return supports true if draining is supported.
     */
    supportsDrain() generates (bool supports);

    /**
     * Requests notification when data buffered by the driver/hardware has been
     * played. If 'setCallback' has previously been called to enable
     * non-blocking mode, then 'drain' must not block; instead, it must return
     * quickly, and completion of the drain is notified through the callback.
     * If 'setCallback' has not been called, then 'drain' must block until
     * completion.
     *
     * If 'type' is 'ALL', the drain completes when all previously written data
     * has been played.
     *
     * If 'type' is 'EARLY_NOTIFY', the drain completes shortly before all data
     * for the current track has played to allow time for the framework to
     * perform a gapless track switch.
     *
     * Drain must return immediately on 'stop' and 'flush' calls.
     *
     * Implementation of this function is mandatory for offloaded playback.
     *
     * @param type type of drain.
     * @return retval operation completion status.
     */
    drain(AudioDrain type) generates (Result retval);
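
    /*
     * One possible gapless track switch sequence built from the calls above
     * (an illustrative sketch, assuming non-blocking mode was enabled via
     * 'setCallback'; error handling omitted):
     *
     *     // Near the end of track A, after its last buffer has been queued:
     *     stream->drain(AudioDrain::EARLY_NOTIFY);  // returns quickly
     *     // In IStreamOutCallback::onDrainReady(), start writing track B's
     *     // data so playback continues without an audible gap while the
     *     // tail of track A finishes playing.
     */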

    /**
     * Notifies the audio driver to flush (that is, drop) the queued data. The
     * stream must already be paused before 'flush' is called. For compressed
     * and offload streams, the frame count returned by
     * 'getPresentationPosition' must be reset after a flush.
     *
     * Optional method
     *
     * Implementation of this function is mandatory for offloaded playback.
     *
     * @return retval operation completion status.
     */
    flush() generates (Result retval);
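
    /*
     * A sketch of the order of calls implied above when discarding queued
     * data, e.g. to implement a seek on an offloaded stream (illustrative
     * only):
     *
     *     stream->pause();   // playback stops, hardware retains its buffers
     *     stream->flush();   // queued data is dropped; for compressed and
     *                        // offload streams the presentation position
     *                        // resets as well
     *     // ... queue data for the new position, then:
     *     stream->resume();
     */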

    /**
     * Return a recent count of the number of audio frames presented to an
     * external observer. This excludes frames which have been written but are
     * still in the pipeline. The count must not reset to zero when a PCM
     * output enters standby. For compressed and offload streams it is
     * recommended that the HAL resets the frame count.
     *
     * This method also returns the value of CLOCK_MONOTONIC as of this
     * presentation count. The returned count is expected to be 'recent', but
     * does not need to be the most recent possible value. However, the
     * associated time must correspond to whatever count is returned.
     * Example: assume that N+M frames have been presented, where M is a 'small'
     * number. Then it is permissible to return N instead of N+M, and the
     * timestamp must correspond to N rather than N+M. The terms 'recent' and
     * 'small' are not defined. They reflect the quality of the implementation.
     *
     * @return retval operation completion status.
     * @return frames count of presented audio frames.
     * @return timeStamp associated clock time.
     */
    getPresentationPosition()
            generates (Result retval, uint64_t frames, TimeSpec timeStamp);
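
    /*
     * For illustration, one way a client might use the returned pair to
     * estimate when its most recently written frame will be audible
     * (a sketch; 'framesWritten' and 'sampleRate' are assumed to be tracked
     * by the client, and the generated C++ proxy is assumed to deliver the
     * results through a callback):
     *
     *     uint64_t frames = 0;
     *     TimeSpec ts = {};
     *     stream->getPresentationPosition(
     *             [&](Result r, uint64_t f, const TimeSpec& t) {
     *                 if (r == Result::OK) { frames = f; ts = t; }
     *             });
     *     uint64_t pendingFrames = framesWritten - frames;
     *     int64_t audibleAtNs = int64_t(ts.tvSec) * 1000000000 + ts.tvNSec
     *             + int64_t(pendingFrames) * 1000000000 / sampleRate;
     */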

    /**
     * Selects a presentation for decoding from a next generation media stream
     * (as defined in ETSI TS 103 190-2) and a program within the presentation.
     *
     * Optional method
     *
     * @param presentationId selected audio presentation.
     * @param programId refinement for the presentation.
     * @return retval operation completion status.
     */
    selectPresentation(int32_t presentationId, int32_t programId)
            generates (Result retval);

    /**
     * Returns the Dual Mono mode presentation setting.
     *
     * Optional method
     *
     * @return retval operation completion status.
     * @return mode current setting of Dual Mono mode.
     */
    getDualMonoMode() generates (Result retval, DualMonoMode mode);

    /**
     * Sets the Dual Mono mode presentation on the output device.
     *
     * The Dual Mono mode is generally applied to stereo audio streams
     * where the left and right channels come from separate sources.
     *
     * Optional method
     *
     * @param mode selected Dual Mono mode.
     * @return retval operation completion status.
     */
    setDualMonoMode(DualMonoMode mode) generates (Result retval);

    /**
     * Returns the Audio Description Mix level in dB.
     *
     * The level is applied to streams incorporating a secondary Audio
     * Description stream. It specifies the relative level of mixing for
     * the Audio Description with reference to the Main Audio.
     *
     * Optional method
     *
     * The value of the relative level is in the range from negative infinity
     * to +48 dB.
     *
     * @return retval operation completion status.
     * @return leveldB the current Audio Description Mix Level in dB.
     */
    getAudioDescriptionMixLevel() generates (Result retval, float leveldB);

    /**
     * Sets the Audio Description Mix level in dB.
     *
     * For streams incorporating a secondary Audio Description stream,
     * the relative level of mixing of the Audio Description to the Main Audio
     * is controlled by this method.
     *
     * Optional method
     *
     * The value of the relative level must be in the range from negative
     * infinity to +48 dB.
     *
     * @param leveldB Audio Description Mix Level in dB
     * @return retval operation completion status.
     */
    setAudioDescriptionMixLevel(float leveldB) generates (Result retval);

    /**
     * Retrieves current playback rate parameters.
     *
     * Optional method
     *
     * @return retval operation completion status.
     * @return playbackRate current playback parameters.
     */
    getPlaybackRateParameters()
            generates (Result retval, PlaybackRate playbackRate);

    /**
     * Sets the playback rate parameters that control playback behavior.
     * This is normally used when playing encoded content and decoding
     * is performed in hardware. Otherwise, the framework can apply
     * the necessary transformations.
     *
     * Optional method
     *
     * If the HAL supports setting the playback rate, it is recommended
     * to support speed and pitch values at least in the range
     * from 0.5f to 2.0f, inclusive (see the definition of the PlaybackRate
     * struct).
     *
     * @param playbackRate playback parameters.
     * @return retval operation completion status.
     */
    setPlaybackRateParameters(PlaybackRate playbackRate)
            generates (Result retval);
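
    /*
     * A possible HAL-side acceptance check for the recommendation above
     * (an illustrative C++ sketch; it assumes PlaybackRate carries 'speed'
     * and 'pitch' fields as described, and 'StreamOut' is a placeholder
     * class name):
     *
     *     Return<Result> StreamOut::setPlaybackRateParameters(
     *             const PlaybackRate& rate) {
     *         // Support at least [0.5f, 2.0f] for both speed and pitch.
     *         if (rate.speed < 0.5f || rate.speed > 2.0f ||
     *                 rate.pitch < 0.5f || rate.pitch > 2.0f) {
     *             return Result::NOT_SUPPORTED;
     *         }
     *         // Program the hardware time-stretcher here.
     *         return Result::OK;
     *     }
     */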
};