Fast音频低时延
1.1 音频时延
音频时延指从用户触发点击到声音从设备播放所经过的时间。
从音频数据传输的角度,指从触发回调写入开始,到最终播放出声的耗时,包括数据写入、算法处理、硬件传输延迟,以及在蓝牙场景下的蓝牙传输延迟。
当前OHAudio支持两种模式:普通模式(AUDIOSTREAM_LATENCY_MODE_NORMAL)和低时延模式(AUDIOSTREAM_LATENCY_MODE_FAST)。
开发者通过调用OH_AudioStreamBuilder_SetLatencyMode(),设置OH_AudioStream_LatencyMode,来决定音频流使用哪种模式。
低时延模式通过读写数据架构优化,使得该模式下音频播放和录制具有更低的时延。
为使用低时延模式,开发者需要使用OHAudio进行音频开发。设置低时延模式开发示例:
OH_AudioStream_LatencyMode latencyMode = AUDIOSTREAM_LATENCY_MODE_FAST;
OH_AudioStreamBuilder_SetLatencyMode(builder, latencyMode);
在低时延模式下,应用需要每5ms提供一次数据,如果送数据不及时可能导致杂音等问题。
但在以下场景中,即使设置了低时延模式,系统仍会使用普通模式:
- 当前设备不支持低时延模式。
- 音频流采样率设置为非48kHz。
- 系统低时延资源已被全部占用。
游戏类应用对时延要求较高,建议使用低时延模式。
1.2 Fast低时延框架设计
Fast低时延框架旨在消除传统音频流水线中因多次数据拷贝、用户态与内核态切换以及中断处理所带来的延迟,为对实时性要求极高的应用(如VoIP、游戏音效、专业音频制作)提供毫秒级的音频输出延迟。
代码位置:
..\foundation\multimedia\audio_framework\frameworks\native\hdiadapter\sink\fast
..\foundation\multimedia\audio_framework\frameworks\native\hdiadapter\source\fast
2 接口调用
在hdiadapter层通过LoadAdapter加载fast通路的适配器。
FastAudioRenderSink::Init用于初始化一个fast音频渲染器
// Initializes the fast (mmap) audio render sink: caches the caller-supplied
// attributes, selects the HAL adapter name, creates the HDI render handle and
// maps the low-latency shared buffer. Returns SUCCESS, or ERR_NOT_STARTED when
// either render creation or mmap-buffer preparation fails.
int32_t FastAudioRenderSink::Init(const IAudioSinkAttr &attr)
{
    AUDIO_INFO_LOG("in");
    attr_ = attr;
    // Mmap-flagged streams go through the "primary" HAL adapter; every other
    // fast stream uses the "voip" adapter.
    if (attr_.audioStreamFlag == AUDIO_FLAG_MMAP) {
        halName_ = "primary";
    } else {
        halName_ = "voip";
    }
    int32_t result = CreateRender();
    CHECK_AND_RETURN_RET_LOG(result == SUCCESS, ERR_NOT_STARTED, "create render fail");
    result = PrepareMmapBuffer();
    CHECK_AND_RETURN_RET_LOG(result == SUCCESS, ERR_NOT_STARTED, "prepare mmap buffer fail");
    // Mark the sink usable only after both steps above succeeded.
    sinkInited_ = true;
    return SUCCESS;
}
FastAudioRenderSink::Start() 用于启动音频渲染器,开始播放
// Starts the fast audio render path. Idempotent: returns SUCCESS immediately
// if already started. Serialized against Stop() via startMutex_. Returns
// ERR_INVALID_HANDLE when the HDI render handle is missing, ERR_NOT_STARTED
// when the HDI start call or the position-time check fails.
int32_t FastAudioRenderSink::Start(void)
{
    AUDIO_INFO_LOG("in");
    std::lock_guard<std::mutex> lock(startMutex_);
    Trace trace("FastAudioRenderSink::Start");
    // Watchdog guard: flags this call if it exceeds TIMEOUT_SECONDS_10.
    AudioXCollie audioXCollie("FastAudioRenderSink::Start", TIMEOUT_SECONDS_10);
    int64_t stamp = ClockTime::GetCurNano();
    if (started_) {
        return SUCCESS;
    }
    CHECK_AND_RETURN_RET_LOG(audioRender_ != nullptr, ERR_INVALID_HANDLE, "render is nullptr");
    int32_t ret = audioRender_->Start(audioRender_); // start the renderer through the underlying HDI interface
    CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, ERR_NOT_STARTED, "start fail");
    UpdateSinkState(true);
    // NOTE(review): if this check fails, the HDI render stays started and the
    // sink state stays true while started_ remains false — presumably the
    // caller retries or tears down; confirm against callers.
    ret = CheckPositionTime();
    CHECK_AND_RETURN_RET_LOG(ret == SUCCESS, ERR_NOT_STARTED, "check position time fail");
#ifdef FEATURE_POWER_MANAGER
    // Lazily create the running lock (keeps the device awake during playback),
    // then hold it for the duration of playback.
    if (runningLock_ == nullptr) {
        WatchTimeout guard("create AudioRunningLock start");
        runningLock_ = std::make_shared<AudioRunningLock>(std::string(RUNNING_LOCK_NAME));
        guard.CheckCurrTimeout();
    }
    if (runningLock_ != nullptr) {
        runningLock_->Lock(RUNNING_LOCK_TIMEOUTMS_LASTING);
    } else {
        AUDIO_ERR_LOG("running lock is null, playback can not work well");
    }
#endif
    AudioPerformanceMonitor::GetInstance().RecordTimeStamp(ADAPTER_TYPE_FAST, INIT_LASTWRITTEN_TIME);
    started_ = true;
    // Elapsed ns / 1e6 -> ms (constant is named "US_PER_SECOND" but equals 1e6).
    AUDIO_DEBUG_LOG("cost: [%{public}" PRId64 "]ms", (ClockTime::GetCurNano() - stamp) / AUDIO_US_PER_SECOND);
    return SUCCESS;
}
FastAudioRenderSink::RenderFrame用于将音频数据写入缓冲区
// Writes `len` bytes of audio data into the mmap ring buffer at curWritePos_,
// wrapping at bufferSize_. On success sets writeLen = len. Only compiled in
// when DEBUG_DIRECT_USE_HDI is defined; otherwise reports ERR_NOT_SUPPORTED
// (normal fast-path delivery goes through the mmap mechanism instead).
int32_t FastAudioRenderSink::RenderFrame(char &data, uint64_t len, uint64_t &writeLen)
{
#ifdef DEBUG_DIRECT_USE_HDI
    int64_t stamp = ClockTime::GetCurNano();
    // Reject writes that would overrun the buffer, reserving writeAheadPeriod_
    // read-frames of headroom ahead of the hardware read position.
    if (len > (bufferSize_ - eachReadFrameSize_ * frameSizeInByte_ * writeAheadPeriod_)) {
        writeLen = 0;
        AUDIO_ERR_LOG("fail, too large, len: [%{public}" PRIu64 "]", len);
        return ERR_WRITE_FAILED;
    }
    if (isFirstWrite_) {
        PreparePosition();
    }
    // NOTE(review): the `curWritePos_ >= 0` half is only meaningful if
    // curWritePos_ is signed — confirm its declared type.
    CHECK_AND_RETURN_RET_LOG((curWritePos_ >= 0 && curWritePos_ < bufferSize_), ERR_INVALID_PARAM, "invalid write pos");
    char *writePtr = bufferAddresss_ + curWritePos_;
    // Sample the first 8 bytes before/after the copy, purely for the debug log.
    uint64_t dataBefore = *(uint64_t *)writePtr;
    uint64_t dataAfter = 0;
    uint64_t tempPos = curWritePos_ + len;
    if (tempPos <= bufferSize_) {
        // Fits without wrapping: single copy, advance (and wrap exactly at end).
        int32_t ret = memcpy_s(writePtr, (bufferSize_ - curWritePos_), static_cast<void *>(&data), len);
        CHECK_AND_RETURN_RET_LOG(ret == EOK, ERR_WRITE_FAILED, "copy fail");
        dataAfter = *(uint64_t *)writePtr;
        curWritePos_ = (tempPos == bufferSize_ ? 0 : tempPos);
    } else {
        // Wraps: copy the tail segment to the end of the buffer, the remainder
        // to the start, then continue writing from the wrapped position.
        AUDIO_DEBUG_LOG("curWritePos + len is %{public}" PRIu64 ", more than bufferSize", tempPos);
        size_t writeableSize = bufferSize_ - curWritePos_;
        if (memcpy_s(writePtr, writeableSize, static_cast<void *>(&data), writeableSize) ||
            memcpy_s(bufferAddresss_, bufferSize_, static_cast<void *>((char *)&data + writeableSize),
            (len - writeableSize))) {
            AUDIO_ERR_LOG("copy fail");
            return ERR_WRITE_FAILED;
        }
        curWritePos_ = len - writeableSize;
    }
    writeLen = len;
    // Elapsed ns / 1e6 -> ms for the debug log below.
    stamp = (ClockTime::GetCurNano() - stamp) / AUDIO_US_PER_SECOND;
    AUDIO_DEBUG_LOG("len: [%{public}" PRIu64 "], cost: [%{public}" PRId64 "]ms, curWritePos: [%{public}d], dataBefore: "
        "[%{public}" PRIu64 "], dataAfter: [%{public}" PRIu64 "]", len, stamp, curWritePos_, dataBefore, dataAfter);
    return SUCCESS;
#else
    AUDIO_INFO_LOG("not support");
    return ERR_NOT_SUPPORTED;
#endif
}
FastAudioRenderSink::Stop用于停止音频渲染器
// Stops the fast audio render path; counterpart of Start(). Releases the
// power-manager running lock unconditionally, then stops the HDI render only
// if the sink was actually started. Serialized with Start() via startMutex_.
int32_t FastAudioRenderSink::Stop(void)
{
    AUDIO_INFO_LOG("in");
    std::lock_guard<std::mutex> lock(startMutex_);
    Trace trace("FastAudioRenderSink::Stop");
    // Watchdog guard: flags this call if it exceeds TIMEOUT_SECONDS_10.
    AudioXCollie audioXCollie("FastAudioRenderSink::Stop", TIMEOUT_SECONDS_10);
#ifdef FEATURE_POWER_MANAGER
    // Drop the running lock first, even when the stream was never started.
    if (runningLock_ == nullptr) {
        AUDIO_WARNING_LOG("running lock is null, playback can not work well");
    } else {
        AUDIO_INFO_LOG("running lock unlock");
        runningLock_->UnLock();
    }
#endif
    if (!started_) {
        return SUCCESS;
    }
    CHECK_AND_RETURN_RET_LOG(audioRender_ != nullptr, ERR_INVALID_HANDLE, "render is nullptr");
    int32_t stopResult = audioRender_->Stop(audioRender_);
    // Sink state is cleared regardless of the HDI stop result.
    UpdateSinkState(false);
    CHECK_AND_RETURN_RET_LOG(stopResult == SUCCESS, ERR_NOT_STARTED, "stop fail, ret: %{public}d", stopResult);
    started_ = false;
    return SUCCESS;
}
3 测试音频低时延
1.通过打印日志,追踪框架内的时延:普通通路每帧为20ms(部分场景为93ms),低时延通路每帧为5ms。
2.端到端时延(从送入数据到扬声器播放出声)的测试方法:将录制的音频播放出来,用另一台设备拍摄视频,测量从敲击发声到手机发出声音的时间,再减去demo本身的处理时间,即得到播录的端到端时延。
更多推荐

所有评论(0)