承接上一章节分析:
【六】Android MediaPlayer整体架构源码分析 -【start请求播放处理流程】【Part 4】
本系列文章分析的安卓源码版本:【Android 10.0 版本】
【此章节小节编号将重新排序】
mCodec->configureCodec(mime.c_str(), msg)实现分析:
ACodec配置编解码器。【备注:该章节分析的该配置处理流程非常长】
// [frameworks/av/media/libstagefright/ACodec.cpp]
/**
 * Configures the already-allocated OMX component for the given MIME type.
 *
 * Applies the configuration carried in |msg| (bitrate / rate-control info,
 * input & output port buffer modes, native-window (Surface) setup for video
 * decoders, tunneled playback, adaptive playback, per-codec audio parameters,
 * vendor parameters, ...) to the OMX node, then snapshots the resulting port
 * formats into mInputFormat / mOutputFormat.
 *
 * @param mime MIME type of the media (e.g. "video/avc", "audio/mp4a-latm").
 * @param msg  Configuration message handed down from MediaCodec::configure().
 * @return OK on success, or a stagefright/OMX error code; many optional
 *         settings deliberately ignore their individual errors (see the
 *         "// ignore error" sites below).
 */
status_t ACodec::configureCodec(
        const char *mime, const sp<AMessage> &msg) {
    // Whether this component is an encoder; defaults to false (i.e. decoder).
    int32_t encoder;
    if (!msg->findInt32("encoder", &encoder)) {
        encoder = false;
    }

    // Fresh input/output format messages, populated throughout this method.
    sp<AMessage> inputFormat = new AMessage;
    sp<AMessage> outputFormat = new AMessage;

    // Cache the caller-supplied configuration message.
    mConfigFormat = msg;

    mIsEncoder = encoder;
    // Classify the component by MIME prefix.
    mIsVideo = !strncasecmp(mime, "video/", 6);
    mIsImage = !strncasecmp(mime, "image/", 6);

    // Preset both ports to byte-buffer mode; more specific modes (ANW buffers
    // for surfaces, secure buffers for DRM, ...) may be selected further below.
    mPortMode[kPortIndexInput] = IOMX::kPortModePresetByteBuffer;
    mPortMode[kPortIndexOutput] = IOMX::kPortModePresetByteBuffer;

    // Tell the component which role it plays (e.g. "video_decoder.avc").
    status_t err = setComponentRole(encoder /* isEncoder */, mime);

    // Setting the component role must succeed.
    if (err != OK) {
        return err;
    }

    // Rate-control mode enum: variable (VBR), constant (CBR), constant
    // quality (CQ), etc.
    OMX_VIDEO_CONTROLRATETYPE bitrateMode;
    int32_t bitrate = 0, quality;
    // FLAC encoder or video encoder in constant quality mode doesn't need a
    // bitrate, other encoders do.
    if (encoder) {
        if (mIsVideo || mIsImage) {
            // Video/image encoder: look up the rate-control info (defaults to
            // variable bitrate if unspecified).
            if (!findVideoBitrateControlInfo(msg, &bitrateMode, &bitrate, &quality)) {
                return INVALID_OPERATION;
            }
        } else if (strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_FLAC)
                && !msg->findInt32("bitrate", &bitrate)) {
            // Non-FLAC audio encoder with no "bitrate" key: invalid config.
            return INVALID_OPERATION;
        }
    }

    // propagate bitrate to the output so that the muxer has it
    if (encoder && msg->findInt32("bitrate", &bitrate)) {
        // Technically ISO spec says that 'bitrate' should be 0 for VBR even though it is the
        // average bitrate. We've been setting both bitrate and max-bitrate to this same value.
        outputFormat->setInt32("bitrate", bitrate);
        outputFormat->setInt32("max-bitrate", bitrate);
    }

    int32_t storeMeta;
    if (encoder) {
        IOMX::PortMode mode = IOMX::kPortModePresetByteBuffer;
        // The input metadata-buffer type describes which kind of metadata
        // buffer (e.g. an ANativeWindowBuffer handle) the producer — such as
        // the Camera — will hand to the encoder. In these modes the encoder
        // must return buffers to their producer rather than free them itself.
        if (msg->findInt32("android._input-metadata-buffer-type", &storeMeta)
                && storeMeta != kMetadataBufferTypeInvalid) {
            if (storeMeta == kMetadataBufferTypeNativeHandleSource) {
                mode = IOMX::kPortModeDynamicNativeHandle;
            } else if (storeMeta == kMetadataBufferTypeANWBuffer ||
                    storeMeta == kMetadataBufferTypeGrallocSource) {
                mode = IOMX::kPortModeDynamicANWBuffer;
            } else {
                return BAD_VALUE;
            }
        }
        // Select the input port buffer mode (i.e. the data-source buffer type);
        // this reaches into the concrete component implementation.
        err = setPortMode(kPortIndexInput, mode);
        if (err != OK) {
            return err;
        }

        if (mode != IOMX::kPortModePresetByteBuffer) {
            uint32_t usageBits;
            if (mOMXNode->getParameter(
                    (OMX_INDEXTYPE)OMX_IndexParamConsumerUsageBits,
                    &usageBits, sizeof(usageBits)) == OK) {
                inputFormat->setInt32(
                        "using-sw-read-often", !!(usageBits & GRALLOC_USAGE_SW_READ_OFTEN));
            }
        }
    }

    // Optionally prepend SPS/PPS to IDR (I) frames for video encoders.
    int32_t prependSPSPPS = 0;
    if (encoder && mIsVideo
            && msg->findInt32("prepend-sps-pps-to-idr-frames", &prependSPSPPS)
            && prependSPSPPS != 0) {
        OMX_INDEXTYPE index;
        err = mOMXNode->getExtensionIndex(
                "OMX.google.android.index.prependSPSPPSToIDRFrames", &index);

        if (err == OK) {
            PrependSPSPPSToIDRFramesParams params;
            InitOMXParams(&params);
            params.bEnable = OMX_TRUE;
            err = mOMXNode->setParameter(index, &params, sizeof(params));
        }

        if (err != OK) {
            ALOGE("Encoder could not be configured to emit SPS/PPS before "
                  "IDR frames. (err %d)", err);

            return err;
        }
    }

    // Only enable metadata mode on encoder output if encoder can prepend
    // sps/pps to idr frames, since in metadata mode the bitstream is in an
    // opaque handle, to which we don't have access.
    if (encoder && mIsVideo) {
        OMX_BOOL enable = (OMX_BOOL) (prependSPSPPS
            && msg->findInt32("android._store-metadata-in-buffers-output", &storeMeta)
            && storeMeta != 0);
        if (mFlags & kFlagIsSecure) {
            enable = OMX_TRUE;
        }

        err = setPortMode(kPortIndexOutput, enable ?
                IOMX::kPortModePresetSecureBuffer : IOMX::kPortModePresetByteBuffer);
        if (err != OK) {
            return err;
        }

        if (!msg->findInt64(
                KEY_REPEAT_PREVIOUS_FRAME_AFTER, &mRepeatFrameDelayUs)) {
            mRepeatFrameDelayUs = -1LL;
        }

        if (!msg->findDouble("time-lapse-fps", &mCaptureFps)) {
            float captureRate;
            if (msg->findAsFloat(KEY_CAPTURE_RATE, &captureRate)) {
                mCaptureFps = captureRate;
            } else {
                mCaptureFps = -1.0;
            }
        }

        if (!msg->findInt32(
                KEY_CREATE_INPUT_SURFACE_SUSPENDED,
                (int32_t*)&mCreateInputBuffersSuspended)) {
            mCreateInputBuffersSuspended = false;
        }
    }

    if (encoder && (mIsVideo || mIsImage)) {
        // only allow 32-bit value, since we pass it as U32 to OMX.
        if (!msg->findInt64(KEY_MAX_PTS_GAP_TO_ENCODER, &mMaxPtsGapUs)) {
            mMaxPtsGapUs = 0LL;
        } else if (mMaxPtsGapUs > INT32_MAX || mMaxPtsGapUs < INT32_MIN) {
            ALOGW("Unsupported value for max pts gap %lld", (long long) mMaxPtsGapUs);
            mMaxPtsGapUs = 0LL;
        }

        if (!msg->findFloat(KEY_MAX_FPS_TO_ENCODER, &mMaxFps)) {
            mMaxFps = -1;
        }

        // notify GraphicBufferSource to allow backward frames
        if (mMaxPtsGapUs < 0LL) {
            mMaxFps = -1;
        }
    }

    // NOTE: we only use native window for video decoders
    sp<RefBase> obj;
    bool haveNativeWindow = msg->findObject("native-window", &obj)
            && obj != NULL && mIsVideo && !encoder;
    mUsingNativeWindow = haveNativeWindow;
    if (mIsVideo && !encoder) {
        // Video decoder: adaptive playback is off until proven supported below.
        inputFormat->setInt32("adaptive-playback", false);

        int32_t usageProtected;
        if (msg->findInt32("protected", &usageProtected) && usageProtected) {
            // Protected (DRM) output buffers can only go to an ANativeWindow.
            if (!haveNativeWindow) {
                ALOGE("protected output buffers must be sent to an ANativeWindow");
                return PERMISSION_DENIED;
            }
            mFlags |= kFlagIsGrallocUsageProtected;
            mFlags |= kFlagPushBlankBuffersToNativeWindowOnShutdown;
        }
    }
    if (mFlags & kFlagIsSecure) {
        // use native_handles for secure input buffers
        err = setPortMode(kPortIndexInput, IOMX::kPortModePresetSecureBuffer);

        if (err != OK) {
            ALOGI("falling back to non-native_handles");
            setPortMode(kPortIndexInput, IOMX::kPortModePresetByteBuffer);
            err = OK; // ignore error for now
        }
    }
    if (haveNativeWindow) {
        // A Surface was supplied; it doubles as the ANativeWindow.
        sp<ANativeWindow> nativeWindow =
            static_cast<ANativeWindow *>(static_cast<Surface *>(obj.get()));

        // START of temporary support for automatic FRC - THIS WILL BE REMOVED
        int32_t autoFrc;
        if (msg->findInt32("auto-frc", &autoFrc)) {
            bool enabled = autoFrc;
            OMX_CONFIG_BOOLEANTYPE config;
            InitOMXParams(&config);
            config.bEnabled = (OMX_BOOL)enabled;
            status_t temp = mOMXNode->setConfig(
                    (OMX_INDEXTYPE)OMX_IndexConfigAutoFramerateConversion,
                    &config, sizeof(config));
            if (temp == OK) {
                outputFormat->setInt32("auto-frc", enabled);
            } else if (enabled) {
                ALOGI("codec does not support requested auto-frc (err %d)", temp);
            }
        }
        // END of temporary support for automatic FRC

        // Tunneled playback is opt-in; it is not set in the default flow.
        int32_t tunneled;
        if (msg->findInt32("feature-tunneled-playback", &tunneled) &&
            tunneled != 0) {
            ALOGI("Configuring TUNNELED video playback.");
            mTunneled = true;

            int32_t audioHwSync = 0;
            if (!msg->findInt32("audio-hw-sync", &audioHwSync)) {
                ALOGW("No Audio HW Sync provided for video tunnel");
            }
            err = configureTunneledVideoPlayback(audioHwSync, nativeWindow);
            if (err != OK) {
                ALOGE("configureTunneledVideoPlayback(%d,%p) failed!",
                        audioHwSync, nativeWindow.get());
                return err;
            }

            int32_t maxWidth = 0, maxHeight = 0;
            if (msg->findInt32("max-width", &maxWidth) &&
                    msg->findInt32("max-height", &maxHeight)) {

                err = mOMXNode->prepareForAdaptivePlayback(
                        kPortIndexOutput, OMX_TRUE, maxWidth, maxHeight);
                if (err != OK) {
                    ALOGW("[%s] prepareForAdaptivePlayback failed w/ err %d",
                            mComponentName.c_str(), err);
                    // allow failure
                    err = OK;
                } else {
                    inputFormat->setInt32("max-width", maxWidth);
                    inputFormat->setInt32("max-height", maxHeight);
                    inputFormat->setInt32("adaptive-playback", true);
                }
            }
        } else {
            ALOGV("Configuring CPU controlled video playback.");
            mTunneled = false;

            // Explicity reset the sideband handle of the window for
            // non-tunneled video in case the window was previously used
            // for a tunneled video playback.
            // (Goes through window->perform(), i.e. Surface's implementation.)
            err = native_window_set_sideband_stream(nativeWindow.get(), NULL);
            if (err != OK) {
                ALOGE("set_sideband_stream(NULL) failed! (err %d).", err);
                return err;
            }

            // Prefer dynamic ANativeWindowBuffer mode for decoded output.
            err = setPortMode(kPortIndexOutput, IOMX::kPortModeDynamicANWBuffer);
            if (err != OK) {
                // Components such as SoftAVCDec reject this mode and land here.
                // if adaptive playback has been requested, try JB fallback
                // NOTE: THIS FALLBACK MECHANISM WILL BE REMOVED DUE TO ITS
                // LARGE MEMORY REQUIREMENT

                // we will not do adaptive playback on software accessed
                // surfaces as they never had to respond to changes in the
                // crop window, and we don't trust that they will be able to.
                int usageBits = 0;
                bool canDoAdaptivePlayback;

                if (nativeWindow->query(
                        nativeWindow.get(),
                        NATIVE_WINDOW_CONSUMER_USAGE_BITS,
                        &usageBits) != OK) {
                    canDoAdaptivePlayback = false;
                } else {
                    // Adaptive playback is only possible when the window has
                    // no SW read/write usage bits (see note above).
                    canDoAdaptivePlayback =
                        (usageBits &
                                (GRALLOC_USAGE_SW_READ_MASK |
                                 GRALLOC_USAGE_SW_WRITE_MASK)) == 0;
                }

                int32_t maxWidth = 0, maxHeight = 0;
                if (canDoAdaptivePlayback &&
                        msg->findInt32("max-width", &maxWidth) &&
                        msg->findInt32("max-height", &maxHeight)) {
                    ALOGV("[%s] prepareForAdaptivePlayback(%dx%d)",
                            mComponentName.c_str(), maxWidth, maxHeight);

                    err = mOMXNode->prepareForAdaptivePlayback(
                            kPortIndexOutput, OMX_TRUE, maxWidth, maxHeight);
                    ALOGW_IF(err != OK,
                            "[%s] prepareForAdaptivePlayback failed w/ err %d",
                            mComponentName.c_str(), err);

                    if (err == OK) {
                        // Record the adaptive-playback limits on success.
                        inputFormat->setInt32("max-width", maxWidth);
                        inputFormat->setInt32("max-height", maxHeight);
                        inputFormat->setInt32("adaptive-playback", true);
                    }
                }
                // allow failure
                err = OK;
            } else {
                // The component accepted dynamic ANW buffer mode.
                ALOGV("[%s] setPortMode on output to %s succeeded",
                        mComponentName.c_str(), asString(IOMX::kPortModeDynamicANWBuffer));
                CHECK(storingMetadataInDecodedBuffers());
                inputFormat->setInt32("adaptive-playback", true);
            }

            int32_t push;
            if (msg->findInt32("push-blank-buffers-on-shutdown", &push)
                    && push != 0) {
                mFlags |= kFlagPushBlankBuffersToNativeWindowOnShutdown;
            }
        }

        // Video rotation in degrees; defaults to 0.
        int32_t rotationDegrees;
        if (msg->findInt32("rotation-degrees", &rotationDegrees)) {
            mRotationDegrees = rotationDegrees;
        } else {
            mRotationDegrees = 0;
        }
    }

    // PCM sample encoding (bit depth); defaults to 16-bit.
    AudioEncoding pcmEncoding = kAudioEncodingPcm16bit;
    (void)msg->findInt32("pcm-encoding", (int32_t*)&pcmEncoding);
    // invalid encodings will default to PCM-16bit in setupRawAudioFormat.

    if (mIsVideo || mIsImage) {
        // determine need for software renderer
        bool usingSwRenderer = false;
        if (haveNativeWindow && mComponentName.startsWith("OMX.google.")) {
            // Google (software) codecs, e.g. SoftAVCDec, use the SW renderer.
            usingSwRenderer = true;
            haveNativeWindow = false;
            (void)setPortMode(kPortIndexOutput, IOMX::kPortModePresetByteBuffer);
        } else if (haveNativeWindow && !storingMetadataInDecodedBuffers()) {
            // Non-Google codec that didn't get dynamic ANW mode above: use
            // preset ANW buffers.
            err = setPortMode(kPortIndexOutput, IOMX::kPortModePresetANWBuffer);
            if (err != OK) {
                return err;
            }
        }

        if (encoder) {
            err = setupVideoEncoder(mime, msg, outputFormat, inputFormat);
        } else {
            err = setupVideoDecoder(mime, msg, haveNativeWindow, usingSwRenderer, outputFormat);
        }

        if (err != OK) {
            return err;
        }

        // For Google software decoders haveNativeWindow is false here and
        // usingSwRenderer is true, so this branch is skipped. If the device
        // cannot honor a flex-YUV request on native buffers we fall back to
        // the SW renderer in byte-buffer mode (adaptive playback disabled,
        // since most HW codecs don't support it in that mode).
        if (haveNativeWindow) {
            mNativeWindow = static_cast<Surface *>(obj.get());

            // fallback for devices that do not handle flex-YUV for native buffers
            int32_t requestedColorFormat = OMX_COLOR_FormatUnused;
            if (msg->findInt32("color-format", &requestedColorFormat) &&
                    requestedColorFormat == OMX_COLOR_FormatYUV420Flexible) {
                status_t err = getPortFormat(kPortIndexOutput, outputFormat);
                if (err != OK) {
                    return err;
                }
                int32_t colorFormat = OMX_COLOR_FormatUnused;
                OMX_U32 flexibleEquivalent = OMX_COLOR_FormatUnused;
                if (!outputFormat->findInt32("color-format", &colorFormat)) {
                    ALOGE("output port did not have a color format (wrong domain?)");
                    return BAD_VALUE;
                }
                ALOGD("[%s] Requested output format %#x and got %#x.",
                        mComponentName.c_str(), requestedColorFormat, colorFormat);
                if (!IsFlexibleColorFormat(
                        mOMXNode, colorFormat, haveNativeWindow, &flexibleEquivalent)
                        || flexibleEquivalent != (OMX_U32)requestedColorFormat) {
                    // device did not handle flex-YUV request for native window, fall back
                    // to SW renderer
                    ALOGI("[%s] Falling back to software renderer", mComponentName.c_str());
                    mNativeWindow.clear();
                    mNativeWindowUsageBits = 0;
                    haveNativeWindow = false;
                    usingSwRenderer = true;
                    // TODO: implement adaptive-playback support for bytebuffer mode.
                    // This is done by SW codecs, but most HW codecs don't support it.
                    err = setPortMode(kPortIndexOutput, IOMX::kPortModePresetByteBuffer);
                    inputFormat->setInt32("adaptive-playback", false);
                    if (mFlags & kFlagIsGrallocUsageProtected) {
                        // fallback is not supported for protected playback
                        err = PERMISSION_DENIED;
                    } else if (err == OK) {
                        err = setupVideoDecoder(
                                mime, msg, haveNativeWindow, usingSwRenderer, outputFormat);
                    }
                }
            }
        }

        if (usingSwRenderer) {
            // Record the SW-renderer decision in the output format.
            outputFormat->setInt32("using-sw-renderer", 1);
        }
    // The branches below handle audio components (channel count, sample
    // rate, PCM bit depth, and per-codec parameters).
    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MPEG) ||
            !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II)) {
        int32_t numChannels, sampleRate;
        if (!msg->findInt32("channel-count", &numChannels)
                || !msg->findInt32("sample-rate", &sampleRate)) {
            // Since we did not always check for these, leave them optional
            // and have the decoder figure it all out.
            err = OK;
        } else {
            err = setupRawAudioFormat(
                    encoder ? kPortIndexInput : kPortIndexOutput,
                    sampleRate,
                    numChannels);
        }
    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) {
        // AAC: channel count and sample rate are mandatory.
        int32_t numChannels, sampleRate;
        if (!msg->findInt32("channel-count", &numChannels)
                || !msg->findInt32("sample-rate", &sampleRate)) {
            err = INVALID_OPERATION;
        } else {
            // isADTS: whether the stream is in ADTS (Audio Data Transport
            // Stream) framing — a 7-byte header prepended to each AAC frame.
            // aacProfile: the AAC object type/profile.
            int32_t isADTS, aacProfile;
            // sbrMode: Spectral Band Replication — reconstructs the high band
            // from a few parameters, giving better quality at low bitrates.
            int32_t sbrMode;
            // Maximum output channel count.
            int32_t maxOutputChannelCount;
            // Whether the PCM limiter (clipping protection for large DRC
            // gains) is enabled.
            int32_t pcmLimiterEnable;
            // DRC (Dynamic Range Compression) parameters: compress loud
            // passages and boost quiet ones so output stays in a usable range.
            drcParams_t drc;
            if (!msg->findInt32("is-adts", &isADTS)) {
                isADTS = 0;
            }
            if (!msg->findInt32("aac-profile", &aacProfile)) {
                aacProfile = OMX_AUDIO_AACObjectNull;
            }
            if (!msg->findInt32("aac-sbr-mode", &sbrMode)) {
                sbrMode = -1;
            }

            if (!msg->findInt32("aac-max-output-channel_count", &maxOutputChannelCount)) {
                maxOutputChannelCount = -1;
            }
            if (!msg->findInt32("aac-pcm-limiter-enable", &pcmLimiterEnable)) {
                // value is unknown
                pcmLimiterEnable = -1;
            }
            if (!msg->findInt32("aac-encoded-target-level", &drc.encodedTargetLevel)) {
                // value is unknown
                drc.encodedTargetLevel = -1;
            }
            if (!msg->findInt32("aac-drc-cut-level", &drc.drcCut)) {
                // value is unknown
                drc.drcCut = -1;
            }
            if (!msg->findInt32("aac-drc-boost-level", &drc.drcBoost)) {
                // value is unknown
                drc.drcBoost = -1;
            }
            if (!msg->findInt32("aac-drc-heavy-compression", &drc.heavyCompression)) {
                // value is unknown
                drc.heavyCompression = -1;
            }
            if (!msg->findInt32("aac-target-ref-level", &drc.targetRefLevel)) {
                // value is unknown
                drc.targetRefLevel = -1;
            }
            if (!msg->findInt32("aac-drc-effect-type", &drc.effectType)) {
                // value is unknown
                drc.effectType = -2; // valid values are -1 and over
            }

            // Configure the AAC component with everything gathered above.
            err = setupAACCodec(
                    encoder, numChannels, sampleRate, bitrate, aacProfile,
                    isADTS != 0, sbrMode, maxOutputChannelCount, drc,
                    pcmLimiterEnable);
        }
    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_NB)) {
        err = setupAMRCodec(encoder, false /* isWAMR */, bitrate);
    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_WB)) {
        err = setupAMRCodec(encoder, true /* isWAMR */, bitrate);
    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_G711_ALAW)
            || !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_G711_MLAW)) {
        // These are PCM-like formats with a fixed sample rate but
        // a variable number of channels.

        int32_t numChannels;
        if (!msg->findInt32("channel-count", &numChannels)) {
            err = INVALID_OPERATION;
        } else {
            int32_t sampleRate;
            if (!msg->findInt32("sample-rate", &sampleRate)) {
                sampleRate = 8000;
            }
            err = setupG711Codec(encoder, sampleRate, numChannels);
        }
#ifdef QTI_FLAC_DECODER
    // Vendor (QTI) build: OMX.qti.* FLAC components are configured via
    // setupCustomCodec instead; only non-QTI components take this path.
    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_FLAC) && !mComponentName.startsWith("OMX.qti.")) {
#else
    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_FLAC)) {
#endif
        // numChannels needs to be set to properly communicate PCM values.
        int32_t numChannels = 2, sampleRate = 44100, compressionLevel = -1;
        if (encoder &&
                (!msg->findInt32("channel-count", &numChannels)
                        || !msg->findInt32("sample-rate", &sampleRate))) {
            ALOGE("missing channel count or sample rate for FLAC encoder");
            err = INVALID_OPERATION;
        } else {
            if (encoder) {
                if (!msg->findInt32(
                            "complexity", &compressionLevel) &&
                    !msg->findInt32(
                            "flac-compression-level", &compressionLevel)) {
                    compressionLevel = 5; // default FLAC compression level
                } else if (compressionLevel < 0) {
                    ALOGW("compression level %d outside [0..8] range, "
                          "using 0",
                          compressionLevel);
                    compressionLevel = 0;
                } else if (compressionLevel > 8) {
                    ALOGW("compression level %d outside [0..8] range, "
                          "using 8",
                          compressionLevel);
                    compressionLevel = 8;
                }
            }
            err = setupFlacCodec(
                    encoder, numChannels, sampleRate, compressionLevel, pcmEncoding);
        }
    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW)) {
        int32_t numChannels, sampleRate;
        if (encoder
                || !msg->findInt32("channel-count", &numChannels)
                || !msg->findInt32("sample-rate", &sampleRate)) {
            err = INVALID_OPERATION;
        } else {
            err = setupRawAudioFormat(kPortIndexInput, sampleRate, numChannels, pcmEncoding);
        }
    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AC3)) {
        int32_t numChannels;
        int32_t sampleRate;
        if (!msg->findInt32("channel-count", &numChannels)
                || !msg->findInt32("sample-rate", &sampleRate)) {
            err = INVALID_OPERATION;
        } else {
            err = setupAC3Codec(encoder, numChannels, sampleRate);
        }
    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_EAC3)) {
        int32_t numChannels;
        int32_t sampleRate;
        if (!msg->findInt32("channel-count", &numChannels)
                || !msg->findInt32("sample-rate", &sampleRate)) {
            err = INVALID_OPERATION;
        } else {
            err = setupEAC3Codec(encoder, numChannels, sampleRate);
        }
    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AC4)) {
        int32_t numChannels;
        int32_t sampleRate;
        if (!msg->findInt32("channel-count", &numChannels)
                || !msg->findInt32("sample-rate", &sampleRate)) {
            err = INVALID_OPERATION;
        } else {
            err = setupAC4Codec(encoder, numChannels, sampleRate);
        }
    } else {
        // Any other codec: delegate to setupCustomCodec (a no-op in stock
        // ACodec; vendor subclasses may override).
        err = setupCustomCodec(err, mime, msg);
    }

    if (err != OK) {
        return err;
    }

    // Encoder delay in frames; defaults to 0.
    if (!msg->findInt32("encoder-delay", &mEncoderDelay)) {
        mEncoderDelay = 0;
    }

    // Encoder padding in frames; defaults to 0.
    if (!msg->findInt32("encoder-padding", &mEncoderPadding)) {
        mEncoderPadding = 0;
    }

    // Optional channel mask (channel layout).
    if (msg->findInt32("channel-mask", &mChannelMask)) {
        mChannelMaskPresent = true;
    } else {
        mChannelMaskPresent = false;
    }

    int32_t maxInputSize;
    if (msg->findInt32("max-input-size", &maxInputSize)) {
        // Use the requested value as the minimum input buffer size; failures
        // are ignored and the component's default size is used instead.
        err = setMinBufferSize(kPortIndexInput, (size_t)maxInputSize);
        err = OK; // ignore error
    } else if (!strcmp("OMX.Nvidia.aac.decoder", mComponentName.c_str())) {
        // Nvidia AAC decoder: hard-code an 8 KiB minimum input buffer.
        err = setMinBufferSize(kPortIndexInput, 8192); // XXX
        err = OK; // ignore error
    }

    // Optional codec priority; failure is harmless.
    int32_t priority;
    if (msg->findInt32("priority", &priority)) {
        err = setPriority(priority);
        err = OK; // ignore error
    }

    // Operating rate: try float first, then int.
    int32_t rateInt = -1;
    float rateFloat = -1;
    if (!msg->findFloat("operating-rate", &rateFloat)) {
        msg->findInt32("operating-rate", &rateInt);
        rateFloat = (float)rateInt; // 16MHz (FLINTMAX) is OK for upper bound.
    }
    if (rateFloat > 0) {
        err = setOperatingRate(rateFloat, mIsVideo);
        err = OK; // ignore errors
    }

    if (err == OK) {
        // Pass vendor-specific parameters down to the component.
        err = setVendorParameters(msg);
        if (err != OK) {
            return err;
        }
    }

    // NOTE: both mBaseOutputFormat and mOutputFormat are outputFormat to signal first frame.
    mBaseOutputFormat = outputFormat;
    mLastOutputFormat.clear();

    // Read back the negotiated port formats from the component.
    err = getPortFormat(kPortIndexInput, inputFormat);
    if (err == OK) {
        err = getPortFormat(kPortIndexOutput, outputFormat);
        if (err == OK) {
            mInputFormat = inputFormat;
            mOutputFormat = outputFormat;
        }
    }

    // create data converters if needed.
    // Audio only: if the requested PCM encoding differs from what the codec
    // produces/consumes, insert a PCM bit-depth converter; otherwise none.
    if (!mIsVideo && !mIsImage && err == OK) {
        AudioEncoding codecPcmEncoding = kAudioEncodingPcm16bit;
        if (encoder) {
            // Converter from the app's PCM encoding to the codec's input one.
            (void)mInputFormat->findInt32("pcm-encoding", (int32_t*)&codecPcmEncoding);
            mConverter[kPortIndexInput] = AudioConverter::Create(pcmEncoding, codecPcmEncoding);
            if (mConverter[kPortIndexInput] != NULL) {
                ALOGD("%s: encoder %s input format pcm encoding converter from %d to %d",
                        __func__, mComponentName.c_str(), pcmEncoding, codecPcmEncoding);
                // A converter exists, so report the app-facing encoding.
                mInputFormat->setInt32("pcm-encoding", pcmEncoding);
            }
        } else {
            // Converter from the codec's output PCM encoding to the app's.
            (void)mOutputFormat->findInt32("pcm-encoding", (int32_t*)&codecPcmEncoding);
            mConverter[kPortIndexOutput] = AudioConverter::Create(codecPcmEncoding, pcmEncoding);
            if (mConverter[kPortIndexOutput] != NULL) {
                ALOGD("%s: decoder %s output format pcm encoding converter from %d to %d",
                        __func__, mComponentName.c_str(), codecPcmEncoding, pcmEncoding);
                // A converter exists, so report the app-facing encoding.
                mOutputFormat->setInt32("pcm-encoding", pcmEncoding);
            }
        }
    }

    return err;
}
1、setComponentRole(encoder /* isEncoder */, mime)实现分析:
设置组件角色信息
由于本章节接下来内容篇幅过长,因此必须放入另一章节分析,请查看:
【六】Android MediaPlayer整体架构源码分析 -【start请求播放处理流程】【Part 5】【02】
版权声明:本文为u012430727原创文章,遵循 CC 4.0 BY-SA 版权协议,转载请附上原文出处链接和本声明。