In earlier Android versions, AudioFlinger (AF) was started from main_mediaserver.cpp; since Android N it is started from main_audioserver.cpp.
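For orientation, this is roughly what the Android N entry point looks like; a simplified sketch of frameworks/av/media/audioserver/main_audioserver.cpp (error handling and the other services omitted):

// main_audioserver.cpp (simplified sketch, not the full file)
int main(int argc __unused, char **argv __unused)
{
    sp<ProcessState> proc(ProcessState::self());
    sp<IServiceManager> sm = defaultServiceManager();
    AudioFlinger::instantiate();        // registers "media.audio_flinger"
    AudioPolicyService::instantiate();  // registers "media.audio_policy"
    ProcessState::self()->startThreadPool();
    IPCThreadState::self()->joinThreadPool();
}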

AudioFlinger::instantiate() is not really an internal member of AudioFlinger; it comes from the BinderService template class. Several services, including AudioFlinger and AudioPolicyService, inherit from this common Binder service base class, whose implementation lives in BinderService.h:

// frameworks/native/include/binder/BinderService.h
static status_t publish(bool allowIsolated = false) {
    sp<IServiceManager> sm(defaultServiceManager());
    // SERVICE is the class template parameter; since AudioFlinger called instantiate(),
    // SERVICE here is AudioFlinger.
    return sm->addService(
            String16(SERVICE::getServiceName()),
            new SERVICE(), allowIsolated);
}

static void instantiate() { publish(); }

So publish() obtains the ServiceManager proxy, creates a new instance of the service that called instantiate(), and registers it with ServiceManager under its service name.
The next step is therefore to look at AudioFlinger's constructor.
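Since publish() registers the service under the name returned by SERVICE::getServiceName() ("media.audio_flinger" in AudioFlinger's case), a client can later fetch the same Binder proxy from ServiceManager. A minimal sketch of that lookup, essentially what AudioSystem::get_audio_flinger() does internally (retry loop and death notification omitted):

sp<IServiceManager> sm = defaultServiceManager();
sp<IBinder> binder = sm->getService(String16("media.audio_flinger"));
sp<IAudioFlinger> af = interface_cast<IAudioFlinger>(binder);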

AF is the executor; APS (AudioPolicyService) is the policy maker.

All audio devices are loaded from audio_policy.conf in the AudioPolicyManager constructor.

void AudioPolicyService::onFirstRef()
{
    Mutex::Autolock _l(mLock);
    // start tone playback thread
    mTonePlaybackThread = new AudioCommandThread(String8("ApmTone"), this);
    // start audio commands thread
    mAudioCommandThread = new AudioCommandThread(String8("ApmAudio"), this);
    // start output activity command thread
    mOutputCommandThread = new AudioCommandThread(String8("ApmOutput"), this);
    mAudioPolicyClient = new AudioPolicyClient(this);
    mAudioPolicyManager = createAudioPolicyManager(mAudioPolicyClient);

This shows that AudioSystem acts as the go-between for the other subsystems: Bluetooth or a wired headset, for example, reports its connection state to the audio system through the AudioSystem.setDeviceConnectionState interface.
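As an illustration of that go-between role, a hedged sketch of how a wired-headset plug event could be reported from native code; the exact signature of setDeviceConnectionState differs slightly between Android versions (the device_name argument, for instance, is newer):

#include <media/AudioSystem.h>

// Hypothetical helper, not framework code: tell the audio system a wired headset appeared.
void onHeadsetPlugged() {
    android::AudioSystem::setDeviceConnectionState(
            AUDIO_DEVICE_OUT_WIRED_HEADSET,
            AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
            "" /* device_address */,
            "" /* device_name */);
}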
During the construction above, MTK's own policy extension is also created:

extern "C" AudioPolicyInterface* createAudioPolicyManager(AudioPolicyClientInterface *clientInterface)
{audiopolicymanagerMTK = (AudioPolicyManagerCustomInterface*) new AudioPolicyManagerCustomImpl();  // MTK_AUDIOreturn new AudioPolicyManager(clientInterface, audiopolicymanagerMTK);
}


mHwModules is populated from the configuration file audio_policy.conf:

    for (size_t i = 0; i < mHwModules.size(); i++) {
        mHwModules[i]->mHandle = mpClientInterface->loadHwModule(mHwModules[i]->getName());
        if (mHwModules[i]->mHandle == 0) {
            ALOGW("could not open HW module %s", mHwModules[i]->getName());
            continue;

audio_module_handle_t AudioPolicyService::AudioPolicyClient::loadHwModule(const char *name)
{
    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
    if (af == 0) {
        ALOGW("%s: could not get AudioFlinger", __func__);
        return AUDIO_MODULE_HANDLE_NONE;
    }
    return af->loadHwModule(name);
}

So the AudioPolicyManager constructor goes through the mpClientInterface interface, which ultimately lands in AF's loadHwModule:

// loadHwModule_l() must be called with AudioFlinger::mLock held
audio_module_handle_t AudioFlinger::loadHwModule_l(const char *name)
{
    for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
        if (strncmp(mAudioHwDevs.valueAt(i)->moduleName(), name, strlen(name)) == 0) {
            ALOGW("loadHwModule() module %s already loaded", name);
            return mAudioHwDevs.keyAt(i);
        }
    }
    sp<DeviceHalInterface> dev;
    int rc = mDevicesFactoryHal->openDevice(name, &dev);

which finally reaches:

status_t DevicesFactoryHalLocal::openDevice(const char *name, sp<DeviceHalInterface> *device) {
    audio_hw_device_t *dev;
    status_t rc = load_audio_interface(name, &dev);
    if (rc == OK) {
        *device = new DeviceHalLocal(dev);
    }
    return rc;
}
static status_t load_audio_interface(const char *if_name, audio_hw_device_t **dev)
{
    const hw_module_t *mod;
    int rc;

    rc = hw_get_module_by_class(AUDIO_HARDWARE_MODULE_ID, if_name, &mod);
    if (rc) {
        ALOGE("%s couldn't load audio hw module %s.%s (%s)", __func__,
                AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
        goto out;
    }
    rc = audio_hw_device_open(mod, dev);
    if (rc) {
        ALOGE("%s couldn't open audio hw device in %s.%s (%s)", __func__,
                AUDIO_HARDWARE_MODULE_ID, if_name, strerror(-rc));
        goto out;
    }
    if ((*dev)->common.version < AUDIO_DEVICE_API_VERSION_MIN) {
        ALOGE("%s wrong audio hw device version %04x", __func__, (*dev)->common.version);
        rc = BAD_VALUE;
        audio_hw_device_close(*dev);
        goto out;
    }
    return OK;

out:
    *dev = NULL;
    return rc;
}

which loads and opens the audio_hw_device:

static inline int audio_hw_device_open(const struct hw_module_t* module,
                                       struct audio_hw_device** device)
{
    return module->methods->open(module, AUDIO_HARDWARE_INTERFACE,
                                 TO_HW_DEVICE_T_OPEN(device));
}
struct legacy_audio_module HAL_MODULE_INFO_SYM = {
    .module = {
        .common = {
            .tag = HARDWARE_MODULE_TAG,
            .module_api_version = AUDIO_MODULE_API_VERSION_0_1,
            .hal_api_version = HARDWARE_HAL_API_VERSION,
            .id = AUDIO_HARDWARE_MODULE_ID,
            .name = "MTK Audio HW HAL",
            .author = "MTK",
            .methods = &legacy_audio_module_methods,
            .dso = NULL,
            .reserved = {0},
        },
    },
};

static struct hw_module_methods_t legacy_audio_module_methods = {
    .open = legacy_adev_open
};

Finally legacy_adev_open is executed, i.e. the open method of this "MTK Audio HW HAL" module; this is where the function pointers of the HAL device are wired up to the MTK implementation.
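To make that wiring concrete, here is an abridged sketch of what legacy_adev_open typically does, based on the generic legacy audio_hw_hal.cpp wrapper; the MTK file may differ in detail:

static int legacy_adev_open(const hw_module_t* module, const char* name, hw_device_t** device)
{
    struct legacy_audio_device *ladev;
    if (strcmp(name, AUDIO_HARDWARE_INTERFACE) != 0)
        return -EINVAL;
    ladev = (struct legacy_audio_device *)calloc(1, sizeof(*ladev));
    if (!ladev)
        return -ENOMEM;

    // Fill in the platform-independent audio_hw_device function table ...
    ladev->device.common.tag = HARDWARE_DEVICE_TAG;
    ladev->device.common.module = const_cast<hw_module_t*>(module);
    ladev->device.set_parameters = adev_set_parameters;
    ladev->device.open_output_stream = adev_open_output_stream;
    ladev->device.open_input_stream = adev_open_input_stream;
    // ... and bind it to the vendor implementation behind hwif.
    ladev->hwif = createMTKAudioHardware();

    *device = &ladev->device.common;
    return 0;
}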

Together with the audio_interfaces array in AF, the three main interfaces, their corresponding .so libraries, and the devices under each node are loaded:

#define AUDIO_HARDWARE_MODULE_ID_PRIMARY "primary"
#define AUDIO_HARDWARE_MODULE_ID_A2DP "a2dp"
#define AUDIO_HARDWARE_MODULE_ID_USB "usb"

static const char * const audio_interfaces[] = {
    AUDIO_HARDWARE_MODULE_ID_PRIMARY,
    AUDIO_HARDWARE_MODULE_ID_A2DP,
    AUDIO_HARDWARE_MODULE_ID_USB,
};

Matching these against the audio_policy.conf configuration file:
traversing the primary node loads audio.primary.mt6580.so:

primary {
  global_configuration {
    attached_output_devices AUDIO_DEVICE_OUT_SPEAKER|AUDIO_DEVICE_OUT_EARPIECE
    default_output_device AUDIO_DEVICE_OUT_SPEAKER
    attached_input_devices AUDIO_DEVICE_IN_BUILTIN_MIC|AUDIO_DEVICE_IN_FM_TUNER|AUDIO_DEVICE_IN_VOICE_CALL
    audio_hal_version 3.0
  }
  devices {
    headset {
      type AUDIO_DEVICE_OUT_WIRED_HEADSET
      gains {
        gain_1 {
          mode AUDIO_GAIN_MODE_JOINT
          channel_mask AUDIO_CHANNEL_OUT_

Traversing the a2dp node loads audio.a2dp.default.so:

a2dp {
  global_configuration {
    audio_hal_version 2.0
  }
  outputs {
    a2dp {
      sampling_rates 44100
      channel_masks AUDIO_CHANNEL_OUT_STEREO
      formats AUDIO_FORMAT_PCM_16_BIT
      devices AUDIO_DEVICE_OUT_ALL_A2DP
    }
  }
  inputs {

Traversing the usb node loads audio.usb.mt6580.so:

usb {
  global_configuration {
    audio_hal_version 2.0
  }
  outputs {
    usb_accessory {
      sampling_rates 44100
      channel_masks AUDIO_CHANNEL_OUT_STEREO
      formats AUDIO_FORMAT_PCM_16_BIT
      devices AUDIO_DEVICE_OUT_USB_ACCESSORY
    }
    usb_device {
      sampling_rates dyna

The three interfaces are loaded in AF through:

audio_module_handle_t AudioFlinger::loadHwModule(const char *name)
{
    if (name == NULL) {
        return AUDIO_MODULE_HANDLE_NONE;
    }
    if (!settingsAllowed()) {
        return AUDIO_MODULE_HANDLE_NONE;
    }
    Mutex::Autolock _l(mLock);
    return loadHwModule_l(name);
}

As audio_policy.conf shows, the system contains the primary, a2dp and usb audio interfaces, corresponding to the audio.<primary/a2dp/usb>.<device>.so libraries. Each audio interface contains a number of outputs and inputs, each output or input lists the devices it supports along with sampling rates, channel masks and so on, and all of this information is stored in the IOProfiles of the corresponding module. For the audio_policy.conf described above, the system ends up with 6 modules (e.g. primary, a2dp, hdmi, r_submix, hs_usb and usb) and 7 outputs. Taking AUDIO_DEVICE_OUT_SPEAKER as an example, that device is listed in one of the output IOProfiles of the primary module; the other devices follow the same pattern.
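As a mental model, here is a rough, hand-written sketch of the kind of information one output IOProfile carries; the member names are simplified and not the exact AOSP class layout:

// Simplified sketch only; see AudioPolicy's IOProfile/AudioPort for the real classes.
struct IOProfileSketch {
    String8                      name;             // e.g. "primary output"
    Vector<uint32_t>             samplingRates;    // e.g. 44100, 48000
    Vector<audio_channel_mask_t> channelMasks;     // e.g. AUDIO_CHANNEL_OUT_STEREO
    Vector<audio_format_t>       formats;          // e.g. AUDIO_FORMAT_PCM_16_BIT
    audio_devices_t              supportedDevices; // e.g. AUDIO_DEVICE_OUT_SPEAKER | ...
    audio_output_flags_t         flags;            // e.g. AUDIO_OUTPUT_FLAG_PRIMARY
};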

AudioManager.setParameters() ends up calling AudioFlinger::setParameters():

status_t AudioFlinger::setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs)
{
    ALOGV("setParameters(): io %d, keyvalue %s, calling pid %d",
            ioHandle, keyValuePairs.string(), IPCThreadState::self()->getCallingPid());

    // check calling permissions
    if (!settingsAllowed()) {
        return PERMISSION_DENIED;
    }

    // AUDIO_IO_HANDLE_NONE means the parameters are global to the audio hardware interface
    if (ioHandle == AUDIO_IO_HANDLE_NONE) {
        Mutex::Autolock _l(mLock);
        // result will remain NO_INIT if no audio device is present
        status_t final_result = NO_INIT;
        {
            AutoMutex lock(mHardwareLock);
            mHardwareStatus = AUDIO_HW_SET_PARAMETER;
            for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
                sp<DeviceHalInterface> dev = mAudioHwDevs.valueAt(i)->hwDevice();
                status_t result = dev->setParameters(keyValuePairs);
                // return success if at least one audio device accepts the parameters as not all
                // HALs are requested to support all parameters. If no audio device supports the
                // requested parameters, the last error is reported.

For everything above the HAL, finding the module to use only requires the ID and the name. Even if there were a hundred vendors, each with a hundred modules, lookup still follows the same well-defined convention. This is a core idea of object-oriented design, programming against interfaces: no matter how the implementation changes, the interface must stay stable, which is what keeps the software loosely coupled and portable.
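To see how little the upper layers need to know, here is a small standalone sketch of the library-name resolution performed by hw_get_module_by_class("audio", "primary") in hardware/libhardware/hardware.c; the property list and search paths are approximate:

// Hedged, simplified sketch of how "audio" + "primary" becomes audio.primary.<variant>.so.
#include <cstdio>
#include <string>
#include <vector>

std::vector<std::string> candidate_libs(const std::string &cls, const std::string &inst,
                                        const std::vector<std::string> &variants) {
    std::vector<std::string> out;
    const std::string base = cls + "." + inst;            // "audio.primary"
    for (const auto &v : variants) {                       // e.g. value of ro.board.platform
        out.push_back("/vendor/lib/hw/" + base + "." + v + ".so");
        out.push_back("/system/lib/hw/" + base + "." + v + ".so");
    }
    out.push_back("/system/lib/hw/" + base + ".default.so");  // final fallback
    return out;
}

int main() {
    for (const auto &p : candidate_libs("audio", "primary", {"mt6580"}))
        std::printf("%s\n", p.c_str());   // e.g. /system/lib/hw/audio.primary.mt6580.so
}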

.id = AUDIO_HARDWARE_MODULE_ID,
.name = "MTK Audio HW HAL",

vendor/mediatek/proprietary/hardware/audio/common/aud_drv/audio_hw_hal.cpp

ladev->device.set_parameters = adev_set_parameters;

static int adev_set_parameters(struct audio_hw_device *dev, const char *kvpairs) {
#ifdef AUDIO_HAL_PROFILE_ENTRY_FUNCTION
    AudioAutoTimeProfile _p(__func__);
#endif
    struct legacy_audio_device *ladev = to_ladev(dev);
    return ladev->hwif->setParameters(String8(kvpairs));
}

Here ladev->hwif->setParameters(String8(kvpairs)) is called on ladev->hwif, which was created via ladev->hwif = createMTKAudioHardware():

AudioMTKHardwareInterface *AudioMTKHardwareInterface::create() {
    /* FIXME: This code needs to instantiate the correct audio device
     * interface. For now - we use compile-time switches. */
    AudioMTKHardwareInterface *hw = 0;
    char value[PROPERTY_VALUE_MAX];
    ALOGV("Creating MTK AudioHardware");
    //hw = new android::AudioALSAHardware();
    hw = android::AudioALSAHardware::GetInstance();
    return hw;
}

extern "C" AudioMTKHardwareInterface *createMTKAudioHardware() {
    /* FIXME: This code needs to instantiate the correct audio device
     * interface. For now - we use compile-time switches. */
    return AudioMTKHardwareInterface::create();
}

So ladev->hwif is the AudioALSAHardware instance, and its methods are what gets called; the final call therefore lands in:

status_t AudioALSAHardware::setParameters(const String8 &keyValuePairs) {

AudioALSAHardware::setParameters() is the final implementation of the audio setParameters() path.
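For reference, a hedged example of pushing a key/value pair down this exact path from native code; "routing=2" is only an illustrative pair, each HAL decides which keys it accepts:

#include <media/AudioSystem.h>

void setGlobalParameterExample() {
    // AUDIO_IO_HANDLE_NONE makes AudioFlinger forward the pair to every loaded HAL device,
    // which is how it eventually reaches AudioALSAHardware::setParameters().
    android::AudioSystem::setParameters(AUDIO_IO_HANDLE_NONE, android::String8("routing=2"));
}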

In the data structure below, struct audio_stream_out stream is what AF/APS uses; it is platform-independent data:

struct legacy_stream_out {
    struct audio_stream_out stream;
    AudioMTKStreamOutInterface *legacy_out;
};


vendor/mediatek/proprietary/hardware/audio/common/aud_drv/audio_hw_hal.cpp

out->legacy_out = ladev->hwif->openOutputStreamWithFlags(devices, flags,
                                                         (int *) &config->format,
                                                         &config->channel_mask,
                                                         &config->sample_rate, &status);

AudioALSAHardware.cpp

AudioMTKStreamOutInterface *AudioALSAHardware::openOutputStreamWithFlags(uint32_t devices,
                                                                         audio_output_flags_t flags,
                                                                         int *format,
                                                                         uint32_t *channels,
                                                                         uint32_t *sampleRate,
                                                                         status_t *status) {
    return mStreamManager->openOutputStream(devices, format, channels, sampleRate, status, flags);
    // AudioMTKStreamOutInterface *AudioALSAStreamManager::openOutputStream(
}

Then, in AudioALSAStreamManager.cpp, the following returns an AudioALSAStreamOut:

AudioMTKStreamOutInterface *AudioALSAStreamManager::openOutputStream(

So the out->legacy_out used by out_write is an AudioALSAStreamOut, and the out_write hooked into struct audio_stream_out *stream ends up calling AudioALSAStreamOut::write():

static ssize_t out_write(struct audio_stream_out *stream, const void *buffer,
                         size_t bytes) {
#ifdef AUDIO_HAL_PROFILE_ENTRY_FUNCTION
    AudioAutoTimeProfile _p(__func__, AUDIO_HAL_FUNCTION_WRITE_NS);
#endif
    struct legacy_stream_out *out = reinterpret_cast<struct legacy_stream_out *>(stream);
    return out->legacy_out->write(buffer, bytes);
}

The following is adapted from https://blog.csdn.net/bberdong/article/details/78346729

The audio write data flow:
AudioTrack->write
AudioFlinger::PlaybackThread::threadLoop_write()
mNormalSink->write
mNormalSink is actually an NBAIO_Sink, whose implementation class is AudioStreamOutSink,
so let's go straight to
frameworks/av/media/libnbaio/AudioStreamOutSink.cpp

// excerpt from AudioStreamOutSink::write
status_t ret = mStream->write(buffer, count * mFrameSize, &written);
// AudioStreamOutSink.h
sp<StreamOutHalInterface> mStream;

Sure enough, the type of mStream has become StreamOutHalInterface (on Android 5.1 it was an audio_stream_out).

We also notice a new directory under frameworks/av/media/:
libaudiohal

Android.mk                DeviceHalLocal.h             DevicesFactoryHalLocal.h  EffectHalHidl.h             EffectsFactoryHalLocal.h  StreamHalLocal.h
ConversionHelperHidl.cpp  DevicesFactoryHalHidl.cpp    EffectBufferHalHidl.cpp   EffectHalLocal.cpp          HalDeathHandlerHidl.cpp
ConversionHelperHidl.h    DevicesFactoryHalHidl.h      EffectBufferHalHidl.h     EffectHalLocal.h            include
DeviceHalHidl.cpp         DevicesFactoryHalHybrid.cpp  EffectBufferHalLocal.cpp  EffectsFactoryHalHidl.cpp   StreamHalHidl.cpp
DeviceHalHidl.h           DevicesFactoryHalHybrid.h    EffectBufferHalLocal.h    EffectsFactoryHalHidl.h     StreamHalHidl.h
DeviceHalLocal.cpp        DevicesFactoryHalLocal.cpp   EffectHalHidl.cpp         EffectsFactoryHalLocal.cpp  StreamHalLocal.cpp

From the file names there are clearly two groups: one ending in Hidl and one ending in Local. The Local ones keep compatibility with the old in-process path, while the Hidl ones talk to the new HIDL HAL service.
The implementation of StreamOutHalInterface lives here:

class StreamOutHalHidl : public StreamOutHalInterface, public StreamHalHidl
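The choice between the Hidl and Local factories is made by the Hybrid wrapper. Roughly, and abridged from DevicesFactoryHalHybrid.cpp (the exact A2DP special-casing may differ by release):

DevicesFactoryHalHybrid::DevicesFactoryHalHybrid()
        : mLocalFactory(new DevicesFactoryHalLocal()),
          mHidlFactory(hardware::audio::V2_0::IDevicesFactory::getService() != nullptr
                  ? new DevicesFactoryHalHidl() : nullptr) {
}

status_t DevicesFactoryHalHybrid::openDevice(const char *name, sp<DeviceHalInterface> *device) {
    // Prefer the HIDL (out-of-process) HAL when its service is up; fall back to the
    // legacy in-process path otherwise.
    if (mHidlFactory != 0 && strcmp(AUDIO_HARDWARE_MODULE_ID_A2DP, name) != 0) {
        return mHidlFactory->openDevice(name, device);
    }
    return mLocalFactory->openDevice(name, device);
}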

Continuing the write flow:
StreamOutHalHidl::write
callWriterThread(WriteCommand::WRITE, ...

// StreamOutHalHidl::callWriterThread
if (!mCommandMQ->write(&cmd)) {
    ALOGE("command message queue write failed for \"%s\"", cmdName);
    return -EAGAIN;
}
if (data != nullptr) {
    size_t availableToWrite = mDataMQ->availableToWrite();
    if (dataSize > availableToWrite) {
        ALOGW("truncating write data from %lld to %lld due to insufficient data queue space",
                (long long)dataSize, (long long)availableToWrite);
        dataSize = availableToWrite;
    }
    if (!mDataMQ->write(data, dataSize)) {
        ALOGE("data message queue write failed for \"%s\"", cmdName);
    }
}

mDataMQ is:

typedef MessageQueue<uint8_t, hardware::kSynchronizedReadWrite> DataMQ;

and at the top of the same file: #include <fmq/MessageQueue.h>

So, FMQ it is!

First look at WriteCommand:

//StreamHalHidl.h

using WriteCommand = ::android::hardware::audio::V2_0::IStreamOut::WriteCommand;

At this point the Binder-ization is obvious: this is the start of a cross-process call, and FMQ (Fast Message Queue) is the key mechanism that makes it work.
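To get a feel for FMQ itself, a minimal sketch of a synchronized-read/write queue using <fmq/MessageQueue.h>; in the real audio HAL the descriptor of mDataMQ is handed to the other process over HIDL rather than used in the same process as here:

#include <fmq/MessageQueue.h>

using android::hardware::MessageQueue;
using android::hardware::kSynchronizedReadWrite;

typedef MessageQueue<uint8_t, kSynchronizedReadWrite> DataMQ;

void fmqSketch() {
    DataMQ dataMQ(2048 /* capacity in elements */, true /* configure event flag word */);
    uint8_t pcm[256] = {0};
    if (dataMQ.write(pcm, sizeof(pcm))) {   // producer side (AudioFlinger)
        uint8_t out[256];
        dataMQ.read(out, sizeof(out));      // consumer side (the HAL's WriteThread) reads the same bytes
    }
}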
Building the hardware/interfaces/audio module produces, under
out/soong/.intermediates/hardware/interfaces/audio/2.0/android.hardware.audio@2.0_genc++/gen/android/hardware/audio/2.0:
DeviceAll.cpp DevicesFactoryAll.cpp PrimaryDeviceAll.cpp StreamAll.cpp StreamInAll.cpp StreamOutAll.cpp StreamOutCallbackAll.cpp types.cpp
These files are generated automatically; through them AudioFlinger, via the libaudiohal module, calls into the HAL across the Binder (HIDL) boundary.

Now back to /hardware/interfaces/audio/2.0/: the default directory already contains the server-side implementation. Still using the write interface as an example:
vendor/mediatek/proprietary/hardware/audio/common/service/2.0/StreamOut.cpp

// WriteThread::threadLoop
bool WriteThread::threadLoop() {
    // This implementation doesn't return control back to the Thread until it
    // decides to stop, as the Thread uses mutexes, and this can lead to priority inversion.
    while (!std::atomic_load_explicit(mStop, std::memory_order_acquire)) {
        uint32_t efState = 0;
        mEfGroup->wait(static_cast<uint32_t>(MessageQueueFlagBits::NOT_EMPTY), &efState);
        if (!(efState & static_cast<uint32_t>(MessageQueueFlagBits::NOT_EMPTY))) {
            continue;  // Nothing to do.
        }
        if (!mCommandMQ->read(&mStatus.replyTo)) {
            continue;  // Nothing to do.
        }
        switch (mStatus.replyTo) {
            case IStreamOut::WriteCommand::WRITE:
                ALOGE("zyk IStreamOut::WriteCommand::WRITE: %d", mStatus.replyTo);
                doWrite();
                break;
//StreamOut.cpp
ssize_t writeResult = mStream->write(mStream, &mBuffer[0], availToRead);
//StreamOut.h
audio_stream_out_t *mStream;

Next, following the function pointers through many twists and turns, we arrive back at:

vendor/mediatek/proprietary/hardware/audio/common/aud_drv/audio_hw_hal.cpp

which calls the out_write function. From there, some platforms (Qualcomm, for example) call pcm_write and enter the tinyALSA driver flow; that part is much the same as in earlier Android versions, so it is not covered here.
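For completeness, a hedged sketch of that classic tinyALSA leg; the card/device numbers and pcm_config values are placeholders, not MTK's actual configuration:

#include <tinyalsa/asoundlib.h>

void play_pcm_sketch(const void *data, unsigned int bytes) {
    struct pcm_config config = {};
    config.channels = 2;
    config.rate = 44100;
    config.period_size = 1024;
    config.period_count = 4;
    config.format = PCM_FORMAT_S16_LE;

    struct pcm *pcm = pcm_open(0 /* card */, 0 /* device */, PCM_OUT, &config);
    if (pcm == NULL || !pcm_is_ready(pcm))
        return;
    pcm_write(pcm, data, bytes);   // blocks until the data has been handed to the DMA buffer
    pcm_close(pcm);
}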

In AudioALSAStreamOut::write() we can see that when the stream is in standby it is reopened; open() goes through the following and starts the corresponding handler threads:

AudioALSAStreamOut->open

./vendor/mediatek/proprietary/hardware/audio/common/V3/aud_drv/AudioALSAStreamManager.cpp

createPlaybackHandler
AudioALSAPlaybackHandlerBTCVSD

createCaptureHandler
AudioALSACaptureHandlerBT

Capture from mic

On MT6580 the mixer control used for the mic is
AUDIO_DEVICE_BUILTIN_DUAL_MIC
"builtin_Mic_DualMic"

./device/mediatek/mt6580/audio_device.xml, lines 84-88 and 90-93 (the matching mixer-control XML entries were not captured in this note).

The default ADC value is "ADC1", at index 0:
static const char * const Pmic_Digital_Mux[] = { "ADC1", "ADC2", "ADC3", "ADC4" };
In the figure (not reproduced here), the left side is the default state and the right side is the mixer-control state after switching to mic recording mode.
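The same kind of switch can be exercised from user space with the tinyALSA mixer API (this is how tinymix works). A hedged sketch; treating "builtin_Mic_DualMic" as an enum-type control that takes the Pmic_Digital_Mux values is an assumption:

#include <tinyalsa/asoundlib.h>

void select_adc_sketch() {
    struct mixer *mixer = mixer_open(0 /* card */);
    if (mixer == NULL)
        return;
    struct mixer_ctl *ctl = mixer_get_ctl_by_name(mixer, "builtin_Mic_DualMic");
    if (ctl != NULL)
        mixer_ctl_set_enum_by_string(ctl, "ADC1");  // default mux value, per the array above
    mixer_close(mixer);
}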

[  271.600678] <2>-(1)[1611:AudioBTCVSDLoop][name:mt_soc_pcm_capture&]mtk_capture_pcm_pointer, buffer overflow u4DMAReadIdx:78, u4WriteIdx:a8, u4DataRemained:1030, u4BufferSize:1000
[  271.600697] <2>-(1)[1611:AudioBTCVSDLoop][name:mt_soc_pcm_capture&]mtk_capture_alsa_stop
./sound/soc/mediatek/mt6580/mt_soc_pcm_capture.c:400:static int mtk_capture_alsa_start(struct snd_pcm_substream *substream)
./sound/soc/mediatek/mt6580/mt_soc_pcm_capture.c:402:   pr_warn("mtk_capture_alsa_start\n");
./sound/soc/mediatek/mt6580/mt_soc_pcm_capture.c:414:       return mtk_capture_alsa_start(substream);

Play on speaker

[  271.602486] <2> (1)[1611:AudioBTCVSDLoop][name:mt_soc_codec_63xx&]mt63xx_codec_prepare set up SNDRV_PCM_STREAM_CAPTURE rate = 8000
./sound/soc/mediatek/mt6580/mt_soc_codec_63xx.c:827:static int mt63xx_codec_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *Daiport)
./sound/soc/mediatek/mt6580/mt_soc_codec_63xx.c:830:        pr_warn("mt63xx_codec_prepare set up SNDRV_PCM_STREAM_CAPTURE rate = %d\n",
./sound/soc/mediatek/mt6580/mt_soc_codec_63xx.c:835:        pr_warn("mt63xx_codec_prepare set up SNDRV_PCM_STREAM_PLAYBACK rate = %d\n",
./sound/soc/mediatek/mt6580/mt_soc_codec_63xx.c:859:    .prepare = mt63xx_codec_prepare,

Using CallStack to get a native stack trace in C++ code

(1) Add the libutils shared library to your module's Android.mk:

LOCAL_SHARED_LIBRARIES := \
    libutils

(2) At the place where you need the native call stack, define an android::CallStack object; the call stack is then printed to the main log:

#include <utils/CallStack.h>
android::CallStack stack("TAG");  /* add this code at the necessary place */

Note that for code inside the android namespace, the #include <utils/CallStack.h> /* add this line */ must be placed before the namespace is opened, otherwise it does not take effect. For example:

#define calc_time_diff(x,y) ((x.tv_sec - y.tv_sec )+ (double)( x.tv_nsec - y.tv_nsec ) / (double)1000000000)
#include <utils/CallStack.h> /* add this line */
namespace android {
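Putting the pieces together, a complete, self-contained sketch of how this ends up looking in a framework/HAL source file; the tag and function name are made up for illustration:

#include <utils/CallStack.h>   /* add this line, before the android namespace is opened */

namespace android {

void dumpWhoCalledMe() {
    // Constructing the object immediately logs the native backtrace of the
    // current thread to the main log under the tag "AUDIO_DBG".
    CallStack stack("AUDIO_DBG");
}

}  // namespace android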
