Reading from and writing to the sound card through the Android NDK is done with two native classes: AudioRecord and AudioTrack.

AudioTrack: handles audio data output (playback)
AudioRecord: handles audio data capture (recording)

  • Location of the relevant header files

system/media/audio/include/system
├── audio-base.h
├── audio-base-utils.h
├── audio_effect-base.h
├── audio_effect.h
├── audio_effects
├── audio.h
├── audio_policy.h
└── sound_trigger.h

Audio sources:
typedef enum {
    AUDIO_SOURCE_DEFAULT = 0,             // default input source
    AUDIO_SOURCE_MIC = 1,                 // microphone audio source
    AUDIO_SOURCE_VOICE_UPLINK = 2,        // voice call uplink (Tx) audio source
    AUDIO_SOURCE_VOICE_DOWNLINK = 3,      // voice call downlink (Rx) audio source
    AUDIO_SOURCE_VOICE_CALL = 4,          // voice call uplink + downlink audio source
    AUDIO_SOURCE_CAMCORDER = 5,           // microphone audio source tuned for video recording
    AUDIO_SOURCE_VOICE_RECOGNITION = 6,   // microphone audio source tuned for voice recognition
    AUDIO_SOURCE_VOICE_COMMUNICATION = 7, // microphone audio source tuned for voice communications such as VoIP
    AUDIO_SOURCE_REMOTE_SUBMIX = 8,
    AUDIO_SOURCE_UNPROCESSED = 9,
    AUDIO_SOURCE_VOICE_PERFORMANCE = 10,
    AUDIO_SOURCE_ECHO_REFERENCE = 1997,
    AUDIO_SOURCE_FM_TUNER = 1998,
#ifndef AUDIO_NO_SYSTEM_DECLARATIONS
    /**
     * A low-priority, preemptible audio source for for background software
     * hotword detection. Same tuning as VOICE_RECOGNITION.
     * Used only internally by the framework.
     */
    AUDIO_SOURCE_HOTWORD = 1999,
#endif // AUDIO_NO_SYSTEM_DECLARATIONS
} audio_source_t;

typedef enum {
    AUDIO_SESSION_OUTPUT_STAGE = -1, // (-1)
    AUDIO_SESSION_OUTPUT_MIX = 0,
    AUDIO_SESSION_ALLOCATE = 0,
    AUDIO_SESSION_NONE = 0,
} audio_session_t;

// Audio formats
typedef enum { // some definitions omitted
    AUDIO_FORMAT_INVALID             = 0xFFFFFFFFu,
    AUDIO_FORMAT_DEFAULT             = 0,
    AUDIO_FORMAT_PCM                 = 0x00000000u,
    AUDIO_FORMAT_MP3                 = 0x01000000u,
    AUDIO_FORMAT_AMR_NB              = 0x02000000u,
    /* Subformats */
    AUDIO_FORMAT_PCM_SUB_16_BIT        = 0x1u, 
    AUDIO_FORMAT_PCM_SUB_8_BIT         = 0x2u,
    AUDIO_FORMAT_PCM_SUB_32_BIT        = 0x3u,
    AUDIO_FORMAT_PCM_SUB_8_24_BIT      = 0x4u,
    AUDIO_FORMAT_PCM_SUB_FLOAT         = 0x5u,
    AUDIO_FORMAT_PCM_SUB_24_BIT_PACKED = 0x6u,

/* Aliases */
    AUDIO_FORMAT_PCM_16_BIT            = 0x1u,        // (PCM | PCM_SUB_16_BIT)  16-bit PCM
    AUDIO_FORMAT_PCM_8_BIT             = 0x2u,        // (PCM | PCM_SUB_8_BIT)   8-bit PCM
    AUDIO_FORMAT_PCM_32_BIT            = 0x3u,        // (PCM | PCM_SUB_32_BIT)
    AUDIO_FORMAT_PCM_8_24_BIT          = 0x4u,        // (PCM | PCM_SUB_8_24_BIT)
    AUDIO_FORMAT_PCM_FLOAT             = 0x5u,        // (PCM | PCM_SUB_FLOAT)
    AUDIO_FORMAT_PCM_24_BIT_PACKED     = 0x6u,        // (PCM | PCM_SUB_24_BIT_PACKED)
    AUDIO_FORMAT_AAC_MAIN              = 0x4000001u,  // (AAC | AAC_SUB_MAIN)
    AUDIO_FORMAT_AAC_LC                = 0x4000002u,  // (AAC | AAC_SUB_LC)
    AUDIO_FORMAT_AAC_SSR               = 0x4000004u,  // (AAC | AAC_SUB_SSR)
  
} audio_format_t;
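
Each of the aliases above is simply the main format OR'ed with a sub-format, so the encoding and the sample layout can be pulled apart again with two masks. A minimal standalone sketch of that decomposition (the helper names and the hard-coded mask values are ours, inferred from the bit layout of the constants above, not taken from the header):

#include <cstdint>
#include <cstdio>

// Split an audio format value into its main-format and sub-format parts,
// mirroring the "(PCM | PCM_SUB_16_BIT)" style comments above.
static inline uint32_t format_main(uint32_t fmt) { return fmt & 0xFF000000u; }
static inline uint32_t format_sub(uint32_t fmt)  { return fmt & 0x00FFFFFFu; }

int main() {
    uint32_t fmt = 0x4000002u;  // AUDIO_FORMAT_AAC_LC
    // prints: main: 0x04000000 (AAC)  sub: 0x00000002 (AAC_SUB_LC)
    printf("main: 0x%08x  sub: 0x%08x\n", format_main(fmt), format_sub(fmt));
    return 0;
}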

enum { // some definitions omitted
    AUDIO_CHANNEL_REPRESENTATION_POSITION   = 0x0u,
    AUDIO_CHANNEL_REPRESENTATION_INDEX      = 0x2u,
    AUDIO_CHANNEL_NONE                      = 0x0u,
    AUDIO_CHANNEL_INVALID                   = 0xC0000000u,

    AUDIO_CHANNEL_OUT_FRONT_LEFT            = 0x1u,
    AUDIO_CHANNEL_OUT_FRONT_RIGHT           = 0x2u,
    AUDIO_CHANNEL_IN_TOP_RIGHT              = 0x400000u,
    AUDIO_CHANNEL_IN_VOICE_UPLINK           = 0x4000u,
    AUDIO_CHANNEL_IN_VOICE_DNLINK           = 0x8000u,
    AUDIO_CHANNEL_IN_MONO                   = 0x10u,     // IN_FRONT             mono
    AUDIO_CHANNEL_IN_STEREO                 = 0xCu,      // IN_LEFT | IN_RIGHT   stereo
    AUDIO_CHANNEL_IN_FRONT_BACK             = 0x30u,     // IN_FRONT | IN_BACK
    AUDIO_CHANNEL_IN_6                      = 0xFCu,     // IN_LEFT | IN_RIGHT | IN_FRONT | IN_BACK | IN_LEFT_PROCESSED | IN_RIGHT_PROCESSED
    AUDIO_CHANNEL_IN_2POINT0POINT2          = 0x60000Cu, // IN_LEFT | IN_RIGHT | IN_TOP_LEFT | IN_TOP_RIGHT
    AUDIO_CHANNEL_IN_2POINT1POINT2          = 0x70000Cu, // IN_LEFT | IN_RIGHT | IN_TOP_LEFT | IN_TOP_RIGHT | IN_LOW_FREQUENCY
    AUDIO_CHANNEL_IN_3POINT0POINT2          = 0x64000Cu, // IN_LEFT | IN_CENTER | IN_RIGHT | IN_TOP_LEFT | IN_TOP_RIGHT
    AUDIO_CHANNEL_IN_3POINT1POINT2          = 0x74000Cu, // IN_LEFT | IN_CENTER | IN_RIGHT | IN_TOP_LEFT | IN_TOP_RIGHT | IN_LOW_FREQUENCY
    AUDIO_CHANNEL_IN_5POINT1                = 0x17000Cu, // IN_LEFT | IN_CENTER | IN_RIGHT | IN_BACK_LEFT | IN_BACK_RIGHT | IN_LOW_FREQUENCY
    AUDIO_CHANNEL_IN_VOICE_UPLINK_MONO      = 0x4010u,   // IN_VOICE_UPLINK | IN_MONO
    AUDIO_CHANNEL_IN_VOICE_DNLINK_MONO      = 0x8010u,   // IN_VOICE_DNLINK | IN_MONO
    AUDIO_CHANNEL_IN_VOICE_CALL_MONO        = 0xC010u,   // IN_VOICE_UPLINK_MONO | IN_VOICE_DNLINK_MONO
};
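
In the positional representation each bit of the mask stands for one channel, so the channel count is just the number of set bits; the demo program below relies on exactly that through popcount(channelmask). A small standalone sketch using the compiler builtin instead of the framework helper:

#include <cstdint>
#include <cstdio>

// Channel count = number of set bits in a positional channel mask.
static inline int channel_count(uint32_t mask) { return __builtin_popcount(mask); }

int main() {
    printf("IN_MONO    -> %d channel(s)\n", channel_count(0x10u));      // 1
    printf("IN_STEREO  -> %d channel(s)\n", channel_count(0xCu));       // 2
    printf("IN_5POINT1 -> %d channel(s)\n", channel_count(0x17000Cu));  // 6
    return 0;
}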

typedef enum {
    AUDIO_INPUT_FLAG_NONE       = 0x0,
    AUDIO_INPUT_FLAG_FAST       = 0x1,
    AUDIO_INPUT_FLAG_HW_HOTWORD = 0x2,
    AUDIO_INPUT_FLAG_RAW        = 0x4,
    AUDIO_INPUT_FLAG_SYNC       = 0x8,
    AUDIO_INPUT_FLAG_MMAP_NOIRQ = 0x10,
    AUDIO_INPUT_FLAG_VOIP_TX    = 0x20,
    AUDIO_INPUT_FLAG_HW_AV_SYNC = 0x40,
#ifndef AUDIO_NO_SYSTEM_DECLARATIONS  // TODO: Expose at HAL interface, remove FRAMEWORK_FLAGS mask
    AUDIO_INPUT_FLAG_DIRECT     = 0x80,
    AUDIO_INPUT_FRAMEWORK_FLAGS = AUDIO_INPUT_FLAG_DIRECT,
#endif
} audio_input_flags_t;

enum {
    AUDIO_IO_HANDLE_NONE = 0,
    AUDIO_MODULE_HANDLE_NONE = 0,
    AUDIO_PORT_HANDLE_NONE = 0,
    AUDIO_PATCH_HANDLE_NONE = 0,
};

Transfer modes (how audio data moves between the application and the framework):

TRANSFER_CALLBACK    data is transferred through the EVENT_MORE_DATA callback
TRANSFER_OBTAIN      the application calls obtainBuffer()/releaseBuffer() itself
TRANSFER_SYNC        synchronous, blocking read()/write() calls
TRANSFER_DEFAULT     not specified; the framework chooses a mode based on the other parameters
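
The demo below uses TRANSFER_CALLBACK for both capture and playback. With TRANSFER_SYNC the callback is not needed and data is pulled with blocking read() calls instead; a rough, untested sketch of what such a capture loop could look like (assuming an AudioRecord set up like in the demo, but with AudioRecord::TRANSFER_SYNC and a NULL callback):

#include <cstdio>
#include <media/AudioRecord.h>

// Sketch only: pull PCM from the sound card synchronously and append it to a file.
static void sync_capture_loop(const android::sp<android::AudioRecord> &rec,
                              FILE *out, size_t chunk_bytes)
{
    char *buf = new char[chunk_bytes];
    for (;;) {
        ssize_t n = rec->read(buf, chunk_bytes);  // blocks until data is available
        if (n <= 0) {
            printf("read returned %zd, stopping\n", n);
            break;
        }
        fwrite(buf, 1, (size_t)n, out);
    }
    delete[] buf;
}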

  • Demo source:

├── Android.mk
├── include
└── src
    └── audio_main.cpp

audio_main.cpp:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <pthread.h>
#include <math.h>
#include <system/audio.h>
#include <media/AudioRecord.h>
#include <media/AudioTrack.h>

using namespace android;

sp<AudioRecord> mAudioRecord;
sp<AudioTrack> mAudioTrack;
FILE *g_read_pcm = NULL;
FILE *g_write_pcm = NULL;
audio_channel_mask_t channelmask = AUDIO_CHANNEL_IN_MONO;
audio_format_t audio_format = AUDIO_FORMAT_PCM_16_BIT;
int sample_rate = 16000;
int min_buf_size = 0;

// Capture callback: receive recorded data from the sound card and save it
void read_audio_data(int event, void *user, void *info)
{
    if (event != AudioRecord::EVENT_MORE_DATA) {
        printf("%s: event: %d\n", __FUNCTION__, event);
        return;
    }
    AudioRecord::Buffer *buffer = static_cast<AudioRecord::Buffer *>(info);
    if (buffer->size == 0) {
        return;
    }
    //printf("%s: buf size: %d\n", __FUNCTION__, buffer->size);
    fwrite(buffer->raw, buffer->size, 1, g_write_pcm);
}

// Read from the sound card and write into a file
int ndk_audio_read()
{
    int ret = 0;
    char file[256] = {'\0'};
    size_t frame_count = 0;
    int frame_size = 0;
    String16 strName = String16("reader");

    mAudioRecord = new AudioRecord(strName);
    mAudioRecord.get();

    status_t result = AudioRecord::getMinFrameCount(&frame_count, sample_rate,
                                                    audio_format, channelmask);
    if (result == NO_ERROR) {
        int channel_count = popcount(channelmask);
        min_buf_size = frame_count * channel_count *
                       (audio_format == AUDIO_FORMAT_PCM_16_BIT ? 2 : 1);
    } else if (result == BAD_VALUE) {
        printf("Invalid param when getting min frame count\n");
        return -1;
    } else {
        printf("Failed to get min frame count\n");
        return -1;
    }
    min_buf_size *= 2; // To prevent "buffer overflow" issue
    if (min_buf_size > 0) {
        printf("get min buf size[%d]\n", min_buf_size);
    } else {
        printf("get min buf size failed\n");
        return -1;
    }

    frame_size = popcount(channelmask) * (audio_format == AUDIO_FORMAT_PCM_16_BIT ? 2 : 1);
    frame_count = min_buf_size / frame_size;

    ret = mAudioRecord->set(AUDIO_SOURCE_MIC,
                            sample_rate,
                            audio_format,
                            channelmask,
                            frame_count,
                            read_audio_data,
                            NULL,
                            0,
                            false,
                            AUDIO_SESSION_ALLOCATE,
                            AudioRecord::TRANSFER_CALLBACK,
                            AUDIO_INPUT_FLAG_FAST,
                            getuid(),
                            getpid(),
                            NULL,
                            AUDIO_PORT_HANDLE_NONE);
    if (ret != NO_ERROR) {
        printf("AudioRecord set failure\n");
        return -1;
    } else {
        printf("set success\n");
    }
    if (mAudioRecord->initCheck() != NO_ERROR) {
        printf("AudioRecord initialization failed!");
        return -1;
    }

    snprintf(file, 256, "/data/ndksound.pcm");
    g_write_pcm = fopen(file, "wb");

    ret = mAudioRecord->start();
    if (ret != NO_ERROR) {
        printf("Audio Record start failure ret: [%d]", ret);
    }
    return 0;
}

// Playback callback: fill the buffer that will be written to the sound card
void write_audio_data(int event, void *user, void *info)
{
    if (event != AudioTrack::EVENT_MORE_DATA) {
        printf("soundcard writer event: %d\n", event);
        return;
    }
    AudioTrack::Buffer *buffer = static_cast<AudioTrack::Buffer *>(info);
    if (buffer->size == 0) {
        return;
    }
    memset(buffer->raw, 0, buffer->size);
    int ret = fread(buffer->raw, 1, buffer->size, g_read_pcm);
    if (ret <= 0) {
        printf("%s: no more data:%d\n", __FUNCTION__, ret);
        exit(1);
    }
}

// Read from a file and write into the sound card
int ndk_audio_write()
{
    int ret = 0;
    char file[256] = {'\0'};
    size_t frame_count = 0;
    int frame_size = 0;

    mAudioTrack = new AudioTrack();
    mAudioTrack.get();

    status_t result = AudioTrack::getMinFrameCount(&frame_count, AUDIO_STREAM_DEFAULT,
                                                   sample_rate);
    if (result == NO_ERROR) {
        int channel_count = popcount(channelmask);
        min_buf_size = frame_count * channel_count *
                       (audio_format == AUDIO_FORMAT_PCM_16_BIT ? 2 : 1);
    } else if (result == BAD_VALUE) {
        printf("Invalid param when getting min frame count\n");
        return -1;
    } else {
        printf("Failed to get min frame count\n");
        return -1;
    }
    if (min_buf_size > 0) {
        printf("get min buf size[%d]\n", min_buf_size);
    } else {
        printf("get min buf size failed\n");
        return -1;
    }

    channelmask = AUDIO_CHANNEL_OUT_MONO;
    frame_size = popcount(channelmask) * (audio_format == AUDIO_FORMAT_PCM_16_BIT ? 2 : 1);
    frame_count = min_buf_size / frame_size;

    ret = mAudioTrack->set(AUDIO_STREAM_VOICE_CALL,
                           sample_rate,
                           audio_format,
                           channelmask,
                           frame_count,
                           AUDIO_OUTPUT_FLAG_FAST,
                           write_audio_data,
                           NULL,
                           0,
                           0,
                           false,
                           AUDIO_SESSION_ALLOCATE,
                           AudioTrack::TRANSFER_CALLBACK,
                           NULL,
                           -1);
    if (ret != NO_ERROR) {
        printf("mAudioTrack set failure\n");
        return -1;
    } else {
        printf("set success\n");
    }
    if (mAudioTrack->initCheck() != NO_ERROR) {
        printf("mAudioTrack initialization failed!");
        return -1;
    }

    snprintf(file, 256, "/data/ndksound.pcm");
    g_read_pcm = fopen(file, "rb");
    if (!g_read_pcm) {
        printf("open file failed\n");
        return -1;
    }

    ret = mAudioTrack->start();
    if (ret != NO_ERROR) {
        printf("Audio Track start failure ret: [%d]", ret);
        return -1;
    }
    printf("start success\n");
    return 0;
}

int main(int argc, char *argv[])
{
    int ret = 0;
    if (argc < 2) {
        printf("need 2 params\n");
        return -1;
    }
    if (0 == strcmp(argv[1], "read")) {
        printf("read soundcard\n");
        ret = ndk_audio_read();
        if (ret < 0) {
            exit(1);
        }
    } else {
        printf("write soundcard\n");
        ret = ndk_audio_write();
        if (ret < 0) {
            exit(1);
        }
    }
    while (1) {
        sleep(5);
    }
    if (g_read_pcm) {
        fclose(g_read_pcm);
    }
    if (g_write_pcm) {
        fclose(g_write_pcm);
    }
    return 0;
}

Android.mk

LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)

LOCAL_SRC_FILES += \
    src/audio_main.cpp

LOCAL_C_INCLUDES += \
    bionic \
    external/stlport/stlport \
    external/libcxx/include \
    frameworks/av/include \
    frameworks/av/media/libaudioclient/include \
    frameworks/native/libs/nativebase/include \
    frameworks/native/libs/math/include \
    frameworks/av/media/ndk/include \
    system/core/include \
    system/core/libprocessgroup/include \
    system/core/base/include \
    system/core/libutils/include

LOCAL_CFLAGS := -DANDROID -Wall -Wno-implicit-function-declaration -Wl,--unresolved-symbols=ignore-all

LOCAL_MODULE := ndk_audio
LOCAL_LDLIBS := -lm -lmediandk -landroid -laudioclient -lstdc++ -lutils

include $(BUILD_EXECUTABLE)
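
Note that the module links against internal framework libraries (libaudioclient, libutils) and pulls headers straight from frameworks/av, so it is meant to be built inside an AOSP source tree rather than with a standalone NDK toolchain. One way to build and deploy it, assuming the demo lives under a directory such as vendor/demo/ndk_audio (the path and lunch target are placeholders):

source build/envsetup.sh
lunch <your_target>
mmm vendor/demo/ndk_audio
adb push out/target/product/<device>/system/bin/ndk_audio /data/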
  • Running the demo

Read audio data from the sound card and write it to a file: ./ndk_audio read

Read audio data from a file and play it out through the sound card: ./ndk_audio write
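
The file written by the read mode is raw PCM with the parameters hard-coded in the demo (16 kHz, mono, 16-bit little-endian). To check a capture on a development machine, pull /data/ndksound.pcm and play it with ffplay, for example:

ffplay -f s16le -ar 16000 -ac 1 ndksound.pcm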
