When recording a game, how do I capture the game sound and the microphone, and keep them separate from the audio of other applications?

I need to record the screen, the sound card (system audio), and the microphone at the same time.
The code is adapted from another author's code — many thanks to the original author.
The code is pasted below:
#include "stdafx.h"

#ifdef __cplusplus
extern "C"
{
#endif
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavdevice/avdevice.h"
#include "libavutil/audio_fifo.h"

#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "avdevice.lib")
#pragma comment(lib, "avfilter.lib")

//#pragma comment(lib, "avfilter.lib")
//#pragma comment(lib, "postproc.lib")
//#pragma comment(lib, "swresample.lib")
#pragma comment(lib, "swscale.lib")

#ifdef __cplusplus
};
#endif
AVFormatContext *pFormatCtx_Video = NULL, *pFormatCtx_Audio = NULL, *pFormatCtx_Audio_mic = NULL, *pFormatCtx_Out = NULL;
AVCodecContext *pCodecCtx_Video;
AVCodec *pCodec_Video;
AVFifoBuffer *fifo_video = NULL;
AVAudioFifo *fifo_audio = NULL;
AVAudioFifo *fifo_audio_mic = NULL;
int VideoIndex, AudioIndex, AudioIndex_mic;

CRITICAL_SECTION AudioSection, AudioSection_mic, VideoSection;
SwsContext *img_convert_ctx;
int frame_size = 0;
uint8_t *picture_buf = NULL, *frame_buf = NULL;

bool bCap = true;
bool bStop = false;

char* m_szPath = "test.mp4";
int m_nFramerate = 15;

DWORD WINAPI startRecordThread(LPVOID lpParam);
DWORD WINAPI ScreenCapThreadProc(LPVOID lpParam);
DWORD WINAPI AudioCapThreadProc(LPVOID lpParam);
DWORD WINAPI AudioMicCapThreadProc(LPVOID lpParam);
int OpenVideoCapture()
{
    AVInputFormat *ifmt = av_find_input_format("gdigrab");
    // options can be passed when opening, e.g. to set the capture frame rate
    AVDictionary *options = NULL;
    char szFramerate[4];
    sprintf(szFramerate, "%d", m_nFramerate);
    av_dict_set(&options, "framerate", szFramerate, NULL);
    //av_dict_set(&options, "offset_x", "20", 0);
    //The distance from the top edge of the screen or desktop
    //av_dict_set(&options, "offset_y", "40", 0);
    //Video frame size. The default is to capture the full screen
    //av_dict_set(&options, "video_size", "320x240", 0);
    if (avformat_open_input(&pFormatCtx_Video, "desktop", ifmt, &options) != 0)
    {
        printf("Couldn't open input stream.\n");
        return -1;
    }
    if (avformat_find_stream_info(pFormatCtx_Video, NULL) < 0)
    {
        printf("Couldn't find stream information.\n");
        return -1;
    }
    if (pFormatCtx_Video->streams[0]->codec->codec_type != AVMEDIA_TYPE_VIDEO)
    {
        printf("Couldn't find video stream information.\n");
        return -1;
    }

    pCodecCtx_Video = pFormatCtx_Video->streams[0]->codec;
    pCodec_Video = avcodec_find_decoder(pCodecCtx_Video->codec_id);
    if (pCodec_Video == NULL)
    {
        printf("Codec not found.\n");
        return -1;
    }
    if (avcodec_open2(pCodecCtx_Video, pCodec_Video, NULL) < 0)
    {
        printf("Could not open codec.\n");
        return -1;
    }

    //pCodecCtx_Video->bit_rate = 1000000;
    //pCodecCtx_Video->rc_min_rate = 128000;
    //pCodecCtx_Video->rc_max_rate = 2500000;

    img_convert_ctx = sws_getContext(pCodecCtx_Video->width, pCodecCtx_Video->height, pCodecCtx_Video->pix_fmt,
        pCodecCtx_Video->width, pCodecCtx_Video->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);

    frame_size = avpicture_get_size(pCodecCtx_Video->pix_fmt, pCodecCtx_Video->width, pCodecCtx_Video->height);
    // allocate enough FIFO space for 30 frames
    fifo_video = av_fifo_alloc(30 * avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx_Video->width, pCodecCtx_Video->height));

    return 0;
}
static char *dup_wchar_to_utf8(wchar_t *w)
{
    char *s = NULL;
    int l = WideCharToMultiByte(CP_UTF8, 0, w, -1, 0, 0, 0, 0);
    s = (char *)av_malloc(l);
    WideCharToMultiByte(CP_UTF8, 0, w, -1, s, l, 0, 0);
    return s;
}
int OpenAudioCapture()
{
    // find the input format
    AVInputFormat *pAudioInputFmt = av_find_input_format("dshow");
    // open the device via DirectShow and bind the input format to the format context
    // device: audio=virtual-audio-capturer
    char *psDevName = dup_wchar_to_utf8(L"audio=virtual-audio-capturer");
    if (avformat_open_input(&pFormatCtx_Audio, psDevName, pAudioInputFmt, NULL) < 0)
    {
        printf("Couldn't open input stream.\n");
        return -1;
    }
    if (avformat_find_stream_info(pFormatCtx_Audio, NULL) < 0)
        return -1;
    if (pFormatCtx_Audio->streams[0]->codec->codec_type != AVMEDIA_TYPE_AUDIO)
    {
        printf("Couldn't find audio stream information.\n");
        return -1;
    }

    AVCodec *tmpCodec = avcodec_find_decoder(pFormatCtx_Audio->streams[0]->codec->codec_id);
    if (0 > avcodec_open2(pFormatCtx_Audio->streams[0]->codec, tmpCodec, NULL))
    {
        printf("can not find or open audio decoder!\n");
    }

    return 0;
}
int OpenAudioCapture_mic()
{
    // find the input format
    AVInputFormat *pAudioInputFmt = av_find_input_format("dshow");
    // open the device via DirectShow and bind the input format to the format context
    char *psDevName = dup_wchar_to_utf8(L"audio=FrontMic (Realtek High Definiti");
    if (avformat_open_input(&pFormatCtx_Audio_mic, psDevName, pAudioInputFmt, NULL) < 0)
    {
        printf("Couldn't open input stream.\n");
        return -1;
    }
    if (avformat_find_stream_info(pFormatCtx_Audio_mic, NULL) < 0)
        return -1;
    if (pFormatCtx_Audio_mic->streams[0]->codec->codec_type != AVMEDIA_TYPE_AUDIO)
    {
        printf("Couldn't find audio stream information.\n");
        return -1;
    }

    AVCodec *tmpCodec = avcodec_find_decoder(pFormatCtx_Audio_mic->streams[0]->codec->codec_id);
    if (0 > avcodec_open2(pFormatCtx_Audio_mic->streams[0]->codec, tmpCodec, NULL))
    {
        printf("can not find or open audio decoder!\n");
    }

    return 0;
}
int OpenOutPut()
{
    AVStream *pVideoStream = NULL, *pAudioStream = NULL, *pAudioStream_mic = NULL;
    const char *outFileName = m_szPath;
    avformat_alloc_output_context2(&pFormatCtx_Out, NULL, NULL, outFileName);

    if (pFormatCtx_Video->streams[0]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
    {
        AVCodecContext *videoCodecCtx;
        VideoIndex = 0;
        pVideoStream = avformat_new_stream(pFormatCtx_Out, NULL);
        if (!pVideoStream)
        {
            printf("can not new stream for output!\n");
            return -1;
        }

        // set codec context params
        pVideoStream->codec->codec = avcodec_find_encoder(AV_CODEC_ID_MPEG4);
        pVideoStream->codec->height = pFormatCtx_Video->streams[0]->codec->height;
        pVideoStream->codec->width = pFormatCtx_Video->streams[0]->codec->width;
        pVideoStream->codec->time_base = pFormatCtx_Video->streams[0]->codec->time_base;
        pVideoStream->codec->sample_aspect_ratio = pFormatCtx_Video->streams[0]->codec->sample_aspect_ratio;
        // take first format from list of supported formats
        pVideoStream->codec->pix_fmt = pFormatCtx_Out->streams[VideoIndex]->codec->codec->pix_fmts[0];

        //pVideoStream->codec->bit_rate = 1000000;
        //pVideoStream->codec->rc_min_rate = 128000;
        //pVideoStream->codec->rc_max_rate = 2500000;

        // open encoder
        if (!pVideoStream->codec->codec)
        {
            printf("can not find the encoder!\n");
            return -1;
        }

        if (pFormatCtx_Out->oformat->flags & AVFMT_GLOBALHEADER)
            pVideoStream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

        if ((avcodec_open2(pVideoStream->codec, pVideoStream->codec->codec, NULL)) < 0)
        {
            printf("can not open the encoder\n");
            return -1;
        }
    }

    if (pFormatCtx_Audio->streams[0]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
    {
        AVCodecContext *pOutputCodecCtx;
        AudioIndex = 1;
        pAudioStream = avformat_new_stream(pFormatCtx_Out, NULL);

        pAudioStream->codec->codec = avcodec_find_encoder(pFormatCtx_Out->oformat->audio_codec);
        pOutputCodecCtx = pAudioStream->codec;

        pOutputCodecCtx->sample_rate = pFormatCtx_Audio->streams[0]->codec->sample_rate;
        pOutputCodecCtx->channel_layout = pFormatCtx_Out->streams[0]->codec->channel_layout;
        pOutputCodecCtx->channels = av_get_channel_layout_nb_channels(pAudioStream->codec->channel_layout);
        if (pOutputCodecCtx->channel_layout == 0)
        {
            pOutputCodecCtx->channel_layout = AV_CH_LAYOUT_STEREO;
            pOutputCodecCtx->channels = av_get_channel_layout_nb_channels(pOutputCodecCtx->channel_layout);
        }
        pOutputCodecCtx->sample_fmt = pAudioStream->codec->codec->sample_fmts[0];
        AVRational time_base = { 1, pAudioStream->codec->sample_rate };
        pAudioStream->time_base = time_base;
        //audioCodecCtx->time_base = time_base;

        pOutputCodecCtx->codec_tag = 0;
        if (pFormatCtx_Out->oformat->flags & AVFMT_GLOBALHEADER)
            pOutputCodecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;

        if (avcodec_open2(pOutputCodecCtx, pOutputCodecCtx->codec, 0) < 0)
        {
            // the encoder failed to open, bail out
            return -1;
        }
    }

    if (pFormatCtx_Audio_mic->streams[0]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
    {
        AVCodecContext *pOutputCodecCtx_mic;
        AudioIndex_mic = 1;
        pAudioStream_mic = avformat_new_stream(pFormatCtx_Out, NULL);

        pAudioStream_mic->codec->codec = avcodec_find_encoder(pFormatCtx_Out->oformat->audio_codec);
        pOutputCodecCtx_mic = pAudioStream_mic->codec;

        pOutputCodecCtx_mic->sample_rate = pFormatCtx_Audio_mic->streams[0]->codec->sample_rate;
        pOutputCodecCtx_mic->channel_layout = pFormatCtx_Out->streams[0]->codec->channel_layout;
        pOutputCodecCtx_mic->channels = av_get_channel_layout_nb_channels(pAudioStream_mic->codec->channel_layout);
        if (pOutputCodecCtx_mic->channel_layout == 0)
        {
            pOutputCodecCtx_mic->channel_layout = AV_CH_LAYOUT_STEREO;
            pOutputCodecCtx_mic->channels = av_get_channel_layout_nb_channels(pOutputCodecCtx_mic->channel_layout);
        }
        pOutputCodecCtx_mic->sample_fmt = pAudioStream_mic->codec->codec->sample_fmts[0];
        AVRational time_base = { 1, pAudioStream_mic->codec->sample_rate };
        pAudioStream_mic->time_base = time_base;
        //audioCodecCtx->time_base = time_base;

        pOutputCodecCtx_mic->codec_tag = 0;
        if (pFormatCtx_Out->oformat->flags & AVFMT_GLOBALHEADER)
            pOutputCodecCtx_mic->flags |= CODEC_FLAG_GLOBAL_HEADER;

        if (avcodec_open2(pOutputCodecCtx_mic, pOutputCodecCtx_mic->codec, 0) < 0)
        {
            // the encoder failed to open, bail out
            return -1;
        }
    }

    if (!(pFormatCtx_Out->oformat->flags & AVFMT_NOFILE))
    {
        if (avio_open(&pFormatCtx_Out->pb, outFileName, AVIO_FLAG_WRITE) < 0)
        {
            printf("can not open output file handle!\n");
            return -1;
        }
    }

    if (avformat_write_header(pFormatCtx_Out, NULL) < 0)
    {
        printf("can not write the header of the output file!\n");
        return -1;
    }

    return 0;
}
int _tmain(int argc, _TCHAR* argv[])
{
    av_register_all();
    avdevice_register_all();

    if (OpenVideoCapture() < 0)
    {
        return -1;
    }
    if (OpenAudioCapture() < 0)
    {
        return -1;
    }
    if (OpenAudioCapture_mic() < 0)
    {
        return -1;
    }
    if (OpenOutPut() < 0)
    {
        return -1;
    }

    InitializeCriticalSection(&VideoSection);
    InitializeCriticalSection(&AudioSection);
    InitializeCriticalSection(&AudioSection_mic);

    AVFrame *picture = av_frame_alloc();
    int size = avpicture_get_size(pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt,
        pFormatCtx_Out->streams[VideoIndex]->codec->width, pFormatCtx_Out->streams[VideoIndex]->codec->height);
    picture_buf = new uint8_t[size];

    avpicture_fill((AVPicture *)picture, picture_buf,
        pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt,
        pFormatCtx_Out->streams[VideoIndex]->codec->width,
        pFormatCtx_Out->streams[VideoIndex]->codec->height);

    // start the screen capture thread
    CreateThread(NULL, 0, ScreenCapThreadProc, 0, 0, NULL);
    // start the system audio capture thread
    CreateThread(NULL, 0, AudioCapThreadProc, 0, 0, NULL);
    // start the microphone capture thread
    CreateThread(NULL, 0, AudioMicCapThreadProc, 0, 0, NULL);

    int64_t cur_pts_v = 0, cur_pts_a = 0, cur_pts_a_m = 0;
    int VideoFrameIndex = 0, AudioFrameIndex = 0, AudioFrameIndex_mic = 0;
    while (1)
    {
        if (bStop && bCap)
        {
            bCap = false;
            Sleep(2000); // crudely wait for the capture threads to finish
        }

        if (fifo_audio && fifo_audio_mic && fifo_video)
        {
            int sizeAudio = av_audio_fifo_size(fifo_audio);
            int sizeAudio_mic = av_audio_fifo_size(fifo_audio_mic);
            int sizeVideo = av_fifo_size(fifo_video);
            // once the buffered data has been written out, leave the loop
            if (av_audio_fifo_size(fifo_audio) <= pFormatCtx_Out->streams[AudioIndex]->codec->frame_size &&
                av_audio_fifo_size(fifo_audio_mic) <= pFormatCtx_Out->streams[AudioIndex_mic]->codec->frame_size &&
                av_fifo_size(fifo_video) <= frame_size && !bCap)
            {
                break;
            }
        }

        if (av_compare_ts(cur_pts_v, pFormatCtx_Out->streams[VideoIndex]->time_base,
            cur_pts_a, pFormatCtx_Out->streams[AudioIndex]->time_base) <= 0 &&
            av_compare_ts(cur_pts_v, pFormatCtx_Out->streams[VideoIndex]->time_base,
            cur_pts_a_m, pFormatCtx_Out->streams[AudioIndex_mic]->time_base) <= 0)
        {
            // read data from the video fifo
            if (av_fifo_size(fifo_video) < frame_size && !bCap)
            {
                cur_pts_v = 0x7fffffffffffffff;
            }
            if (av_fifo_size(fifo_video) >= size)
            {
                EnterCriticalSection(&VideoSection);
                av_fifo_generic_read(fifo_video, picture_buf, size, NULL);
                LeaveCriticalSection(&VideoSection);

                avpicture_fill((AVPicture *)picture, picture_buf,
                    pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt,
                    pFormatCtx_Out->streams[VideoIndex]->codec->width,
                    pFormatCtx_Out->streams[VideoIndex]->codec->height);

                //pts = n * ((1 / timebase) / fps);
                picture->pts = VideoFrameIndex * ((pFormatCtx_Video->streams[0]->time_base.den / pFormatCtx_Video->streams[0]->time_base.num) / m_nFramerate);

                int got_picture = 0;
                AVPacket pkt;
                av_init_packet(&pkt);

                pkt.data = NULL;
                pkt.size = 0;
                int ret = avcodec_encode_video2(pFormatCtx_Out->streams[VideoIndex]->codec, &pkt, picture, &got_picture);
                if (ret < 0)
                {
                    // encoding error, skip this frame
                    continue;
                }

                if (got_picture == 1)
                {
                    pkt.stream_index = VideoIndex;
                    pkt.pts = av_rescale_q_rnd(pkt.pts, pFormatCtx_Video->streams[0]->time_base,
                        pFormatCtx_Out->streams[VideoIndex]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
                    pkt.dts = av_rescale_q_rnd(pkt.dts, pFormatCtx_Video->streams[0]->time_base,
                        pFormatCtx_Out->streams[VideoIndex]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));

                    pkt.duration = ((pFormatCtx_Out->streams[0]->time_base.den / pFormatCtx_Out->streams[0]->time_base.num) / m_nFramerate);

                    cur_pts_v = pkt.pts;

                    ret = av_interleaved_write_frame(pFormatCtx_Out, &pkt);
                    //delete[] pkt.data;
                    av_free_packet(&pkt);
                }
                VideoFrameIndex++;
            }
        }
        if (av_compare_ts(cur_pts_v, pFormatCtx_Out->streams[VideoIndex]->time_base,
            cur_pts_a, pFormatCtx_Out->streams[AudioIndex]->time_base) > 0)
        {
            if (NULL == fifo_audio)
            {
                continue; // the audio fifo has not been initialized yet
            }
            if (av_audio_fifo_size(fifo_audio) < pFormatCtx_Out->streams[AudioIndex]->codec->frame_size && !bCap)
            {
                cur_pts_a = 0x7fffffffffffffff;
            }
            if (av_audio_fifo_size(fifo_audio) >=
                (pFormatCtx_Out->streams[AudioIndex]->codec->frame_size > 0 ? pFormatCtx_Out->streams[AudioIndex]->codec->frame_size : 1024))
            {
                AVFrame *frame;
                frame = av_frame_alloc();
                frame->nb_samples = pFormatCtx_Out->streams[AudioIndex]->codec->frame_size > 0 ? pFormatCtx_Out->streams[AudioIndex]->codec->frame_size : 1024;
                frame->channel_layout = pFormatCtx_Out->streams[AudioIndex]->codec->channel_layout;
                frame->format = pFormatCtx_Out->streams[AudioIndex]->codec->sample_fmt;
                frame->sample_rate = pFormatCtx_Out->streams[AudioIndex]->codec->sample_rate;
                av_frame_get_buffer(frame, 0);

                EnterCriticalSection(&AudioSection);
                av_audio_fifo_read(fifo_audio, (void **)frame->data,
                    (pFormatCtx_Out->streams[AudioIndex]->codec->frame_size > 0 ? pFormatCtx_Out->streams[AudioIndex]->codec->frame_size : 1024));
                LeaveCriticalSection(&AudioSection);

                if (pFormatCtx_Out->streams[0]->codec->sample_fmt != pFormatCtx_Audio->streams[AudioIndex]->codec->sample_fmt
                    || pFormatCtx_Out->streams[0]->codec->channels != pFormatCtx_Audio->streams[AudioIndex]->codec->channels
                    || pFormatCtx_Out->streams[0]->codec->sample_rate != pFormatCtx_Audio->streams[AudioIndex]->codec->sample_rate)
                {
                    // if the input and output audio formats differ, resampling would be needed; they are identical here, so it is skipped
                }

                AVPacket pkt_out;
                av_init_packet(&pkt_out);
                int got_picture = -1;
                pkt_out.data = NULL;
                pkt_out.size = 0;

                frame->pts = AudioFrameIndex * pFormatCtx_Out->streams[AudioIndex]->codec->frame_size;
                if (avcodec_encode_audio2(pFormatCtx_Out->streams[AudioIndex]->codec, &pkt_out, frame, &got_picture) < 0)
                {
                    printf("can not encode an audio frame");
                }
                av_frame_free(&frame);
                if (got_picture)
                {
                    pkt_out.stream_index = AudioIndex;
                    pkt_out.pts = AudioFrameIndex * pFormatCtx_Out->streams[AudioIndex]->codec->frame_size;
                    pkt_out.dts = AudioFrameIndex * pFormatCtx_Out->streams[AudioIndex]->codec->frame_size;
                    pkt_out.duration = pFormatCtx_Out->streams[AudioIndex]->codec->frame_size;

                    cur_pts_a = pkt_out.pts;

                    Write_Log(LOG_INFO, "Current audio pts is %d", pkt_out.pts);
                    int ret = av_interleaved_write_frame(pFormatCtx_Out, &pkt_out);
                    av_free_packet(&pkt_out);
                }
                AudioFrameIndex++;
            }
        }
        if (av_compare_ts(cur_pts_v, pFormatCtx_Out->streams[VideoIndex]->time_base,
            cur_pts_a_m, pFormatCtx_Out->streams[AudioIndex_mic]->time_base) > 0)
        {
            if (NULL == fifo_audio_mic)
            {
                continue; // the microphone fifo has not been initialized yet
            }
            if (av_audio_fifo_size(fifo_audio_mic) < pFormatCtx_Out->streams[AudioIndex_mic]->codec->frame_size && !bCap)
            {
                cur_pts_a_m = 0x7fffffffffffffff;
            }
            if (av_audio_fifo_size(fifo_audio_mic) >=
                (pFormatCtx_Out->streams[AudioIndex_mic]->codec->frame_size > 0 ? pFormatCtx_Out->streams[AudioIndex_mic]->codec->frame_size : 1024))
            {
                AVFrame *frame_mic;
                frame_mic = av_frame_alloc();
                frame_mic->nb_samples = pFormatCtx_Out->streams[AudioIndex_mic]->codec->frame_size > 0 ? pFormatCtx_Out->streams[AudioIndex_mic]->codec->frame_size : 1024;
                frame_mic->channel_layout = pFormatCtx_Out->streams[AudioIndex_mic]->codec->channel_layout;
                frame_mic->format = pFormatCtx_Out->streams[AudioIndex_mic]->codec->sample_fmt;
                frame_mic->sample_rate = pFormatCtx_Out->streams[AudioIndex_mic]->codec->sample_rate;
                av_frame_get_buffer(frame_mic, 0);

                EnterCriticalSection(&AudioSection_mic);
                av_audio_fifo_read(fifo_audio_mic, (void **)frame_mic->data,
                    (pFormatCtx_Out->streams[AudioIndex_mic]->codec->frame_size > 0 ? pFormatCtx_Out->streams[AudioIndex_mic]->codec->frame_size : 1024));
                LeaveCriticalSection(&AudioSection_mic);

                AVPacket pkt_out_mic;
                av_init_packet(&pkt_out_mic);
                int got_picture_mic = -1;
                pkt_out_mic.data = NULL;
                pkt_out_mic.size = 0;

                frame_mic->pts = AudioFrameIndex_mic * pFormatCtx_Out->streams[AudioIndex_mic]->codec->frame_size;
                if (avcodec_encode_audio2(pFormatCtx_Out->streams[AudioIndex_mic]->codec, &pkt_out_mic, frame_mic, &got_picture_mic) < 0)
                {
                    printf("can not encode an audio frame");
                }
                av_frame_free(&frame_mic);
                if (got_picture_mic)
                {
                    pkt_out_mic.stream_index = AudioIndex_mic;
                    pkt_out_mic.pts = AudioFrameIndex_mic * pFormatCtx_Out->streams[AudioIndex_mic]->codec->frame_size;
                    pkt_out_mic.dts = AudioFrameIndex_mic * pFormatCtx_Out->streams[AudioIndex_mic]->codec->frame_size;
                    pkt_out_mic.duration = pFormatCtx_Out->streams[AudioIndex_mic]->codec->frame_size;

                    cur_pts_a_m = pkt_out_mic.pts;

                    Write_Log(LOG_INFO, ">>>>>Current audio(mic) pts is %d", pkt_out_mic.pts);
                    int ret = av_interleaved_write_frame(pFormatCtx_Out, &pkt_out_mic);
                    av_free_packet(&pkt_out_mic);
                }
                AudioFrameIndex_mic++;
            }
        }
    }
    delete[] picture_buf;

    av_fifo_free(fifo_video);
    av_audio_fifo_free(fifo_audio);
    av_audio_fifo_free(fifo_audio_mic);

    av_write_trailer(pFormatCtx_Out);

    avio_close(pFormatCtx_Out->pb);
    avformat_free_context(pFormatCtx_Out);

    if (pFormatCtx_Video != NULL)
    {
        avformat_close_input(&pFormatCtx_Video);
        pFormatCtx_Video = NULL;
    }
    if (pFormatCtx_Audio != NULL)
    {
        avformat_close_input(&pFormatCtx_Audio);
        pFormatCtx_Audio = NULL;
    }
    if (pFormatCtx_Audio_mic != NULL)
    {
        avformat_close_input(&pFormatCtx_Audio_mic);
        pFormatCtx_Audio_mic = NULL;
    }

    return 0;
}
DWORD WINAPI ScreenCapThreadProc(LPVOID lpParam)
{
    AVPacket packet; /* = (AVPacket *)av_malloc(sizeof(AVPacket)) */
    int got_picture;
    AVFrame *pFrame;
    pFrame = av_frame_alloc();

    AVFrame *picture = av_frame_alloc();
    int size = avpicture_get_size(pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt,
        pFormatCtx_Out->streams[VideoIndex]->codec->width, pFormatCtx_Out->streams[VideoIndex]->codec->height);
    //picture_buf = new uint8_t[size];

    avpicture_fill((AVPicture *)picture, picture_buf,
        pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt,
        pFormatCtx_Out->streams[VideoIndex]->codec->width,
        pFormatCtx_Out->streams[VideoIndex]->codec->height);

    FILE *p = NULL;
    p = fopen("proc_test.yuv", "wb+");

    av_init_packet(&packet);
    int height = pFormatCtx_Out->streams[VideoIndex]->codec->height;
    int width = pFormatCtx_Out->streams[VideoIndex]->codec->width;
    int y_size = height * width;
    while (bCap)
    {
        packet.data = NULL;
        packet.size = 0;
        if (av_read_frame(pFormatCtx_Video, &packet) < 0)
        {
            continue;
        }
        if (packet.stream_index == 0)
        {
            if (avcodec_decode_video2(pCodecCtx_Video, pFrame, &got_picture, &packet) < 0)
            {
                printf("Decode Error.\n");
                continue;
            }
            if (got_picture)
            {
                sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0,
                    pFormatCtx_Out->streams[VideoIndex]->codec->height, picture->data, picture->linesize);

                if (av_fifo_space(fifo_video) >= size)
                {
                    EnterCriticalSection(&VideoSection);
                    av_fifo_generic_write(fifo_video, picture->data[0], y_size, NULL);
                    av_fifo_generic_write(fifo_video, picture->data[1], y_size / 4, NULL);
                    av_fifo_generic_write(fifo_video, picture->data[2], y_size / 4, NULL);
                    LeaveCriticalSection(&VideoSection);
                }
            }
        }
        av_free_packet(&packet);
        //Sleep(50);
    }
    av_frame_free(&pFrame);
    av_frame_free(&picture);
    //delete[] picture_buf;
    return 0;
}
DWORD WINAPI AudioCapThreadProc(LPVOID lpParam)
{
    AVPacket pkt;
    AVFrame *frame;
    frame = av_frame_alloc();
    int gotframe;
    while (bCap)
    {
        pkt.data = NULL;
        pkt.size = 0;
        if (av_read_frame(pFormatCtx_Audio, &pkt) < 0)
        {
            continue;
        }

        if (avcodec_decode_audio4(pFormatCtx_Audio->streams[0]->codec, frame, &gotframe, &pkt) < 0)
        {
            av_frame_free(&frame);
            printf("can not decode a frame");
            break;
        }
        av_free_packet(&pkt);

        if (!gotframe)
        {
            continue; // no frame decoded, move on to the next packet
        }

        if (NULL == fifo_audio)
        {
            fifo_audio = av_audio_fifo_alloc(pFormatCtx_Audio->streams[0]->codec->sample_fmt,
                pFormatCtx_Audio->streams[0]->codec->channels, 30 * frame->nb_samples);
        }

        int buf_space = av_audio_fifo_space(fifo_audio);
        if (av_audio_fifo_space(fifo_audio) >= frame->nb_samples)
        {
            EnterCriticalSection(&AudioSection);
            av_audio_fifo_write(fifo_audio, (void **)frame->data, frame->nb_samples);
            LeaveCriticalSection(&AudioSection);
        }
    }
    av_frame_free(&frame);
    return 0;
}
DWORD WINAPI AudioMicCapThreadProc(LPVOID lpParam)
{
    AVPacket pkt;
    AVFrame *frame;
    frame = av_frame_alloc();
    int gotframe;
    while (bCap)
    {
        pkt.data = NULL;
        pkt.size = 0;
        if (av_read_frame(pFormatCtx_Audio_mic, &pkt) < 0)
        {
            continue;
        }

        if (avcodec_decode_audio4(pFormatCtx_Audio_mic->streams[0]->codec, frame, &gotframe, &pkt) < 0)
        {
            av_frame_free(&frame);
            printf("can not decode a frame");
            break;
        }
        av_free_packet(&pkt);

        if (!gotframe)
        {
            continue; // no frame decoded, move on to the next packet
        }

        if (NULL == fifo_audio_mic)
        {
            fifo_audio_mic = av_audio_fifo_alloc(pFormatCtx_Audio_mic->streams[0]->codec->sample_fmt,
                pFormatCtx_Audio_mic->streams[0]->codec->channels, 30 * frame->nb_samples);
        }

        int buf_space = av_audio_fifo_space(fifo_audio_mic);
        if (av_audio_fifo_space(fifo_audio_mic) >= frame->nb_samples)
        {
            EnterCriticalSection(&AudioSection_mic);
            av_audio_fifo_write(fifo_audio_mic, (void **)frame->data, frame->nb_samples);
            LeaveCriticalSection(&AudioSection_mic);
        }
    }
    av_frame_free(&frame);
    return 0;
}
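For checking the exact DirectShow device names (the microphone name passed to OpenAudioCapture_mic above looks truncated), a rough sketch like the following can dump the device list through the dshow "list_devices" option. It is only for debugging and is not part of the program above; the open call is expected to fail after the list has been printed to the FFmpeg log:

// Sketch: list the DirectShow capture devices so the exact audio device names
// can be copied into OpenAudioCapture() / OpenAudioCapture_mic().
void ListDshowDevices()
{
    AVFormatContext *pFmtCtx = NULL;
    AVDictionary *options = NULL;
    AVInputFormat *ifmt = av_find_input_format("dshow");
    av_dict_set(&options, "list_devices", "true", 0);
    // "dummy" is not a real device; with list_devices set, the dshow demuxer
    // only prints the available video/audio devices and then returns an error.
    avformat_open_input(&pFmtCtx, "dummy", ifmt, &options);
    av_dict_free(&options);
}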
But when I run the program, I can only record the screen together with virtual-audio-capturer, or the screen together with the microphone — it never records all three at the same time. Why is that?
Could anyone passing by point out what is wrong here?
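While debugging this, it may also help to print the concrete error code returned by avformat_open_input instead of a fixed message. Below is a minimal sketch; open_audio_input is only an illustrative helper name and is not used by the program above:

// Sketch: open one dshow audio device and report the exact FFmpeg error on failure.
static int open_audio_input(AVFormatContext **ctx, const char *devName)
{
    AVInputFormat *ifmt = av_find_input_format("dshow");
    int ret = avformat_open_input(ctx, devName, ifmt, NULL);
    if (ret < 0)
    {
        char errbuf[AV_ERROR_MAX_STRING_SIZE] = { 0 };
        av_strerror(ret, errbuf, sizeof(errbuf));          // turn the error code into readable text
        printf("open %s failed: %s\n", devName, errbuf);
    }
    return ret;
}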